diff --git a/.cursor/rules/agent-wdd.mdc b/.cursor/rules/agent-wdd.mdc
deleted file mode 100644
index bbf34cc..0000000
--- a/.cursor/rules/agent-wdd.mdc
+++ /dev/null
@@ -1,16 +0,0 @@
----
-description:
-globs:
-alwaysApply: true
----
-
-# You are a master of cobra, well versed in modern CLI tool development, able to build architecturally robust CLI programs
-- Able to design full-featured, robust interactive CLIs
-
-# Project code style requirements
-- Functions carry parameter, return-value, and purpose comments
-- While writing code, log generously via [CallerLog.go](mdc:agent-wdd/log/CallerLog.go)
-- The code also carries detailed flow and feature descriptions
-
-
-
diff --git a/.cursor/rules/cert-manager-wdd.mdc b/.cursor/rules/cert-manager-wdd.mdc
deleted file mode 100644
index 59801a9..0000000
--- a/.cursor/rules/cert-manager-wdd.mdc
+++ /dev/null
@@ -1,35 +0,0 @@
----
-description: Design outline for the certificate management tool implemented by WDD
-globs:
-alwaysApply: false
----
-
-
-# Module purpose
-- Build a simple tool for requesting SSL certificates and managing their validity
-
-### Reference projects
-- A radically simplified version of https://github.com/cert-manager/cert-manager
-- A parallel alternative to https://github.com/acmesh-official/acme.sh
-
-## Requesting certificates
-- Given a domain name, support issuance via Cloudflare DNS validation
-- The default key type for issued certificates is ec-256
-- CA servers: do not violate their abuse policies; keep appropriate wait intervals
-  - ZeroSSL
-  - Let's Encrypt
-- Certificate directory
-  - Set a default storage directory and save certificates in per-domain folders
-  - For a wildcard domain such as *.qq.com, name the folder x.qq.com
-
-## Viewing certificates
-- List information for every issued certificate
-- domain name
-- registration time
-- expiry time
-- whether renewal is needed (fewer than 30 days of validity left)
-
-## Renewing certificates
-- Take a domain name
-- Look up that certificate's details
-- Request a fresh certificate for that domain
diff --git a/.cursor/rules/cloudflare.mdc b/.cursor/rules/cloudflare.mdc
deleted file mode 100644
index 0fe8cc3..0000000
--- a/.cursor/rules/cloudflare.mdc
+++ /dev/null
@@ -1,144 +0,0 @@
----
-description: Notes on Cloudflare
-globs:
-alwaysApply: false
----
-
-# Cloudflare domain and DNS management features
-- Cloudflare's unified response envelope is:
-{
- "result": any,
- "result_info": {
- "page": 1,
- "per_page": 20,
- "total_pages": 1,
- "count": 1,
- "total_count": 1
- },
- "success": true,
- "errors": [
- {
- "code": 6003,
- "message": "Invalid request headers",
- "error_chain": [
- {
- "code": 6111,
- "message": "Invalid format for Authorization header"
- }
- ]
- }
- ],
- "messages": []
-}
-
-
-## Domain management
-
-### Domain lookup
-- The response structure is as follows:
-{
- "result": [
- {
- "id": "511894a4f1357feb905e974e16241ebb",
- "name": "107421.xyz",
- "status": "active",
- "paused": false,
- "type": "full",
- "development_mode": 0,
- "name_servers": [
- "ali.ns.cloudflare.com",
- "walt.ns.cloudflare.com"
- ],
- "original_name_servers": [
- "ns1.dnsowl.com",
- "ns2.dnsowl.com",
- "ns3.dnsowl.com"
- ],
- "original_registrar": null,
- "original_dnshost": null,
- "modified_on": "2021-11-03T01:47:42.506130Z",
- "created_on": "2021-11-03T01:33:07.447056Z",
- "activated_on": "2021-11-03T01:47:42.506130Z",
- "meta": {
- "step": 2,
- "custom_certificate_quota": 0,
- "page_rule_quota": 3,
- "phishing_detected": false
- },
- "owner": {
- "id": null,
- "type": "user",
- "email": null
- },
- "account": {
- "id": "dfaadeb83406ef5ad35da02617af9191",
- "name": "Zeaslity@gmail.com's Account"
- },
- "tenant": {
- "id": null,
- "name": null
- },
- "tenant_unit": {
- "id": null
- },
- "permissions": [
- "#zone:read",
- "#zone_settings:read",
- "#dns_records:edit",
- "#dns_records:read"
- ],
- "plan": {
- "id": "0feeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
- "name": "Free Website",
- "price": 0,
- "currency": "USD",
- "frequency": "",
- "is_subscribed": false,
- "can_subscribe": false,
- "legacy_id": "free",
- "legacy_discount": false,
- "externally_managed": false
- }
- }
- ],
- "result_info": {
- "page": 1,
- "per_page": 20,
- "total_pages": 1,
- "count": 1,
- "total_count": 1
- },
- "success": true,
- "errors": [],
- "messages": []
-}
-
-
-## DNS management implementation
-- Implement DNS management for a given domain
-- Reference docs: https://developers.cloudflare.com/api/resources/dns/subresources/records/
-
-### Query all DNS records
-- A single DNS record has the following structure: {
- "id": "b940757dfd94e8e2941e2ec1b2ee0515",
- "name": "mail.107421.xyz",
- "type": "MX",
- "content": "mx2.yuanyoupush.com",
- "priority": 20,
- "proxiable": false,
- "proxied": false,
- "ttl": 1,
- "settings": {},
- "meta": {},
- "comment": "全自动邮箱",
- "tags": [],
- "created_on": "2025-03-15T05:56:37.296599Z",
- "modified_on": "2025-03-15T05:57:08.180765Z",
- "comment_modified_on": "2025-03-15T05:56:37.296599Z"
- },
-
-### Update a DNS record
-
-### Delete a DNS record
-
-### Create a DNS record
diff --git a/.idea/workspace.xml b/.idea/workspace.xml
index 7c5a1f9..5cb9632 100644
--- a/.idea/workspace.xml
+++ b/.idea/workspace.xml
@@ -67,8 +83,11 @@
"Go Test.DCU-RKE-35.80.executor": "Run",
"Go Test.DCU-中间件.executor": "Run",
"Go Test.DCU全部CMII镜像.executor": "Run",
+ "Go Test.TestCmiiEnvDeploy_ChengDuGongYeXueYuan in wdd.io/agent-operator.executor": "Run",
+ "Go Test.TestCmiiEnvDeploy_MianYangDiKong in wdd.io/agent-operator.executor": "Run",
"Go Test.TestCmiiEnvDeploy_WddSuperCluster in wdd.io/agent-operator (1).executor": "Run",
"Go Test.TestCmiiEnvDeploy_WddSuperCluster in wdd.io/agent-operator.executor": "Run",
+ "Go Test.TestCmiiEnvDeploy_XinJiangAKeSu in wdd.io/agent-operator.executor": "Run",
"Go Test.TestCmiiEnvDeploy_XiongAnJianGuanPingTai in wdd.io/agent-operator.executor": "Run",
"Go Test.TestCmiiEnvDeploy_XiongAnKongNengYuan in wdd.io/agent-operator.executor": "Run",
"Go Test.TestCmiiEnvDeploy_ZhejiangErjiPingTai in wdd.io/agent-operator.executor": "Run",
@@ -81,6 +100,7 @@
"PowerShell.one-build-and-upload.ps1 (1).executor": "Run",
"PowerShell.one-build-and-upload.ps1.executor": "Run",
"RunOnceActivity.ShowReadmeOnStart": "true",
+ "RunOnceActivity.TerminalTabsStorage.copyFrom.TerminalArrangementManager.252": "true",
"RunOnceActivity.git.unshallow": "true",
"RunOnceActivity.go.formatter.settings.were.checked": "true",
"RunOnceActivity.go.migrated.go.modules.settings": "true",
diff --git a/1-designs/1-改造计划.md b/1-designs/1-改造计划.md
new file mode 100644
index 0000000..621c234
--- /dev/null
+++ b/1-designs/1-改造计划.md
@@ -0,0 +1,11 @@
+
+agent-deploy's core positioning:
+1. A self-implemented Helm-like tool for managing CMII deployment YAML files
+2. Generate complete deployment YAML files
+   1. middleware
+   2. k8s-dashboard
+   3. CMII business-deployment YAML files
+3. Provide API endpoints
+   1. an endpoint that generates a specific YAML
+4. Business deployment parameters
+   1. all deployed middleware parameters should be extracted and managed centrally
diff --git a/1-designs/WddSuperAgent-DDS.md b/1-designs/WddSuperAgent-DDS.md
new file mode 100644
index 0000000..d098823
--- /dev/null
+++ b/1-designs/WddSuperAgent-DDS.md
@@ -0,0 +1,90 @@
+# WddSuperAgent DDS (Modernization Draft)
+
+## Background and overall goals
+- WddSuperAgent consists of several Go modules: `agent-wdd` (general-purpose CLI tool), `agent-common` (utility library: constants, image-name conversion, etc.), `agent-deploy` (K8s deployment template generation), and `agent-operator` (ops hub: image sync, K8s operations, Harbor/MinIO interaction). Today the modules are tightly coupled, the flows are mostly script-style functions, and there is no unified API or test safety net.
+- Goal: move toward "operations as a service" by introducing Gin for standard endpoints, splitting service and adapter layers by domain, building out configuration, security, and testing, and laying a stable base for a future UI and automated orchestration.
+
+## Current state (by module)
+- `agent-common`: logger, assertions, file/string utilities, image-name conversion (the `image` package), and per-project image lists (`real_project/*`); lacks interface abstractions and error semantics.
+- `agent-deploy` (depends on agent-common): generates/applies CMII-related K8s resource templates (Dashboard, NFS, Middlewares, App, Ingress, Env Config), assembling YAML/apply files via `CommonEnvironmentConfig`; many hard-coded paths and global variables.
+- `agent-operator` (depends on agent-common and agent-deploy): the ops hub, responsible for:
+  - the DCU/DLTU image-sync flows (`CmiiImageSyncOperator`): Download → Compress → Upload/Push; can pull from Demo/RKE/Middle/version-number sources, upload to MinIO, and push straight to Harbor (tag + push).
+  - Docker/Harbor/MinIO adapters (`image`, `minio` packages) and a K8s adapter (`CmiiK8sOperator`: query, scale-down, restart, re-tag, etc. for Deployments/Pods/Nodes); some functions contain hard-coded credentials.
+  - main runs in interactive mode (mode=image/operator); there is no HTTP API, and flow control and state management are scattered.
+- `agent-wdd` (standalone): a Cobra CLI Swiss-army knife (zsh, proxy, acme, Cloudflare, host info, etc.), decoupled from CMII operations.
+
+## Key existing flows
+- Image sync (DCU/DLTU): `ImageSyncEntity` describes the Download/Compress/Upload conditions, `A_DownloadCompressUpload` runs pull → compress → upload, and `A_DownloadLoadTagUpload` handles offline-package load + re-tag + push; heavy filesystem work and global paths (`OfflineImageGzipFolderPrefix`).
+- K8s operations: `CmiiK8sOperator` wraps client-go and, switching by namespace, performs query, scale, restart, and pod delete/evict on Deployments/StatefulSets/Pods/Nodes.
+- Deployment generation: `OctopusDeploy` composes an environment from the template packages, then generates and applies manifests from an image map.
+
+## Main pain points
+- Weak layering: domain logic, infrastructure operations, and flow orchestration are mixed in the same packages; many global variables and hard-coded paths/credentials.
+- Poor testability: most tests are print-and-inspect; functions such as `SaveImageListToGzipFile` even return stubs; no interface injection, mocks, or integration regression.
+- A single interface shape: CLI/interactive only, with no HTTP API or task orchestration, which blocks automation and UI integration.
+- Config and security: credentials in plaintext or hard-coded (Harbor RegistryAuth, kubeconfig paths, etc.); no unified config loading or secret management.
+- Reliability/observability: no health probes, audit logging, metrics, or tracing; IO/network calls lack timeouts, and error semantics are unclear.
+
+## Target architecture (layers and modules)
+- **Project layout (example)**
+  ```
+  /cmd/operator        (HTTP + CLI entry point)
+  /cmd/wdd             (standalone CLI, kept)
+  /pkg/domain          (entities and use-case contracts: ImageSyncSpec, DeployPlan, ClusterTarget, ...)
+  /pkg/service         (application layer: ImageSyncService, DeployService, K8sService, RegistryService, StorageService)
+  /pkg/infra           (adapters: Docker/Harbor client, MinIO client, K8s client, template renderer, config)
+  /pkg/transport/http  (Gin handlers, DTO validation, unified responses)
+  /pkg/transport/cli   (Cobra command adapters, reusing the same services)
+  /pkg/obs             (logging/metrics/tracing/audit)
+  /configs             (per-environment config: dev/test/prod)
+  ```
+- **Dependency direction**: `transport -> service -> infra`, with `domain` as pure models; `agent-wdd` stays standalone but may share the download/file helpers in `pkg/infra`.
+- **Concurrency/task model**: run long image sync/compress/upload jobs as async tasks (a queue, or goroutines + a worker pool) with persisted task state (Bolt/SQLite/Redis optional); Gin exposes query/cancel endpoints. A worker-pool sketch follows.
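+
+As a minimal sketch of that worker-pool model (the `Task` type and error collection below are illustrative assumptions, not existing code):
+
+```go
+package service
+
+import (
+	"context"
+	"sync"
+)
+
+// Task is one unit of work, e.g. pulling and compressing a single image.
+type Task func(ctx context.Context) error
+
+// RunPool executes tasks with at most `workers` concurrent goroutines
+// and collects per-task errors behind a mutex instead of a shared slice.
+func RunPool(ctx context.Context, workers int, tasks []Task) []error {
+	if workers < 1 {
+		workers = 1
+	}
+	jobs := make(chan Task)
+	var (
+		mu   sync.Mutex
+		errs []error
+		wg   sync.WaitGroup
+	)
+	for i := 0; i < workers; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for t := range jobs {
+				if err := t(ctx); err != nil {
+					mu.Lock()
+					errs = append(errs, err) // keep a failure list for diagnostics
+					mu.Unlock()
+				}
+			}
+		}()
+	}
+	for _, t := range tasks {
+		jobs <- t
+	}
+	close(jobs)
+	wg.Wait()
+	return errs
+}
+```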
+
+## Component design and interface draft
+- **Domain models** (a struct sketch follows this list)
+  - `ImageSyncSpec`: source list (cmiiNameTags/fullNames/projectVersion/sourceAuth), compression strategy (split/monolithic, target directory), upload strategy (minio bucket / oss prefix), push strategy (target harbor/cred), and update strategy (whether to roll cluster tags).
+  - `DeployPlan`: target cluster/namespace, image map, template parameters (NFS/Ingress/SRS, etc.).
+  - `ClusterTarget`: kubeconfig/context/namespace plus execution policy (timeout, concurrency).
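+
+A minimal sketch of these domain models (field names beyond the ones listed above are assumptions):
+
+```go
+package domain
+
+import "time"
+
+// ImageSyncSpec describes one sync job end to end.
+type ImageSyncSpec struct {
+	CmiiNameTags   []string // short name:tag pairs
+	FullNames      []string // fully qualified image references
+	ProjectVersion string
+	SplitArchives  bool   // split vs. monolithic gzip
+	TargetDir      string // compression output directory
+	MinioBucket    string // upload strategy
+	TargetHarbor   string // push target; credentials are referenced, not embedded
+	UpdateTags     bool   // whether to roll cluster tags afterwards
+}
+
+// ClusterTarget identifies where and how to execute.
+type ClusterTarget struct {
+	Kubeconfig  string
+	Context     string
+	Namespace   string
+	Timeout     time.Duration
+	Concurrency int
+}
+
+// DeployPlan binds images and template parameters to a target.
+type DeployPlan struct {
+	Target ClusterTarget
+	Images map[string]string // app name -> image reference
+	Params map[string]string // NFS/Ingress/SRS template parameters
+	DryRun bool
+}
+```
+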
+- **Service layer** (an interface sketch follows this list)
+  - `ImageSyncService`: wraps Download → Compress → Upload → Push, with `RegistryClient`, `StorageClient`, `Compressor`, and `ImageNamer` injected as interfaces; supports idempotency and resumable runs (skip gzips that already exist).
+  - `DeployService`: generates/renders/applies K8s manifests; optional dry-run; depends on a `K8sApplier` (kubectl or go-client).
+  - `K8sService`: read/write operations on Deployments/StatefulSets/Pods/Nodes, exposed to HTTP/CLI; supports batch restart/cordon/evict.
+  - `ConfigService`: centralized config loading (Viper) with env overrides and secret mounts.
+  - `ObsService`: logging wrapper (zap sugared logger), Prometheus metrics (request volume/latency/failure rate/queue length), OpenTelemetry traces.
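+
+A sketch of the injected interfaces (the method sets are assumptions distilled from the flows above):
+
+```go
+package service
+
+import "context"
+
+// RegistryClient abstracts Docker/Harbor operations.
+type RegistryClient interface {
+	Pull(ctx context.Context, ref string) error
+	Tag(ctx context.Context, src, dst string) error
+	Push(ctx context.Context, ref string) error
+}
+
+// StorageClient abstracts MinIO/OSS uploads.
+type StorageClient interface {
+	Upload(ctx context.Context, bucket, key, localPath string) error
+	Exists(ctx context.Context, bucket, key string) (bool, error)
+}
+
+// Compressor turns saved images into gzip archives (split or monolithic).
+type Compressor interface {
+	Compress(ctx context.Context, srcPath, dstPath string) error
+}
+
+// ImageNamer converts between CMII short names and full registry references.
+type ImageNamer interface {
+	FullName(shortNameTag string) (string, error)
+}
+```
+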
+- **Transport (Gin)** (a routing sketch follows this list)
+  - `POST /api/v1/images/sync`: submit an `ImageSyncSpec`, returns a task id.
+  - `GET /api/v1/images/tasks/:id`: query task status / log excerpts.
+  - `POST /api/v1/registry/push`: tag/push from a source to a target Harbor.
+  - `POST /api/v1/deploy/apply`: submit a `DeployPlan`, with optional `dryRun`.
+  - `PUT /api/v1/k8s/deployments/:ns/:name/tag`: update an image tag.
+  - `/api/v1/healthz`, `/metrics`: health check and metrics exposure.
+- **CLI (Cobra)**
+  - Reuse the same services: `wdd image sync ...`, `wdd deploy apply ...`, behaving identically to HTTP.
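+
+A minimal Gin routing sketch for the sync endpoint (the `ImageSyncSpecDTO` fields and `TaskSubmitter` interface are illustrative assumptions; handlers for the other routes are elided):
+
+```go
+package transport
+
+import (
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+// ImageSyncSpecDTO mirrors domain.ImageSyncSpec on the wire.
+type ImageSyncSpecDTO struct {
+	FullNames    []string `json:"fullNames" binding:"required"`
+	MinioBucket  string   `json:"minioBucket"`
+	TargetHarbor string   `json:"targetHarbor"`
+}
+
+// TaskSubmitter is the slice of ImageSyncService this handler needs.
+type TaskSubmitter interface {
+	Submit(spec ImageSyncSpecDTO) (taskID string)
+}
+
+// NewRouter wires the draft API onto a Gin engine.
+func NewRouter(svc TaskSubmitter) *gin.Engine {
+	r := gin.New()
+	r.Use(gin.Recovery())
+	v1 := r.Group("/api/v1")
+	v1.POST("/images/sync", func(c *gin.Context) {
+		var spec ImageSyncSpecDTO
+		if err := c.ShouldBindJSON(&spec); err != nil {
+			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
+			return
+		}
+		c.JSON(http.StatusAccepted, gin.H{"taskId": svc.Submit(spec)})
+	})
+	v1.GET("/healthz", func(c *gin.Context) { c.Status(http.StatusOK) })
+	return r
+}
+```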
+
+## Key refactoring points and landing plan
+- **Decoupling and interfaces**: define interfaces for Docker/Harbor/MinIO/K8s (`RegistryClient`, `StorageClient`, `K8sClient`) and inject them in the service layer; move the existing implementations into `infra` so they are easy to mock.
+- **Config/secrets**: remove hard-coded values (RegistryAuth, kubeconfig paths, etc.) in favor of `configs/*.yaml` plus environment-variable overrides; manage sensitive values via mounted files or Secrets, with the Gin layer passing references only.
+- **Paths and storage**: unify the offline-image root path (configurable); move txt/gzip list outputs into a per-task context directory; provide a strategy interface for split vs. monolithic compression.
+- **Errors and retries**: add context timeouts and configurable retries to download/upload/push (a retry sketch follows this list); keep a failure list with diagnostics for partially failed images.
+- **Concurrency and resource control**: run image pulls/compression through a worker pool (configurable concurrency cap) to avoid the races possible in today's global goroutines + shared slice.
+- **Observability**: unify the log format; inject a request-id via Gin middleware; expose Prometheus metrics and pprof; emit stage progress for long tasks.
+- **Security**: remove base64 credentials from the code; scope K8s operations with RBAC and kubeconfig selection; verify hashes (md5/sha256) on upload/download.
+- **Compatibility**: keep the existing DCU/DLTU capabilities and wrap the new services as CLI subcommands, so the new API and the old scripts both stay usable.
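+
+A retry-with-timeout sketch for the IO paths (attempt counts and backoff are illustrative assumptions):
+
+```go
+package infra
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+// WithRetry runs op up to attempts times, each bounded by perTryTimeout,
+// and stops early if the parent context is cancelled.
+func WithRetry(ctx context.Context, attempts int, perTryTimeout time.Duration,
+	op func(ctx context.Context) error) error {
+	var lastErr error
+	for i := 0; i < attempts; i++ {
+		tryCtx, cancel := context.WithTimeout(ctx, perTryTimeout)
+		lastErr = op(tryCtx)
+		cancel()
+		if lastErr == nil {
+			return nil
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(time.Second << i): // exponential backoff: 1s, 2s, 4s, ...
+		}
+	}
+	return fmt.Errorf("all %d attempts failed, last error: %w", attempts, lastErr)
+}
+```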
+
+## Testing and quality assurance
+- **Unit tests**: introduce `testify`/`gomock` and mock-test the core services (ImageSyncService/K8sService/DeployService); cover name conversion, list generation, and failure branches.
+- **Integration tests**: use kind plus a local registry (or a Harbor test mock) and a local MinIO container to run the end-to-end image download → compress → push → K8s apply flow; provide `make test-integration`.
+- **Contract tests**: cover request validation/error codes of the Gin API with `httptest` (a test sketch follows this list); golden tests for the CLI.
+- **Static checks**: enable `golangci-lint` (vet, ineffassign, errcheck, revive), with `go test ./...` as the minimum gate.
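+
+An `httptest` contract-test sketch against the router sketched earlier (it reuses the assumed `NewRouter`/`TaskSubmitter`/`ImageSyncSpecDTO` names):
+
+```go
+package transport
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+)
+
+type fakeSubmitter struct{}
+
+func (fakeSubmitter) Submit(_ ImageSyncSpecDTO) string { return "task-1" }
+
+// TestSyncRejectsBadBody checks that an invalid body yields 400 with an error payload.
+func TestSyncRejectsBadBody(t *testing.T) {
+	r := NewRouter(fakeSubmitter{})
+	req := httptest.NewRequest(http.MethodPost, "/api/v1/images/sync",
+		strings.NewReader(`{"fullNames": "not-an-array"}`))
+	req.Header.Set("Content-Type", "application/json")
+	w := httptest.NewRecorder()
+	r.ServeHTTP(w, req)
+	if w.Code != http.StatusBadRequest {
+		t.Fatalf("want 400, got %d", w.Code)
+	}
+}
+```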
+
+## Migration route (three suggested steps)
+1) **Infrastructure abstraction**: extract the Registry/K8s/Storage/Compressor interfaces, move the existing implementations into `pkg/infra`, fill in stubs such as `SaveImageListToGzipFile`, and clean out hard-coded credentials/paths.
+2) **Service-ification and tests**: create `pkg/service` and rewrite the DCU/DLTU flows as an injectable `ImageSyncService`; build the Gin entry point and API DTOs; add unit/integration tests and a CI pipeline.
+3) **Ops hardening**: introduce the task queue/state persistence and metrics/logging/tracing, round out the K8s operation APIs, gradually replace old CLI logic with service calls, and tidy up configuration and docs.
+
+## Success metrics
+- Key flows exposed as APIs: image sync/push, deploy apply, and K8s tag updates each have a Gin endpoint and a matching CLI subcommand.
+- Quality gate: `go test ./...` is green, the integration tests run locally with one command, and lint reports no high-severity warnings.
+- Ops usability: no hard-coded secrets, per-environment config switching, metrics scrapeable by Prometheus, and health checks consumable by K8s probes.
+
diff --git a/agent-common/real_project/CmiiImageListConfig.go b/agent-common/real_project/CmiiImageListConfig.go
index a942cb0..82a5c5f 100644
--- a/agent-common/real_project/CmiiImageListConfig.go
+++ b/agent-common/real_project/CmiiImageListConfig.go
@@ -54,6 +54,18 @@ var CmiiUas22ImageList = []string{
"harbor.cdcyy.com.cn/cmii/cmii-live-oerator:5.2.0",
}
+var CmiiUavFlyControlImageList = []string{
+ "harbor.cdcyy.com.cn/cmii/cmii-fly-center:2.0.0-0126",
+ "harbor.cdcyy.com.cn/cmii/cmii-sky-converge:2.0.0-012601",
+ "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-lite:2.0.0-2026012703-noicp",
+}
+
+var CmiiUavFlyControlARM64ImageList = []string{
+ "harbor.cdcyy.com.cn/cmii/cmii-fly-center:1.5.0-xa-030301-arm",
+ "harbor.cdcyy.com.cn/cmii/cmii-sky-converge:1.4.0-xa-0303-arm",
+ "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-lite:2.0.0-2026030502-noicp-arm",
+}
+
var CmiiDorisImageList = []string{
"harbor.cdcyy.com.cn/cmii/doris.fe-amd64:2.1.6",
"harbor.cdcyy.com.cn/cmii/doris.be-amd64:2.1.6",
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-backend.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-backend.yaml
new file mode 100644
index 0000000..d7a775d
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-backend.yaml
@@ -0,0 +1,350 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-fly-center
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - 260207
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-fly-center
+ image: 172.22.106.77:8033/cmii/cmii-fly-center:2.0.0-0126
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: sc-cd-uav-260207
+ - name: APPLICATION_NAME
+ value: cmii-fly-center
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+              value: "2.0"
+ - name: SYS_CONFIG_GROUP
+              value: "2.0"
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 172.22.106.77:8033/cmii/cmii-fly-center:2.0.0-0126
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: sc-cd-uav-260207/cmii-fly-center
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-fly-center
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-sky-converge
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - 260207
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-sky-converge
+ image: 172.22.106.77:8033/cmii/cmii-sky-converge:2.0.0-012601
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: sc-cd-uav-260207
+ - name: APPLICATION_NAME
+ value: cmii-sky-converge
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+              value: "2.0"
+ - name: SYS_CONFIG_GROUP
+              value: "2.0"
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 172.22.106.77:8033/cmii/cmii-sky-converge:2.0.0-012601
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: sc-cd-uav-260207/cmii-sky-converge
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-sky-converge
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-configmap.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-configmap.yaml
new file mode 100644
index 0000000..0b0e923
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-configmap.yaml
@@ -0,0 +1,686 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-detection
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "detection",
+ AppClientId: "APP_FDHW2VLVDWPnnOCy"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mws
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "mws",
+ AppClientId: "APP_uKniXPELlRERBBwK"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-share
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "share",
+ AppClientId: "APP_4lVSVI0ZGxTssir8"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-threedsimulation
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "threedsimulation",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hljtt
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "hljtt",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hyper
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "hyper",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smauth
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "smauth",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-renyike
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "renyike",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-cmsportal
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "cmsportal",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-oms
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "oms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-classification
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "classification",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uavmsmanager
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "uavmsmanager",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-awareness
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "awareness",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-armypeople
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "armypeople",
+ AppClientId: "APP_UIegse6Lfou9pO1U"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-open
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "open",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-securityh5
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "securityh5",
+ AppClientId: "APP_N3ImO0Ubfu9peRHD"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-splice
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "splice",
+ AppClientId: "APP_zE0M3sTRXrCIJS8Y"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-traffic
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "traffic",
+ AppClientId: "APP_Jc8i2wOQ1t73QEJS"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uas
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "uas",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-eventsh5
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "eventsh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-flight-control
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "flight-control",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervision
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "supervision",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-base
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "base",
+ AppClientId: "APP_9LY41OaKSqk2btY0"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-emergency
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "emergency",
+ AppClientId: "APP_aGsTAY1uMZrpKdfk"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-logistics
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "logistics",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-security
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "security",
+ AppClientId: "APP_JUSEMc7afyWXxvE7"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-seniclive
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "seniclive",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-lite
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "lite",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-media
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "media",
+ AppClientId: "APP_4AU8lbifESQO4FD6"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-multiterminal
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "multiterminal",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-jiangsuwenlv
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "jiangsuwenlv",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uasms
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "uasms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-scanner
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "scanner",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-blockchain
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "blockchain",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mianyangbackend
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "mianyangbackend",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervisionh5
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "supervisionh5",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pangu
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smsecret
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "smsecret",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-secenter
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "secenter",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-ai-brain
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "ai-brain",
+ AppClientId: "APP_rafnuCAmBESIVYMH"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qingdao
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "qingdao",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pilot2cloud
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "pilot2cloud",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dikongzhixingh5
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "dikongzhixingh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qinghaitourism
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "qinghaitourism",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-visualization
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "visualization",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dispatchh5
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "dispatchh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-iot
+ namespace: sc-cd-uav-260207
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260207",
+ CloudHOST: "172.22.106.77:8088",
+ ApplicationShortName: "iot",
+ AppClientId: "empty"
+ }
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-dashboard.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-dashboard.yaml
new file mode 100644
index 0000000..4cc876b
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-dashboard.yaml
@@ -0,0 +1,315 @@
+---
+# ------------------- Dashboard Namespace ------------------- #
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: kubernetes-dashboard
+
+---
+# ------------------- Service Account ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Service (NodePort 39999) ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ type: NodePort
+ ports:
+ - port: 443
+ targetPort: 8443
+ nodePort: 39999
+ selector:
+ k8s-app: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Secrets ------------------- #
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-certs
+ namespace: kubernetes-dashboard
+type: Opaque
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-csrf
+ namespace: kubernetes-dashboard
+type: Opaque
+data:
+ csrf: ""
+
+---
+# ------------------- Dashboard Role (FIXED) ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+rules:
+  # [FIX] allow creating Secrets, which resolves the panic issue
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create"]
+  # Allow operations on the specific Secrets
+ - apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+ verbs: ["get", "update", "delete"]
+  # ConfigMap permissions
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ resourceNames: ["kubernetes-dashboard-settings"]
+ verbs: ["get", "update"]
+  # Metrics permissions
+ - apiGroups: [""]
+ resources: ["services"]
+ resourceNames: ["heapster", "dashboard-metrics-scraper"]
+ verbs: ["proxy"]
+ - apiGroups: [""]
+ resources: ["services/proxy"]
+ resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+ verbs: ["get"]
+
+---
+# ------------------- Dashboard RoleBinding ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kubernetes-dashboard-minimal
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: kubernetes-dashboard
+ template:
+ metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ spec:
+ containers:
+ - name: kubernetes-dashboard
+ image: 172.22.106.77:8033/cmii/dashboard:v2.7.0
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8443
+ protocol: TCP
+ args:
+ - --auto-generate-certificates
+ - --namespace=kubernetes-dashboard
+ volumeMounts:
+ - name: kubernetes-dashboard-certs
+ mountPath: /certs
+ - mountPath: /tmp
+ name: tmp-volume
+ livenessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /
+ port: 8443
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ volumes:
+ - name: kubernetes-dashboard-certs
+ secret:
+ secretName: kubernetes-dashboard-certs
+ - name: tmp-volume
+ emptyDir: {}
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ------------------- Metrics Scraper Service ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ ports:
+ - port: 8000
+ targetPort: 8000
+ selector:
+ k8s-app: dashboard-metrics-scraper
+
+---
+# ------------------- Metrics Scraper Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: dashboard-metrics-scraper
+ template:
+ metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ annotations:
+ seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
+ spec:
+ containers:
+ - name: dashboard-metrics-scraper
+ image: 172.22.106.77:8033/cmii/metrics-scraper:v1.0.8
+ ports:
+ - containerPort: 8000
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 8000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-volume
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ volumes:
+ - name: tmp-volume
+ emptyDir: {}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ==================================================================
+# Custom user configuration (ADMIN & READ-ONLY)
+# ==================================================================
+
+# ------------------- 1. Admin User (full cluster access) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: admin-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: admin-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: admin-user
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- 2. Read-Only User (read-only + log access) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: read-only-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: dashboard-view-with-logs
+rules:
+ - apiGroups: [""]
+    resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumes", "namespaces"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["pods/log"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["apps"]
+ resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["batch"]
+ resources: ["cronjobs", "jobs"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["ingresses", "networkpolicies"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["events.k8s.io"]
+ resources: ["events"]
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: read-only-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: dashboard-view-with-logs
+subjects:
+ - kind: ServiceAccount
+ name: read-only-user
+ namespace: kubernetes-dashboard
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-emqx.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-emqx.yaml
new file mode 100644
index 0000000..7211cd1
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-emqx.yaml
@@ -0,0 +1,664 @@
+---
+# ============== Secret - credential management ==============
+apiVersion: v1
+kind: Secret
+metadata:
+ name: emqx-credentials
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ app.kubernetes.io/managed-by: octopus-control
+    app.kubernetes.io/version: "2.0"
+type: Opaque
+stringData:
+  # Dashboard admin password
+  dashboard-admin-password: "odD8#Ve7.B"
+  # MQTT user password
+  mqtt-admin-password: "odD8#Ve7.B"
+
+---
+# ============== ServiceAccount ==============
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-emqxs
+ namespace: sc-cd-uav-260207
+
+---
+# ============== Role - RBAC ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: helm-emqxs
+ namespace: sc-cd-uav-260207
+rules:
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ - pods
+ verbs:
+ - get
+ - watch
+ - list
+
+---
+# ============== RoleBinding ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: helm-emqxs
+ namespace: sc-cd-uav-260207
+subjects:
+ - kind: ServiceAccount
+ name: helm-emqxs
+ namespace: sc-cd-uav-260207
+roleRef:
+ kind: Role
+ name: helm-emqxs
+ apiGroup: rbac.authorization.k8s.io
+
+---
+# ============== ConfigMap - bootstrap configuration ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-bootstrap-config
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+data:
+  # Main config file - overrides the defaults
+  emqx.conf: |
+    # Node settings
+ node {
+ name = "emqx@${POD_NAME}.helm-emqxs-headless.sc-cd-uav-260207.svc.cluster.local"
+ cookie = "emqx-cluster-cookie-secret"
+ data_dir = "/opt/emqx/data"
+ }
+
+    # Cluster settings
+ cluster {
+ name = emqxcl
+      # manual is recommended for a single node; use k8s for multi-node
+ discovery_strategy = manual
+ k8s {
+ apiserver = "https://kubernetes.default.svc.cluster.local:443"
+ service_name = "helm-emqxs-headless"
+        # can be changed to hostname
+ address_type = dns
+ namespace = "sc-cd-uav-260207"
+ suffix = "svc.cluster.local"
+ }
+ }
+
+    # Logging
+ log {
+ console {
+ enable = true
+ level = info
+ }
+ file {
+ enable = true
+ level = warning
+ path = "/opt/emqx/log"
+ }
+ }
+
+    # Dashboard
+ dashboard {
+ listeners.http {
+ bind = "0.0.0.0:18083"
+ }
+ default_username = "admin"
+ default_password = "public"
+ }
+
+    # Listeners
+ listeners.tcp.default {
+ bind = "0.0.0.0:1883"
+ max_connections = 1024000
+ }
+
+ listeners.ws.default {
+ bind = "0.0.0.0:8083"
+ max_connections = 1024000
+ websocket.mqtt_path = "/mqtt"
+ }
+
+ listeners.ssl.default {
+ bind = "0.0.0.0:8883"
+ max_connections = 512000
+ }
+
+    # Authentication - use the built-in database
+ authentication = [
+ {
+ mechanism = password_based
+ backend = built_in_database
+ user_id_type = username
+ password_hash_algorithm {
+ name = sha256
+ salt_position = suffix
+ }
+        # Bootstrap file path - used to seed users
+ bootstrap_file = "/opt/emqx/data/bootstrap_users.json"
+ bootstrap_type = plain
+ }
+ ]
+
+    # Authorization
+ authorization {
+ no_match = deny
+ deny_action = disconnect
+
+ sources = [
+ {
+ type = built_in_database
+ enable = true
+ }
+ ]
+ }
+
+    # MQTT protocol settings
+ mqtt {
+ max_packet_size = "1MB"
+ max_clientid_len = 65535
+ max_topic_levels = 128
+ max_qos_allowed = 2
+ max_topic_alias = 65535
+ retain_available = true
+ wildcard_subscription = true
+ shared_subscription = true
+ }
+
+---
+# ============== ConfigMap - users & ACL (strict JSON format) ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-bootstrap-users
+ namespace: sc-cd-uav-260207
+data:
+ bootstrap_users.json: |
+ [
+ { "user_id": "admin", "password": "odD8#Ve7.B", "is_superuser": true },
+ { "user_id": "cmlc", "password": "odD8#Ve7.B", "is_superuser": false }
+ ]
+
+  # [Changed] Since jq is available, use a standard JSON array here - the least error-prone format
+ bootstrap_acl.json: |
+ [
+ {
+ "username": "admin",
+ "rules": [
+ {"action": "all", "permission": "allow", "topic": "#"}
+ ]
+ },
+ {
+ "username": "cmlc",
+ "rules": [
+ {"action": "publish", "permission": "allow", "topic": "#"},
+ {"action": "subscribe", "permission": "allow", "topic": "#"}
+ ]
+ }
+ ]
+
+---
+# ============== ConfigMap - init script (revised) ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-init-dashboard
+ namespace: sc-cd-uav-260207
+data:
+ init-dashboard.sh: |
+ #!/bin/bash
+ set -e
+
+ DASHBOARD_USER="admin"
+ DASHBOARD_PASS="${DASHBOARD_ADMIN_PASSWORD}"
+ EMQX_API="http://localhost:18083/api/v5"
+ ACL_FILE="/bootstrap/bootstrap_acl.json"
+
+    # Helper: print a timestamped log line
+ log() {
+ echo "[$(date +'%H:%M:%S')] $1"
+ }
+
+    log "======================================"
+    log "Initializing Dashboard and ACL (debug version)"
+    log "======================================"
+
+    # ----------------------------------------------------------------
+    # 1. Wait for the EMQX API to become ready
+    # ----------------------------------------------------------------
+    log "[1/3] Waiting for the EMQX API..."
+ for i in $(seq 1 60); do
+ if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
+        log "✓ EMQX API is ready"
+ break
+ fi
+ if [ $i -eq 60 ]; then
+        log "✗ Timed out waiting for the EMQX API"
+ exit 1
+ fi
+ sleep 5
+ done
+
+    # ----------------------------------------------------------------
+    # 2. Update the Dashboard password
+    # ----------------------------------------------------------------
+    log "[2/3] Checking/updating the Dashboard password..."
+
+    # Get a token (try the default password)
+ LOGIN_RESP=$(curl -s -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"public\"}")
+
+ TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+ if [ -n "$TOKEN" ]; then
+      log "  Default password detected; updating..."
+ curl -s -f -X POST "${EMQX_API}/users/${DASHBOARD_USER}/change_pwd" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "{\"old_pwd\":\"public\",\"new_pwd\":\"${DASHBOARD_PASS}\"}"
+      log "  ✓ Dashboard password updated"
+    else
+      log "  ℹ Could not log in with the default password; skipping update (it was probably already changed)"
+ fi
+
+    # ----------------------------------------------------------------
+    # 3. Import ACL rules
+    # ----------------------------------------------------------------
+    echo "[3/3] Importing ACL rules..."
+
+    # Log in again to get a fresh token
+ LOGIN_RESP=$(curl -sS -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}")
+
+ TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+ if [ -z "$TOKEN" ]; then
+      echo "  ✗ Could not obtain a token; check the password settings"
+ exit 0
+ fi
+
+ if [ -f "$ACL_FILE" ]; then
+      echo "  Parsing ACL file: $ACL_FILE"
+
+ if ! jq -e . "$ACL_FILE" >/dev/null 2>&1; then
+        echo "  ✗ ACL file is not valid JSON; skipping"
+ exit 0
+ fi
+
+ jq -c '.[]' "$ACL_FILE" | while read -r user_config; do
+ USERNAME=$(echo "$user_config" | jq -r '.username // empty')
+
+        # Both PUT and POST need username + rules (username is required)
+ REQ_BODY=$(echo "$user_config" | jq -c '{username: .username, rules: .rules}')
+
+ if [ -z "$USERNAME" ]; then
+          echo "  ✗ ACL entry is missing username; skipping"
+ continue
+ fi
+
+        echo "  Configuring ACL rules for user ${USERNAME}..."
+
+        # 1) Try PUT first (overwrite/update)
+ http_code=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+ -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "$REQ_BODY")
+
+ if [ "$http_code" = "204" ]; then
+          echo "  ✓ PUT update succeeded"
+ elif [ "$http_code" = "404" ]; then
+          # 2) If the user has no rules yet, create them via POST
+ http_code2=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+ -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "$REQ_BODY")
+
+ if [ "$http_code2" = "204" ]; then
+            echo "  ✓ POST create succeeded"
+          else
+            echo "  ✗ POST failed (HTTP ${http_code2}): $(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+ exit 1
+ fi
+ else
+          echo "  ✗ PUT failed (HTTP ${http_code}): $(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+ exit 1
+ fi
+
+        # 3) Verify after import (optional but strongly recommended)
+ verify_code=$(curl -sS -o /tmp/emqx_acl_verify.json -w '%{http_code}' \
+ -H "Authorization: Bearer ${TOKEN}" \
+ "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}")
+
+ if [ "$verify_code" = "200" ]; then
+          echo "  ✓ Verified: $(cat /tmp/emqx_acl_verify.json | jq -c '.')"
+        else
+          echo "  ✗ Verification failed (HTTP ${verify_code}): $(cat /tmp/emqx_acl_verify.json 2>/dev/null || true)"
+ exit 1
+ fi
+ done
+
+      echo "  ✓ ACL rules imported"
+    else
+      echo "  ℹ No ACL file found"
+ fi
+
+---
+# ============== StatefulSet ==============
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-emqxs
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+    app.kubernetes.io/version: "2.0"
+spec:
+ replicas: 1
+ serviceName: helm-emqxs-headless
+ podManagementPolicy: Parallel
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+
+ template:
+ metadata:
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+        app.kubernetes.io/version: "2.0"
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - 260207
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: cmii.app
+ operator: In
+ values:
+ - helm-emqxs
+ topologyKey: kubernetes.io/hostname
+
+ imagePullSecrets:
+ - name: harborsecret
+
+ serviceAccountName: helm-emqxs
+
+ securityContext:
+ fsGroup: 1000
+ runAsUser: 1000
+
+      # InitContainer - stage the bootstrap files
+ initContainers:
+ - name: prepare-bootstrap
+          # tools image is selected dynamically
+ image: 172.22.106.77:8033/cmii/tools:1.0
+ imagePullPolicy: IfNotPresent
+          # =========================================================
+          # Permissions: must run as root to be able to chown
+          # =========================================================
+ securityContext:
+ runAsUser: 0
+ command:
+ - /bin/sh
+ - -c
+ - |
+              echo "Staging bootstrap files..."
+
+              # Create the data directory
+ mkdir -p /opt/emqx/data
+
+              # Copy bootstrap files into the data directory
+              # Copy only if the file is absent, to avoid overwriting existing data
+ if [ ! -f /opt/emqx/data/bootstrap_users.json ]; then
+ cp /bootstrap-src/bootstrap_users.json /opt/emqx/data/
+                echo "✓ Copied user bootstrap file"
+              else
+                echo "ℹ User bootstrap file already exists; skipping"
+ fi
+
+              # Fix ownership (we run as root here, so this succeeds)
+ chown -R 1000:1000 /opt/emqx/data
+
+              echo "✓ Bootstrap staging complete"
+ volumeMounts:
+ - name: emqx-data
+ mountPath: /opt/emqx/data
+ - name: bootstrap-users
+ mountPath: /bootstrap-src
+
+ containers:
+        # Main container - EMQX
+ - name: emqx
+          # emqx image is selected dynamically
+ image: 172.22.106.77:8033/cmii/emqx:5.8.8
+ imagePullPolicy: IfNotPresent
+
+ env:
+            # Pod metadata
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: EMQX_DATA_DIR
+ value: "/opt/emqx/data"
+
+ ports:
+ - name: mqtt
+ containerPort: 1883
+ - name: mqttssl
+ containerPort: 8883
+ - name: ws
+ containerPort: 8083
+ - name: dashboard
+ containerPort: 18083
+ - name: ekka
+ containerPort: 4370
+
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "512Mi"
+ limits:
+ cpu: "2000m"
+ memory: "2Gi"
+
+ livenessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ timeoutSeconds: 10
+ failureThreshold: 3
+
+ readinessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+
+ startupProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ failureThreshold: 30
+
+ volumeMounts:
+ - name: emqx-data
+ mountPath: /opt/emqx/data
+            # Mount the single config file via subPath so the directory is not shadowed
+ - name: bootstrap-config
+ mountPath: /opt/emqx/etc/emqx.conf
+ subPath: emqx.conf
+
+        # Sidecar - initialize the Dashboard password and ACL
+ - name: init-dashboard
+          # tools image is selected dynamically
+ image: 172.22.106.77:8033/cmii/tools:1.0
+ imagePullPolicy: IfNotPresent
+
+ command:
+ - /bin/sh
+ - -c
+ - |
+              # Wait for the main container to come up
+              echo "Waiting for EMQX to start..."
+ sleep 20
+
+              # Run the initialization
+ /bin/sh /scripts/init-dashboard.sh
+
+              # Keep the container alive
+              echo "Initialization done; idling..."
+ while true; do sleep 3600; done
+
+ env:
+ - name: DASHBOARD_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: emqx-credentials
+ key: dashboard-admin-password
+
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "64Mi"
+ limits:
+ cpu: "200m"
+ memory: "128Mi"
+
+ volumeMounts:
+ - name: init-script
+ mountPath: /scripts
+ - name: bootstrap-users
+ mountPath: /bootstrap
+
+ volumes:
+ - name: bootstrap-config
+ configMap:
+ name: emqx-bootstrap-config
+ - name: bootstrap-users
+ configMap:
+ name: emqx-bootstrap-users
+ - name: init-script
+ configMap:
+ name: emqx-init-dashboard
+ defaultMode: 0755
+ - name: emqx-data
+ persistentVolumeClaim:
+ claimName: helm-emqxs
+
+---
+# ============== Service - Headless ==============
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs-headless
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ targetPort: 1883
+ - name: mqttssl
+ port: 8883
+ targetPort: 8883
+ - name: ws
+ port: 8083
+ targetPort: 8083
+ - name: dashboard
+ port: 18083
+ targetPort: 18083
+ - name: ekka
+ port: 4370
+ targetPort: 4370
+
+---
+# ============== Service - NodePort ==============
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: NodePort
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ targetPort: 1883
+ nodePort: 31883
+ - name: dashboard
+ port: 18083
+ targetPort: 18083
+ nodePort: 38085
+ - name: ws
+ port: 8083
+ targetPort: 8083
+ nodePort: 38083
+ - name: mqttssl
+ port: 8883
+ targetPort: 8883
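
A quick smoke test for the EMQX objects above (a sketch, not part of the manifests; it assumes the StatefulSet is named helm-emqxs, so its first pod is helm-emqxs-0, and `<node-ip>` stands for any node reachable on the NodePort range):

```bash
# Pods and services carrying the EMQX labels
kubectl -n sc-cd-uav-260207 get pods,svc -l cmii.app=helm-emqxs

# Same /status endpoint the probes poll (assumes curl is available
# inside the emqx image)
kubectl -n sc-cd-uav-260207 exec helm-emqxs-0 -c emqx -- \
  curl -sf http://localhost:18083/status

# Dashboard through the NodePort service (38085 -> 18083)
curl -sf http://<node-ip>:38085/status
```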
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-frontend.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-frontend.yaml
new file mode 100644
index 0000000..058bd2b
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-frontend.yaml
@@ -0,0 +1,114 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: nginx-cm
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: frontend
+data:
+ nginx.conf: |
+ server {
+ listen 9528;
+ server_name localhost;
+ gzip on;
+
+ location / {
+ root /home/cmii-platform/dist;
+ index index.html index.htm;
+ }
+
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ root html;
+ }
+ }
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-platform-lite
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/app-version: 2.0
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ template:
+ metadata:
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-platform-lite
+ image: 172.22.106.77:8033/cmii/cmii-uav-platform-lite:2.0.0-2026012703-noicp
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: sc-cd-uav-260207
+ - name: APPLICATION_NAME
+ value: cmii-uav-platform-lite
+ ports:
+ - name: platform-9528
+ containerPort: 9528
+ protocol: TCP
+ resources:
+ limits:
+ cpu: "1"
+ memory: 1Gi
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ volumeMounts:
+ - name: nginx-conf
+ mountPath: /etc/nginx/conf.d/nginx.conf
+ subPath: nginx.conf
+ - name: tenant-prefix
+ subPath: ingress-config.js
+ mountPath: /home/cmii-platform/dist/ingress-config.js
+ volumes:
+ - name: nginx-conf
+ configMap:
+ name: nginx-cm
+ items:
+ - key: nginx.conf
+ path: nginx.conf
+ - name: tenant-prefix
+ configMap:
+ name: tenant-prefix-lite
+ items:
+ - key: ingress-config.js
+ path: ingress-config.js
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-platform-lite
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/version: 2.0
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ ports:
+ - name: web-svc-port
+ port: 9528
+ protocol: TCP
+ targetPort: 9528
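
To verify that the nginx ConfigMap is mounted and the SPA answers on 9528, a minimal check (sketch only, run against the namespace above):

```bash
# Confirm the subPath-mounted nginx.conf landed where nginx reads it
kubectl -n sc-cd-uav-260207 exec deploy/cmii-uav-platform-lite -- \
  cat /etc/nginx/conf.d/nginx.conf

# Port-forward the ClusterIP service and fetch the index page
kubectl -n sc-cd-uav-260207 port-forward svc/cmii-uav-platform-lite 9528:9528 &
curl -sI http://127.0.0.1:9528/ | head -n 5
```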
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-ingress.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-ingress.yaml
new file mode 100644
index 0000000..828efae
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-ingress.yaml
@@ -0,0 +1,832 @@
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: frontend-applications-ingress
+ namespace: sc-cd-uav-260207
+ labels:
+ type: frontend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ rewrite ^(/supervision)$ $1/ redirect;
+ rewrite ^(/supervisionh5)$ $1/ redirect;
+ rewrite ^(/pangu)$ $1/ redirect;
+ rewrite ^(/ai-brain)$ $1/ redirect;
+ rewrite ^(/armypeople)$ $1/ redirect;
+ rewrite ^(/awareness)$ $1/ redirect;
+ rewrite ^(/base)$ $1/ redirect;
+ rewrite ^(/blockchain)$ $1/ redirect;
+ rewrite ^(/classification)$ $1/ redirect;
+ rewrite ^(/cmsportal)$ $1/ redirect;
+ rewrite ^(/detection)$ $1/ redirect;
+ rewrite ^(/dikongzhixingh5)$ $1/ redirect;
+ rewrite ^(/dispatchh5)$ $1/ redirect;
+ rewrite ^(/emergency)$ $1/ redirect;
+ rewrite ^(/eventsh5)$ $1/ redirect;
+ rewrite ^(/flight-control)$ $1/ redirect;
+ rewrite ^(/hljtt)$ $1/ redirect;
+ rewrite ^(/hyper)$ $1/ redirect;
+ rewrite ^(/iot)$ $1/ redirect;
+ rewrite ^(/jiangsuwenlv)$ $1/ redirect;
+ rewrite ^(/lite)$ $1/ redirect;
+ rewrite ^(/logistics)$ $1/ redirect;
+ rewrite ^(/media)$ $1/ redirect;
+ rewrite ^(/mianyangbackend)$ $1/ redirect;
+ rewrite ^(/multiterminal)$ $1/ redirect;
+ rewrite ^(/mws)$ $1/ redirect;
+ rewrite ^(/oms)$ $1/ redirect;
+ rewrite ^(/open)$ $1/ redirect;
+ rewrite ^(/pilot2cloud)$ $1/ redirect;
+ rewrite ^(/qingdao)$ $1/ redirect;
+ rewrite ^(/qinghaitourism)$ $1/ redirect;
+ rewrite ^(/renyike)$ $1/ redirect;
+ rewrite ^(/scanner)$ $1/ redirect;
+ rewrite ^(/security)$ $1/ redirect;
+ rewrite ^(/securityh5)$ $1/ redirect;
+ rewrite ^(/seniclive)$ $1/ redirect;
+ rewrite ^(/share)$ $1/ redirect;
+ rewrite ^(/smauth)$ $1/ redirect;
+ rewrite ^(/smsecret)$ $1/ redirect;
+ rewrite ^(/splice)$ $1/ redirect;
+ rewrite ^(/threedsimulation)$ $1/ redirect;
+ rewrite ^(/traffic)$ $1/ redirect;
+ rewrite ^(/uas)$ $1/ redirect;
+ rewrite ^(/uasms)$ $1/ redirect;
+ rewrite ^(/visualization)$ $1/ redirect;
+ rewrite ^(/uavmsmanager)$ $1/ redirect;
+ rewrite ^(/secenter)$ $1/ redirect;
+spec:
+ rules:
+ - host: fake-domain.sc-cd-uav-260207.io
+ http:
+ paths:
+ - path: /260207/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /260207/supervision/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervision
+ servicePort: 9528
+ - path: /260207/supervisionh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervisionh5
+ servicePort: 9528
+ - path: /260207/pangu/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /260207/ai-brain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-ai-brain
+ servicePort: 9528
+ - path: /260207/armypeople/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-armypeople
+ servicePort: 9528
+ - path: /260207/awareness/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-awareness
+ servicePort: 9528
+ - path: /260207/base/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-base
+ servicePort: 9528
+ - path: /260207/blockchain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-blockchain
+ servicePort: 9528
+ - path: /260207/classification/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-classification
+ servicePort: 9528
+ - path: /260207/cmsportal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-cms-portal
+ servicePort: 9528
+ - path: /260207/detection/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-detection
+ servicePort: 9528
+ - path: /260207/dikongzhixingh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dikongzhixingh5
+ servicePort: 9528
+ - path: /260207/dispatchh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dispatchh5
+ servicePort: 9528
+ - path: /260207/emergency/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-emergency-rescue
+ servicePort: 9528
+ - path: /260207/eventsh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-eventsh5
+ servicePort: 9528
+ - path: /260207/flight-control/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-flight-control
+ servicePort: 9528
+ - path: /260207/hljtt/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hljtt
+ servicePort: 9528
+ - path: /260207/hyper/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hyperspectral
+ servicePort: 9528
+ - path: /260207/iot/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-iot-manager
+ servicePort: 9528
+ - path: /260207/jiangsuwenlv/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-jiangsuwenlv
+ servicePort: 9528
+ - path: /260207/lite/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-lite
+ servicePort: 9528
+ - path: /260207/logistics/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-logistics
+ servicePort: 9528
+ - path: /260207/media/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-media
+ servicePort: 9528
+ - path: /260207/mianyangbackend/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mianyangbackend
+ servicePort: 9528
+ - path: /260207/multiterminal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-multiterminal
+ servicePort: 9528
+ - path: /260207/mws/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mws
+ servicePort: 9528
+ - path: /260207/oms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-oms
+ servicePort: 9528
+ - path: /260207/open/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-open
+ servicePort: 9528
+ - path: /260207/pilot2cloud/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-pilot2-to-cloud
+ servicePort: 9528
+ - path: /260207/qingdao/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qingdao
+ servicePort: 9528
+ - path: /260207/qinghaitourism/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qinghaitourism
+ servicePort: 9528
+ - path: /260207/renyike/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-renyike
+ servicePort: 9528
+ - path: /260207/scanner/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-scanner
+ servicePort: 9528
+ - path: /260207/security/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-security
+ servicePort: 9528
+ - path: /260207/securityh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-securityh5
+ servicePort: 9528
+ - path: /260207/seniclive/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-seniclive
+ servicePort: 9528
+ - path: /260207/share/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-share
+ servicePort: 9528
+ - path: /260207/smauth/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smauth
+ servicePort: 9528
+ - path: /260207/smsecret/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smsecret
+ servicePort: 9528
+ - path: /260207/splice/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-splice
+ servicePort: 9528
+ - path: /260207/threedsimulation/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-threedsimulation
+ servicePort: 9528
+ - path: /260207/traffic/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-traffic
+ servicePort: 9528
+ - path: /260207/uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uas
+ servicePort: 9528
+ - path: /260207/uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uaskny
+ servicePort: 9528
+ - path: /260207/uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasms
+ servicePort: 9528
+ - path: /260207/uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasmskny
+ servicePort: 9528
+ - path: /260207/visualization/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-visualization
+ servicePort: 9528
+ - path: /260207/uavmsmanager/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-manager
+ servicePort: 9528
+ - path: /260207/secenter/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-security-center
+ servicePort: 9528
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: backend-applications-ingress
+ namespace: sc-cd-uav-260207
+ labels:
+ type: backend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+spec:
+ rules:
+ - host: cmii-admin-data.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-data
+ servicePort: 8080
+ - host: cmii-admin-gateway.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - host: cmii-admin-user.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-user
+ servicePort: 8080
+ - host: cmii-app-release.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-app-release
+ servicePort: 8080
+ - host: cmii-open-gateway.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - host: cmii-sky-converge.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
+ - host: cmii-suav-supervision.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-supervision
+ servicePort: 8080
+ - host: cmii-uas-datahub.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-datahub
+ servicePort: 8080
+ - host: cmii-uas-gateway.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - host: cmii-uas-lifecycle.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-lifecycle
+ servicePort: 8080
+ - host: cmii-uav-advanced5g.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-advanced5g
+ servicePort: 8080
+ - host: cmii-uav-airspace.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-airspace
+ servicePort: 8080
+ - host: cmii-uav-alarm.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-alarm
+ servicePort: 8080
+ - host: cmii-uav-autowaypoint.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-autowaypoint
+ servicePort: 8080
+ - host: cmii-uav-brain.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-brain
+ servicePort: 8080
+ - host: cmii-uav-bridge.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-bridge
+ servicePort: 8080
+ - host: cmii-uav-cloud-live.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cloud-live
+ servicePort: 8080
+ - host: cmii-uav-clusters.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-clusters
+ servicePort: 8080
+ - host: cmii-uav-cms.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cms
+ servicePort: 8080
+ - host: cmii-uav-data-post-process.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-data-post-process
+ servicePort: 8080
+ - host: cmii-uav-depotautoreturn.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-depotautoreturn
+ servicePort: 8080
+ - host: cmii-uav-developer.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-developer
+ servicePort: 8080
+ - host: cmii-uav-device.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-device
+ servicePort: 8080
+ - host: cmii-uav-emergency.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-emergency
+ servicePort: 8080
+ - host: cmii-uav-fwdd.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-fwdd
+ servicePort: 8080
+ - host: cmii-uav-gateway.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - host: cmii-uav-gis-server.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gis-server
+ servicePort: 8080
+ - host: cmii-uav-grid-datasource.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-datasource
+ servicePort: 8080
+ - host: cmii-uav-grid-engine.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-engine
+ servicePort: 8080
+ - host: cmii-uav-grid-manage.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-manage
+ servicePort: 8080
+ - host: cmii-uav-industrial-portfolio.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-industrial-portfolio
+ servicePort: 8080
+ - host: cmii-uav-integration.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-integration
+ servicePort: 8080
+ - host: cmii-uav-iot-dispatcher.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-dispatcher
+ servicePort: 8080
+ - host: cmii-uav-iot-manager.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-manager
+ servicePort: 8080
+ - host: cmii-uav-kpi-monitor.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-kpi-monitor
+ servicePort: 8080
+ - host: cmii-uav-logger.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-logger
+ servicePort: 8080
+ - host: cmii-uav-material-warehouse.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-material-warehouse
+ servicePort: 8080
+ - host: cmii-uav-mission.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mission
+ servicePort: 8080
+ - host: cmii-uav-mqtthandler.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mqtthandler
+ servicePort: 8080
+ - host: cmii-uav-multilink.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-multilink
+ servicePort: 8080
+ - host: cmii-uav-notice.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-notice
+ servicePort: 8080
+ - host: cmii-uav-oauth.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-oauth
+ servicePort: 8080
+ - host: cmii-uav-process.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-process
+ servicePort: 8080
+ - host: cmii-uav-sec-awareness.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sec-awareness
+ servicePort: 8080
+ - host: cmii-uav-security-trace.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-security-trace
+ servicePort: 8080
+ - host: cmii-uav-sense-adapter.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sense-adapter
+ servicePort: 8080
+ - host: cmii-uav-surveillance.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-surveillance
+ servicePort: 8080
+ - host: cmii-uav-sync.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sync
+ servicePort: 8080
+ - host: cmii-uav-tcp-server.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tcp-server
+ servicePort: 8080
+ - host: cmii-uav-threedsimulation.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-threedsimulation
+ servicePort: 8080
+ - host: cmii-uav-tower.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tower
+ servicePort: 8080
+ - host: cmii-uav-user.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-user
+ servicePort: 8080
+ - host: cmii-uav-watchdog.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-watchdog
+ servicePort: 8080
+ - host: cmii-uav-waypoint.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-waypoint
+ servicePort: 8080
+ - host: cmii-uavms-pyfusion.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-pyfusion
+ servicePort: 8080
+ - host: cmii-uavms-security-center.uavcloud-260207.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-security-center
+ servicePort: 8080
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: all-gateways-ingress
+ namespace: sc-cd-uav-260207
+ labels:
+ type: api-gateway
+ octopus.control: all-ingress-config-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ proxy_set_header upgradePrefix $http_upgrade;
+ proxy_set_header Connection "upgradePrefix";
+spec:
+ rules:
+ - host: fake-domain.sc-cd-uav-260207.io
+ http:
+ paths:
+ - path: /260207/oms/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - path: /260207/open/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - path: /260207/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - path: /260207/uas/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - path: /260207/converge/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
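
Note that all three Ingresses use networking.k8s.io/v1beta1, which was removed in Kubernetes 1.22, so they only apply on older clusters. A sketch of how the capture-group rewrites can be exercised once deployed (`<ingress-ip>` is a placeholder for the ingress-nginx controller address):

```bash
# /260207/lite/<rest> is rewritten to /<rest> on cmii-uav-platform-lite
curl -s -o /dev/null -w '%{http_code}\n' \
  -H 'Host: fake-domain.sc-cd-uav-260207.io' \
  http://<ingress-ip>/260207/lite/

# API gateway route from all-gateways-ingress
curl -s -o /dev/null -w '%{http_code}\n' \
  -H 'Host: fake-domain.sc-cd-uav-260207.io' \
  http://<ingress-ip>/260207/api/
```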
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-mongo.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-mongo.yaml
new file mode 100644
index 0000000..8ed5f52
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-mongo.yaml
@@ -0,0 +1,78 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mongo
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ ports:
+ - port: 27017
+ name: server-27017
+ targetPort: 27017
+ nodePort: 37017
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mongo
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ serviceName: helm-mongo
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: helm-mongo
+ image: 172.22.106.77:8033/cmii/mongo:5.0
+ resources: {}
+ ports:
+ - containerPort: 27017
+ name: mongo27017
+ protocol: TCP
+ env:
+ - name: MONGO_INITDB_ROOT_USERNAME
+ value: cmlc
+ - name: MONGO_INITDB_ROOT_PASSWORD
+ value: REdPza8#oVlt
+ volumeMounts:
+ - name: mongo-data
+ mountPath: /data/db
+ readOnly: false
+ subPath: default/helm-mongo/data/db
+ volumes:
+ - name: mongo-data
+ persistentVolumeClaim:
+ claimName: helm-mongo
+---
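
A connectivity sketch for the single-node Mongo above, using the root credentials from the StatefulSet env and the NodePort mapping (37017 -> 27017); the legacy mongo shell still ships with the mongo:5.0 image:

```bash
# Ping through the NodePort from any machine that can reach a cluster node
mongo --host <node-ip> --port 37017 \
  -u cmlc -p 'REdPza8#oVlt' --authenticationDatabase admin \
  --eval 'db.runCommand({ ping: 1 })'
```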
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-mysql.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-mysql.yaml
new file mode 100644
index 0000000..534ffae
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-mysql.yaml
@@ -0,0 +1,410 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-mysql
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ annotations: {}
+secrets:
+ - name: helm-mysql
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-mysql
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ mysql-root-password: "UXpmWFFoZDNiUQ=="
+ mysql-password: "S0F0cm5PckFKNw=="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ my.cnf: |-
+
+ [mysqld]
+ port=3306
+ basedir=/opt/bitnami/mysql
+ datadir=/bitnami/mysql/data
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ log-error=/bitnami/mysql/data/error.log
+ general_log_file = /bitnami/mysql/data/general.log
+ slow_query_log_file = /bitnami/mysql/data/slow.log
+ innodb_data_file_path = ibdata1:512M:autoextend
+ innodb_buffer_pool_size = 512M
+ innodb_buffer_pool_instances = 2
+ innodb_log_file_size = 512M
+ innodb_log_files_in_group = 4
+ log-bin = /bitnami/mysql/data/mysql-bin
+ max_binlog_size=1G
+ transaction_isolation = REPEATABLE-READ
+ default_storage_engine = innodb
+ character-set-server = utf8mb4
+ collation-server=utf8mb4_bin
+ binlog_format = ROW
+ binlog_rows_query_log_events=on
+ binlog_cache_size=4M
+ binlog_expire_logs_seconds = 1296000
+ max_binlog_cache_size=2G
+ gtid_mode = on
+ enforce_gtid_consistency = 1
+ sync_binlog = 1
+ innodb_flush_log_at_trx_commit = 1
+ innodb_flush_method = O_DIRECT
+ log_slave_updates=1
+ relay_log_recovery = 1
+ relay-log-purge = 1
+ default_time_zone = '+08:00'
+ lower_case_table_names=1
+ log_bin_trust_function_creators=1
+ group_concat_max_len=67108864
+ innodb_io_capacity = 4000
+ innodb_io_capacity_max = 8000
+ innodb_flush_sync = 0
+ innodb_flush_neighbors = 0
+ innodb_write_io_threads = 8
+ innodb_read_io_threads = 8
+ innodb_purge_threads = 4
+ innodb_page_cleaners = 4
+ innodb_open_files = 65535
+ innodb_max_dirty_pages_pct = 50
+ innodb_lru_scan_depth = 4000
+ innodb_checksum_algorithm = crc32
+ innodb_lock_wait_timeout = 10
+ innodb_rollback_on_timeout = 1
+ innodb_print_all_deadlocks = 1
+ innodb_file_per_table = 1
+ innodb_online_alter_log_max_size = 4G
+ innodb_stats_on_metadata = 0
+ innodb_thread_concurrency = 0
+ innodb_sync_spin_loops = 100
+ innodb_spin_wait_delay = 30
+ lock_wait_timeout = 3600
+ slow_query_log = 1
+ long_query_time = 10
+ log_queries_not_using_indexes =1
+ log_throttle_queries_not_using_indexes = 60
+ min_examined_row_limit = 100
+ log_slow_admin_statements = 1
+ log_slow_slave_statements = 1
+ default_authentication_plugin=mysql_native_password
+ skip-name-resolve=1
+ explicit_defaults_for_timestamp=1
+ plugin_dir=/opt/bitnami/mysql/plugin
+ max_allowed_packet=128M
+ max_connections = 2000
+ max_connect_errors = 1000000
+ table_definition_cache=2000
+ table_open_cache_instances=64
+ tablespace_definition_cache=1024
+ thread_cache_size=256
+ interactive_timeout = 600
+ wait_timeout = 600
+ tmpdir=/opt/bitnami/mysql/tmp
+ bind-address=0.0.0.0
+ performance_schema = 1
+ performance_schema_instrument = '%memory%=on'
+ performance_schema_instrument = '%lock%=on'
+ innodb_monitor_enable=ALL
+
+ [mysql]
+ no-auto-rehash
+
+ [mysqldump]
+ quick
+ max_allowed_packet = 32M
+
+ [client]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ default-character-set=UTF8
+ plugin_dir=/opt/bitnami/mysql/plugin
+
+ [manager]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql-init-scripts
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ create_users_grants_core.sql: |-
+ create user zyly@'%' identified by 'Cmii@451315';
+ grant select on *.* to zyly@'%';
+ create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
+ grant all on *.* to zyly_qc@'%';
+ create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
+ grant all on *.* to k8s_admin@'%';
+ create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
+ grant all on *.* to audit_dba@'%';
+ create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
+ GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
+ create user monitor@'%' identified by 'PL3#nGtrWbf-';
+ grant REPLICATION CLIENT on *.* to monitor@'%';
+ flush privileges;
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: cmii-mysql
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-cd-uav-260207
+ cmii.app: mysql
+ cmii.type: middleware
+ octopus.control: mysql-db-wdd
+spec:
+ ports:
+ - name: mysql
+ protocol: TCP
+ port: 13306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-cd-uav-260207
+ cmii.app: mysql
+ cmii.type: middleware
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql-headless
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ ports:
+ - name: mysql
+ port: 3306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-cd-uav-260207
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: NodePort
+ ports:
+ - name: mysql
+ port: 3306
+ protocol: TCP
+ targetPort: mysql
+ nodePort: 33306
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-cd-uav-260207
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mysql
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-cd-uav-260207
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ serviceName: helm-mysql
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ annotations:
+ checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-mysql
+ affinity: {}
+ nodeSelector:
+ mysql-deploy: "true"
+ securityContext:
+ fsGroup: 1001
+ initContainers:
+ - name: change-volume-permissions
+ image: 172.22.106.77:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ chown -R 1001:1001 /bitnami/mysql
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ containers:
+ - name: mysql
+ image: 172.22.106.77:8033/cmii/mysql:8.1.0-debian-11-r42
+ imagePullPolicy: "IfNotPresent"
+ securityContext:
+ runAsUser: 1001
+ env:
+ - name: BITNAMI_DEBUG
+ value: "true"
+ - name: MYSQL_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-mysql
+ key: mysql-root-password
+ - name: MYSQL_DATABASE
+ value: "cmii"
+ ports:
+ - name: mysql
+ containerPort: 3306
+ livenessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ readinessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ startupProbe:
+ failureThreshold: 60
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d
+ - name: config
+ mountPath: /opt/bitnami/mysql/conf/my.cnf
+ subPath: my.cnf
+ volumes:
+ - name: config
+ configMap:
+ name: helm-mysql
+ - name: custom-init-scripts
+ configMap:
+ name: helm-mysql-init-scripts
+ - name: mysql-data
+ hostPath:
+ path: /var/lib/docker/mysql-pv/sc-cd-uav-260207/
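
The Secret values above are base64-encoded, not encrypted; a sketch for recovering them and checking connectivity through the NodePort service (33306 -> 3306):

```bash
# Decode the root password from the Secret
kubectl -n sc-cd-uav-260207 get secret helm-mysql \
  -o jsonpath='{.data.mysql-root-password}' | base64 -d; echo

# Connect through the NodePort (prompts for the decoded password)
mysql -h <node-ip> -P 33306 -u root -p -e 'SELECT VERSION();'
```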
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-nacos.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-nacos.yaml
new file mode 100644
index 0000000..1db8ac5
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-nacos.yaml
@@ -0,0 +1,130 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-nacos-cm
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: 2.0
+data:
+ mysql.db.name: "cmii_nacos_config"
+ mysql.db.host: "helm-mysql"
+ mysql.port: "3306"
+ mysql.user: "k8s_admin"
+ mysql.password: "fP#UaH6qQ3)8"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-nacos
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: 2.0
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ ports:
+ - port: 8848
+ name: server
+ targetPort: 8848
+ nodePort: 38848
+ - port: 9848
+ name: server12
+ targetPort: 9848
+ - port: 9849
+ name: server23
+ targetPort: 9849
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-nacos
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: 2.0
+spec:
+ serviceName: helm-nacos
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/version: 2.0
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: nacos-server
+ image: 172.22.106.77:8033/cmii/nacos-server:v2.1.2
+ ports:
+ - containerPort: 8848
+ name: dashboard
+ - containerPort: 9848
+ name: tcp-9848
+ - containerPort: 9849
+ name: tcp-9849
+ env:
+ - name: NACOS_AUTH_ENABLE
+ value: "false"
+ - name: NACOS_REPLICAS
+ value: "1"
+ - name: MYSQL_SERVICE_DB_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.name
+ - name: MYSQL_SERVICE_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.port
+ - name: MYSQL_SERVICE_USER
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.user
+ - name: MYSQL_SERVICE_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.password
+ - name: MYSQL_SERVICE_HOST
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.host
+ - name: NACOS_SERVER_PORT
+ value: "8848"
+ - name: NACOS_APPLICATION_PORT
+ value: "8848"
+ - name: PREFER_HOST_MODE
+ value: "hostname"
+ - name: MODE
+ value: standalone
+ - name: SPRING_DATASOURCE_PLATFORM
+ value: mysql
+---
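
Nacos runs standalone here and stores its configuration in MySQL via the ConfigMap above. Note that the init scripts in k8s-mysql.yaml create the k8s_admin user but not the cmii_nacos_config schema, which is assumed to exist (with the Nacos tables) before the server starts. A sketch to verify both sides:

```bash
# Console reachable through the NodePort (38848)
curl -s -o /dev/null -w '%{http_code}\n' http://<node-ip>:38848/nacos/

# Schema present and reachable with the credentials Nacos will use
mysql -h <node-ip> -P 33306 -u k8s_admin -p'fP#UaH6qQ3)8' \
  -e 'SHOW TABLES IN cmii_nacos_config;'
```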
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-nfs-test.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-nfs-test.yaml
new file mode 100644
index 0000000..ad3064a
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-nfs-test.yaml
@@ -0,0 +1,38 @@
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: test-claim
+ annotations:
+    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in the StorageClass manifest (nfs-StorageClass.yaml)
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: nfs-prod-distribute
+ resources:
+ requests:
+ storage: 1Mi
+---
+kind: Pod
+apiVersion: v1
+metadata:
+ name: test-pod
+spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: test-pod
+ image: 172.22.106.77:8033/cmii/busybox:latest
+ command:
+ - "/bin/sh"
+ args:
+ - "-c"
+        - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" # create a SUCCESS marker file, then exit
+ volumeMounts:
+ - name: nfs-pvc
+ mountPath: "/mnt"
+ restartPolicy: "Never"
+ volumes:
+ - name: nfs-pvc
+ persistentVolumeClaim:
+        claimName: test-claim # must match the PVC name above
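
Usage sketch for the smoke test above: apply it, wait for the pod to complete, then look for the marker file on the NFS export. The nfs-subdir-external-provisioner creates one subdirectory per PVC (named `<namespace>-<pvc-name>-<pv-name>`), so a glob is the easiest check on the NFS server (172.22.106.77):

```bash
kubectl apply -f k8s-nfs-test.yaml
kubectl get pod test-pod -w        # wait for STATUS "Completed"

# On the NFS server:
ls /var/lib/docker/nfs_data/*test-claim*/NFS-CREATE-SUCCESS
```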
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-nfs.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-nfs.yaml
new file mode 100644
index 0000000..49b593d
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-nfs.yaml
@@ -0,0 +1,114 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+  namespace: kube-system # set to the namespace where the provisioner is deployed; same for the objects below
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: nfs-client-provisioner-runner
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: run-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+# name: nfs-client-provisioner-runner
+ name: cluster-admin
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: Role
+ name: leader-locking-nfs-client-provisioner
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: nfs-prod-distribute
+provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment
+parameters:
+  archiveOnDelete: "false"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nfs-client-provisioner
+ labels:
+ app: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+  namespace: kube-system # keep consistent with the namespace used in the RBAC objects above
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nfs-client-provisioner
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ app: nfs-client-provisioner
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: nfs-client-provisioner
+ containers:
+ - name: nfs-client-provisioner
+ image: 172.22.106.77:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
+ volumeMounts:
+ - name: nfs-client-root
+ mountPath: /persistentvolumes
+ env:
+ - name: PROVISIONER_NAME
+ value: cmlc-nfs-storage
+ - name: NFS_SERVER
+ value: 172.22.106.77
+ - name: NFS_PATH
+ value: /var/lib/docker/nfs_data
+ volumes:
+ - name: nfs-client-root
+ nfs:
+ server: 172.22.106.77
+ path: /var/lib/docker/nfs_data
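
After applying, the contract to verify is that the StorageClass provisioner string equals the PROVISIONER_NAME env var in the Deployment; a quick check:

```bash
kubectl -n kube-system get deploy nfs-client-provisioner
kubectl get storageclass nfs-prod-distribute \
  -o jsonpath='{.provisioner}'; echo   # expect: cmlc-nfs-storage
```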
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-pvc.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-pvc.yaml
new file mode 100644
index 0000000..68bf2e2
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-pvc.yaml
@@ -0,0 +1,76 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: nfs-backend-log-pvc
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: middleware-base
+ cmii.app: nfs-backend-log-pvc
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: 2.0
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 100Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-emqxs
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-emqxs
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: 2.0
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-mongo
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-mongo
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: 2.0
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 30Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-rabbitmq
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: 2.0
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
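
All four claims should reach the Bound state before the StatefulSets that reference them are started; a one-line check:

```bash
kubectl -n sc-cd-uav-260207 get pvc
# Expect nfs-backend-log-pvc, helm-emqxs, helm-mongo, helm-rabbitmq: Bound
```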
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-rabbitmq.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-rabbitmq.yaml
new file mode 100644
index 0000000..596f59c
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-rabbitmq.yaml
@@ -0,0 +1,328 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: rabbitmq
+automountServiceAccountToken: true
+secrets:
+ - name: helm-rabbitmq
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: rabbitmq
+type: Opaque
+data:
+ rabbitmq-password: "blljUk45MXIuX2hq"
+ rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-rabbitmq-config
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: rabbitmq
+data:
+ rabbitmq.conf: |-
+ ## Username and password
+ ##
+ default_user = admin
+ default_pass = nYcRN91r._hj
+ ## Clustering
+ ##
+ cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+ cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+ cluster_formation.node_cleanup.interval = 10
+ cluster_formation.node_cleanup.only_log_warning = true
+ cluster_partition_handling = autoheal
+ # queue master locator
+ queue_master_locator = min-masters
+ # enable guest user
+ loopback_users.guest = false
+ #default_vhost = default-vhost
+ #disk_free_limit.absolute = 50MB
+ #load_definitions = /app/load_definition.json
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: rabbitmq
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: rabbitmq
+subjects:
+ - kind: ServiceAccount
+ name: helm-rabbitmq
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: helm-rabbitmq-endpoint-reader
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq-headless
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ clusterIP: None
+ ports:
+ - name: epmd
+ port: 4369
+ targetPort: epmd
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ - name: dist
+ port: 25672
+ targetPort: dist
+ - name: dashboard
+ port: 15672
+ targetPort: stats
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-cd-uav-260207
+ publishNotReadyAddresses: true
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ type: NodePort
+ ports:
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ nodePort: 35672
+ - name: dashboard
+ port: 15672
+ targetPort: dashboard
+ nodePort: 36675
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-cd-uav-260207
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ serviceName: helm-rabbitmq-headless
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-cd-uav-260207
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: rabbitmq
+ annotations:
+ checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
+ checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-rabbitmq
+ affinity: {}
+ securityContext:
+ fsGroup: 5001
+ runAsUser: 5001
+ terminationGracePeriodSeconds: 120
+ initContainers:
+ - name: volume-permissions
+ image: 172.22.106.77:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ args:
+ - -ec
+ - |
+ mkdir -p "/bitnami/rabbitmq/mnesia"
+ chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
+ securityContext:
+ runAsUser: 0
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ containers:
+ - name: rabbitmq
+ image: 172.22.106.77:8033/cmii/rabbitmq:3.9.12-debian-10-r3
+ imagePullPolicy: "Always"
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: K8S_SERVICE_NAME
+ value: "helm-rabbitmq-headless"
+ - name: K8S_ADDRESS_TYPE
+ value: hostname
+ - name: RABBITMQ_FORCE_BOOT
+ value: "no"
+ - name: RABBITMQ_NODE_NAME
+ value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: K8S_HOSTNAME_SUFFIX
+ value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: RABBITMQ_MNESIA_DIR
+ value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
+ - name: RABBITMQ_LDAP_ENABLE
+ value: "no"
+ - name: RABBITMQ_LOGS
+ value: "-"
+ - name: RABBITMQ_ULIMIT_NOFILES
+ value: "65536"
+ - name: RABBITMQ_USE_LONGNAME
+ value: "true"
+ - name: RABBITMQ_ERL_COOKIE
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-erlang-cookie
+ - name: RABBITMQ_LOAD_DEFINITIONS
+ value: "no"
+ - name: RABBITMQ_SECURE_PASSWORD
+ value: "yes"
+ - name: RABBITMQ_USERNAME
+ value: "admin"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-password
+ - name: RABBITMQ_PLUGINS
+ value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
+ ports:
+ - name: amqp
+ containerPort: 5672
+ - name: dist
+ containerPort: 25672
+ - name: dashboard
+ containerPort: 15672
+ - name: epmd
+ containerPort: 4369
+ livenessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q ping
+ initialDelaySeconds: 120
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
+ /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
+ else
+ rabbitmqctl stop_app
+ fi
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: configuration
+ mountPath: /bitnami/rabbitmq/conf
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ volumes:
+ - name: configuration
+ configMap:
+ name: helm-rabbitmq-config
+ items:
+ - key: rabbitmq.conf
+ path: rabbitmq.conf
+ - name: data
+ persistentVolumeClaim:
+ claimName: helm-rabbitmq
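
A sketch of the same diagnostics the probes run, plus a management-API call through the NodePort (36675 -> 15672) using the admin credentials from rabbitmq.conf:

```bash
kubectl -n sc-cd-uav-260207 exec helm-rabbitmq-0 -- rabbitmq-diagnostics -q ping
kubectl -n sc-cd-uav-260207 exec helm-rabbitmq-0 -- \
  rabbitmq-diagnostics -q check_running

curl -sf -u 'admin:nYcRN91r._hj' http://<node-ip>:36675/api/overview | head -c 200; echo
```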
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-redis.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-redis.yaml
new file mode 100644
index 0000000..fad569e
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-redis.yaml
@@ -0,0 +1,585 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: true
+metadata:
+ name: helm-redis
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-redis
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ redis-password: "TWNhY2hlQDQ1MjI="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-configuration
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+data:
+ redis.conf: |-
+ # User-supplied common configuration:
+ # Enable AOF https://redis.io/topics/persistence#append-only-file
+ appendonly yes
+ # Disable RDB persistence, AOF persistence already enabled.
+ save ""
+ # End of common configuration
+ master.conf: |-
+ dir /data
+ # User-supplied master configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of master configuration
+ replica.conf: |-
+ dir /data
+ slave-read-only yes
+ # User-supplied replica configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of replica configuration
+---
+# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-health
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+data:
+ ping_readiness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+ ping_liveness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
+ exit $exit_status
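+# The StatefulSets below call these scripts from their probes, passing the timeout in
+# seconds as $1 (e.g. "ping_liveness_local.sh 5").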
+---
+# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-scripts
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+data:
+ start-master.sh: |
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
+ exec redis-server "${ARGS[@]}"
+ start-replica.sh: |
+ #!/bin/bash
+
+ get_port() {
+ hostname="$1"
+ type="$2"
+
+ port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
+ port=${!port_var}
+
+ if [ -z "$port" ]; then
+ case $type in
+ "SENTINEL")
+ echo 26379
+ ;;
+ "REDIS")
+ echo 6379
+ ;;
+ esac
+ else
+ echo $port
+ fi
+ }
+
+ get_full_hostname() {
+ hostname="$1"
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ }
+
+ REDISPORT=$(get_port "$HOSTNAME" "REDIS")
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+
+ echo "" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
+ exec redis-server "${ARGS[@]}"
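+# get_port() reads the <HOSTNAME>_SERVICE_PORT_<TYPE> variables Kubernetes injects for
+# services and falls back to the default Redis/Sentinel ports; replica-announce-ip makes
+# the replica advertise its headless-service DNS name to the master.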
+---
+# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-headless
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-cd-uav-260207
+---
+# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-master
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ type: ClusterIP
+
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-cd-uav-260207
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-replicas
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ type: ClusterIP
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/component: replica
+---
+# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-master
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-cd-uav-260207
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ affinity: {}
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ imagePullSecrets:
+ - name: harborsecret
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 172.22.106.77:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-master.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: master
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ # One second longer than command timeout should prevent generation of zombie processes.
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc/
+ - name: tmp
+ mountPath: /tmp
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: tmp
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
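+# Note: redis-data is an emptyDir here and in the replica StatefulSet below, so the
+# append-only dataset does not survive pod rescheduling.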
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-replicas
+ namespace: sc-cd-uav-260207
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/component: replica
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-cd-uav-260207
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 172.22.106.77:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-replica.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: slave
+ - name: REDIS_MASTER_HOST
+ value: helm-redis-master-0.helm-redis-headless.sc-cd-uav-260207.svc.cluster.local
+ - name: REDIS_MASTER_PORT_NUMBER
+ value: "6379"
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local_and_master.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local_and_master.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+
diff --git a/agent-common/real_project/sc-cd-uav-260207/k8s-srs.yaml b/agent-common/real_project/sc-cd-uav-260207/k8s-srs.yaml
new file mode 100644
index 0000000..ca54377
--- /dev/null
+++ b/agent-common/real_project/sc-cd-uav-260207/k8s-srs.yaml
@@ -0,0 +1,496 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-srs-cm
+ namespace: sc-cd-uav-260207
+ labels:
+ cmii.app: live-srs
+ cmii.type: live
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+data:
+ srs.rtc.conf: |-
+ listen 31935;
+ max_connections 4096;
+ srs_log_tank console;
+ srs_log_level info;
+ srs_log_file /home/srs.log;
+ daemon off;
+ http_api {
+ enabled on;
+ listen 1985;
+ crossdomain on;
+ }
+ stats {
+ network 0;
+ }
+ http_server {
+ enabled on;
+ listen 8080;
+ dir /home/hls;
+ }
+ srt_server {
+ enabled on;
+ listen 30556;
+ maxbw 1000000000;
+ connect_timeout 4000;
+ peerlatency 600;
+ recvlatency 600;
+ }
+ rtc_server {
+ enabled on;
+ listen 30090;
+ candidate $CANDIDATE;
+ }
+ vhost __defaultVhost__ {
+ http_hooks {
+ enabled on;
+ on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
+ }
+ http_remux {
+ enabled on;
+ }
+ rtc {
+ enabled on;
+ rtmp_to_rtc on;
+ rtc_to_rtmp on;
+ keep_bframe off;
+ }
+ tcp_nodelay on;
+ min_latency on;
+ play {
+ gop_cache off;
+ mw_latency 100;
+ mw_msgs 10;
+ }
+ publish {
+ firstpkt_timeout 8000;
+ normal_timeout 4000;
+ mr on;
+ }
+ dvr {
+ enabled off;
+ dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
+ dvr_plan session;
+ }
+ hls {
+ enabled on;
+ hls_path /home/hls;
+ hls_fragment 10;
+ hls_window 60;
+ hls_m3u8_file [app]/[stream].m3u8;
+ hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
+ hls_cleanup on;
+ hls_entry_prefix http://172.22.106.77:8088;
+ }
+ }
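+# SRS resolves $CANDIDATE above from the CANDIDATE environment variable (set to the node IP
+# in the StatefulSet below); it is the address advertised to WebRTC clients.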
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc-exporter
+ namespace: sc-cd-uav-260207
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ nodePort: 31935
+ - name: rtc
+ protocol: UDP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: rtc-tcp
+ protocol: TCP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: srt
+ protocol: UDP
+ port: 30556
+ targetPort: 30556
+ nodePort: 30556
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ nodePort: 30080
+ selector:
+ srs-role: rtc
+ type: NodePort
+ sessionAffinity: None
+ externalTrafficPolicy: Cluster
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc
+ namespace: sc-cd-uav-260207
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8080
+ targetPort: 8080
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srsrtc-svc
+ namespace: sc-cd-uav-260207
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: helm-live-srs-rtc
+ namespace: sc-cd-uav-260207
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-srs
+ cmii.type: live
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+ srs-role: rtc
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ srs-role: rtc
+ template:
+ metadata:
+ labels:
+ srs-role: rtc
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-srs-cm
+ items:
+ - key: srs.rtc.conf
+ path: docker.conf
+ defaultMode: 420
+ - name: srs-vol
+ emptyDir:
+ sizeLimit: 8Gi
+ containers:
+ - name: srs-rtc
+ image: 172.22.106.77:8033/cmii/srs:v5.0.195
+ ports:
+ - name: srs-rtmp
+ containerPort: 31935
+ protocol: TCP
+ - name: srs-api
+ containerPort: 1985
+ protocol: TCP
+ - name: srs-flv
+ containerPort: 8080
+ protocol: TCP
+ - name: srs-webrtc
+ containerPort: 30090
+ protocol: UDP
+ - name: srs-webrtc-tcp
+ containerPort: 30090
+ protocol: TCP
+ - name: srs-srt
+ containerPort: 30556
+ protocol: UDP
+ env:
+ - name: CANDIDATE
+ value: 172.22.106.77
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /usr/local/srs/conf/docker.conf
+ subPath: docker.conf
+ - name: srs-vol
+ mountPath: /home/dvr
+ subPath: sc-cd-uav-260207/helm-live/dvr
+ - name: srs-vol
+ mountPath: /home/hls
+ subPath: sc-cd-uav-260207/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ - name: oss-adaptor
+ image: 172.22.106.77:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
+ env:
+ - name: OSS_ENDPOINT
+ value: 'http://helm-minio:9000'
+ - name: OSS_AK
+ value: cmii
+ - name: OSS_SK
+ value: 'B#923fC7mk'
+ - name: OSS_BUCKET
+ value: live-cluster-hls
+ - name: SRS_OP
+ value: 'http://helm-live-op-svc-v2:8080'
+ - name: MYSQL_ENDPOINT
+ value: 'helm-mysql:3306'
+ - name: MYSQL_USERNAME
+ value: k8s_admin
+ - name: MYSQL_PASSWORD
+ value: fP#UaH6qQ3)8
+ - name: MYSQL_DATABASE
+ value: cmii_live_srs_op
+ - name: MYSQL_TABLE
+ value: live_segment
+ - name: LOG_LEVEL
+ value: info
+ - name: OSS_META
+ value: 'yes'
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-vol
+ mountPath: /cmii/share/hls
+ subPath: sc-cd-uav-260207/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ serviceName: helm-live-srsrtc-svc
+ podManagementPolicy: OrderedReady
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ partition: 0
+ revisionHistoryLimit: 10
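+# The oss-adaptor sidecar mounts the same srs-vol hls subPath as the SRS container, so it
+# sees the HLS segments SRS writes and can ship them to the MinIO endpoint configured in
+# its environment.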
+---
+# live-srs section
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: helm-live-op-v2
+ namespace: sc-cd-uav-260207
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+ helm.sh/chart: cmlc-live-live-op-2.0.0
+ live-role: op-v2
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ live-role: op-v2
+ template:
+ metadata:
+ labels:
+ live-role: op-v2
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-op-cm-v2
+ items:
+ - key: live.op.conf
+ path: bootstrap.yaml
+ defaultMode: 420
+ containers:
+ - name: helm-live-op-v2
+ image: 172.22.106.77:8033/cmii/cmii-live-operator:5.2.0
+ ports:
+ - name: operator
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 4800m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /cmii/bootstrap.yaml
+ subPath: bootstrap.yaml
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 25%
+ maxSurge: 25%
+ revisionHistoryLimit: 10
+ progressDeadlineSeconds: 600
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc-v2
+ namespace: sc-cd-uav-260207
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ nodePort: 30333
+ selector:
+ live-role: op-v2
+ type: NodePort
+ sessionAffinity: None
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc
+ namespace: sc-cd-uav-260207
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ live-role: op
+ type: ClusterIP
+ sessionAffinity: None
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-op-cm-v2
+ namespace: sc-cd-uav-260207
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+data:
+ live.op.conf: |-
+ server:
+ port: 8080
+ spring:
+ main:
+ allow-bean-definition-overriding: true
+ allow-circular-references: true
+ application:
+ name: cmii-live-operator
+ platform:
+ info:
+ name: cmii-live-operator
+ description: cmii-live-operator
+ version: 2.0
+ scanPackage: com.cmii.live.op
+ cloud:
+ nacos:
+ config:
+ username: nacos
+ password: KingKong@95461234
+ server-addr: helm-nacos:8848
+ extension-configs:
+ - data-id: cmii-live-operator.yml
+ group: 2.0
+ refresh: true
+ shared-configs:
+ - data-id: cmii-backend-system.yml
+ group: 2.0
+ refresh: true
+ discovery:
+ enabled: false
+
+ live:
+ engine:
+ type: srs
+ endpoint: 'http://helm-live-srs-svc:1985'
+ proto:
+ rtmp: 'rtmp://172.22.106.77:31935'
+ rtsp: 'rtsp://172.22.106.77:30554'
+ srt: 'srt://172.22.106.77:30556'
+ flv: 'http://172.22.106.77:30500'
+ hls: 'http://172.22.106.77:30500'
+ rtc: 'webrtc://172.22.106.77:30080'
+ replay: 'https://172.22.106.77:30333'
+ minio:
+ endpoint: http://helm-minio:9000
+ access-key: cmii
+ secret-key: B#923fC7mk
+ bucket: live-cluster-hls
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-backend.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-backend.yaml
new file mode 100644
index 0000000..787d043
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-backend.yaml
@@ -0,0 +1,350 @@
+---
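+# Backend services in this namespace share one deployment pattern: Nacos registration via
+# environment variables, pod/node metadata injected through the downward API, health probes
+# on /cmii/health, and logs written to the shared NFS PVC.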
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-fly-center
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - 260202
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-fly-center
+ image: 192.168.1.4:8033/cmii/cmii-fly-center:2.0.0-0126
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: sc-my-uav-260202
+ - name: APPLICATION_NAME
+ value: cmii-fly-center
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+            - name: BIZ_CONFIG_GROUP
+              value: "2.0"
+            - name: SYS_CONFIG_GROUP
+              value: "2.0"
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.1.4:8033/cmii/cmii-fly-center:2.0.0-0126
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: sc-my-uav-260202/cmii-fly-center
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-fly-center
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-sky-converge
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - 260202
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-sky-converge
+ image: 192.168.1.4:8033/cmii/cmii-sky-converge:2.0.0-012601
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: sc-my-uav-260202
+ - name: APPLICATION_NAME
+ value: cmii-sky-converge
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+            - name: BIZ_CONFIG_GROUP
+              value: "2.0"
+            - name: SYS_CONFIG_GROUP
+              value: "2.0"
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.1.4:8033/cmii/cmii-sky-converge:2.0.0-012601
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: sc-my-uav-260202/cmii-sky-converge
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-sky-converge
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-configmap.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-configmap.yaml
new file mode 100644
index 0000000..12a8ee3
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-configmap.yaml
@@ -0,0 +1,686 @@
+---
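+# One ConfigMap per front-end application: each injects a tenant-specific ingress-config.js
+# in which only ApplicationShortName and AppClientId vary.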
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-oms
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "oms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-security
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "security",
+ AppClientId: "APP_JUSEMc7afyWXxvE7"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-seniclive
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "seniclive",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qinghaitourism
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "qinghaitourism",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-visualization
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "visualization",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uas
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "uas",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dispatchh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "dispatchh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hyper
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "hyper",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uavmsmanager
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "uavmsmanager",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-awareness
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "awareness",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-lite
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "lite",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervision
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "supervision",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-emergency
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "emergency",
+ AppClientId: "APP_aGsTAY1uMZrpKdfk"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-multiterminal
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "multiterminal",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mws
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "mws",
+ AppClientId: "APP_uKniXPELlRERBBwK"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-securityh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "securityh5",
+ AppClientId: "APP_N3ImO0Ubfu9peRHD"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-traffic
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "traffic",
+ AppClientId: "APP_Jc8i2wOQ1t73QEJS"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smauth
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "smauth",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mianyangbackend
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "mianyangbackend",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pangu
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-media
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "media",
+ AppClientId: "APP_4AU8lbifESQO4FD6"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-share
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "share",
+ AppClientId: "APP_4lVSVI0ZGxTssir8"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smsecret
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "smsecret",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-flight-control
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "flight-control",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-ai-brain
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "ai-brain",
+ AppClientId: "APP_rafnuCAmBESIVYMH"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-cmsportal
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "cmsportal",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-jiangsuwenlv
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "jiangsuwenlv",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uasms
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "uasms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pilot2cloud
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "pilot2cloud",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-scanner
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "scanner",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-detection
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "detection",
+ AppClientId: "APP_FDHW2VLVDWPnnOCy"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-logistics
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "logistics",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-eventsh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "eventsh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dikongzhixingh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "dikongzhixingh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-renyike
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "renyike",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervisionh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "supervisionh5",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-armypeople
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "armypeople",
+ AppClientId: "APP_UIegse6Lfou9pO1U"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-base
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "base",
+ AppClientId: "APP_9LY41OaKSqk2btY0"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-open
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "open",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-threedsimulation
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "threedsimulation",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-blockchain
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "blockchain",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-classification
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "classification",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-secenter
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "secenter",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-splice
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "splice",
+ AppClientId: "APP_zE0M3sTRXrCIJS8Y"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qingdao
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "qingdao",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hljtt
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "hljtt",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-iot
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "iot",
+ AppClientId: "empty"
+ }
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-dashboard.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-dashboard.yaml
new file mode 100644
index 0000000..035b36a
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-dashboard.yaml
@@ -0,0 +1,315 @@
+---
+# ------------------- Dashboard Namespace ------------------- #
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: kubernetes-dashboard
+
+---
+# ------------------- Service Account ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Service (NodePort 39999) ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ type: NodePort
+ ports:
+ - port: 443
+ targetPort: 8443
+ nodePort: 39999
+ selector:
+ k8s-app: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Secrets ------------------- #
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-certs
+ namespace: kubernetes-dashboard
+type: Opaque
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-csrf
+ namespace: kubernetes-dashboard
+type: Opaque
+data:
+ csrf: ""
+
+---
+# ------------------- Dashboard Role (FIXED) ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+rules:
+  # [FIX] Allow creating Secrets, which resolves the panic issue
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create"]
+  # Allow operations on specific Secrets
+ - apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+ verbs: ["get", "update", "delete"]
+  # ConfigMap permissions
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ resourceNames: ["kubernetes-dashboard-settings"]
+ verbs: ["get", "update"]
+  # Metrics permissions
+ - apiGroups: [""]
+ resources: ["services"]
+ resourceNames: ["heapster", "dashboard-metrics-scraper"]
+ verbs: ["proxy"]
+ - apiGroups: [""]
+ resources: ["services/proxy"]
+ resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+ verbs: ["get"]
+
+---
+# ------------------- Dashboard RoleBinding ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kubernetes-dashboard-minimal
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: kubernetes-dashboard
+ template:
+ metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ spec:
+ containers:
+ - name: kubernetes-dashboard
+ image: 192.168.1.4:8033/cmii/dashboard:v2.7.0
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8443
+ protocol: TCP
+ args:
+ - --auto-generate-certificates
+ - --namespace=kubernetes-dashboard
+ volumeMounts:
+ - name: kubernetes-dashboard-certs
+ mountPath: /certs
+ - mountPath: /tmp
+ name: tmp-volume
+ livenessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /
+ port: 8443
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ volumes:
+ - name: kubernetes-dashboard-certs
+ secret:
+ secretName: kubernetes-dashboard-certs
+ - name: tmp-volume
+ emptyDir: {}
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ------------------- Metrics Scraper Service ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ ports:
+ - port: 8000
+ targetPort: 8000
+ selector:
+ k8s-app: dashboard-metrics-scraper
+
+---
+# ------------------- Metrics Scraper Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: dashboard-metrics-scraper
+ template:
+ metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ annotations:
+ seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
+ spec:
+ containers:
+ - name: dashboard-metrics-scraper
+ image: 192.168.1.4:8033/cmii/metrics-scraper:v1.0.8
+ ports:
+ - containerPort: 8000
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 8000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-volume
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ volumes:
+ - name: tmp-volume
+ emptyDir: {}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ==================================================================
+# Custom user configuration (ADMIN & READ-ONLY)
+# ==================================================================
+
+# ------------------- 1. Admin User (full privileges) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: admin-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: admin-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: admin-user
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- 2. Read-Only User (read-only + log access) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: read-only-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: dashboard-view-with-logs
+rules:
+ - apiGroups: [""]
+    resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumes", "namespaces"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["pods/log"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["apps"]
+ resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["batch"]
+ resources: ["cronjobs", "jobs"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["ingresses", "networkpolicies"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["events.k8s.io"]
+ resources: ["events"]
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: read-only-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: dashboard-view-with-logs
+subjects:
+ - kind: ServiceAccount
+ name: read-only-user
+ namespace: kubernetes-dashboard
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-emqx.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-emqx.yaml
new file mode 100644
index 0000000..e28b83f
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-emqx.yaml
@@ -0,0 +1,664 @@
+---
+# ============== Secret - password management ==============
+apiVersion: v1
+kind: Secret
+metadata:
+ name: emqx-credentials
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ app.kubernetes.io/managed-by: octopus-control
+    app.kubernetes.io/version: "2.0"
+type: Opaque
+stringData:
+  # Dashboard admin password
+ dashboard-admin-password: "odD8#Ve7.B"
+  # MQTT user password
+ mqtt-admin-password: "odD8#Ve7.B"
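+  # stringData accepts plain-text values; Kubernetes base64-encodes them into data on write.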
+
+---
+# ============== ServiceAccount ==============
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+
+---
+# ============== Role - RBAC ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+rules:
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ - pods
+ verbs:
+ - get
+ - watch
+ - list
+
+---
+# ============== RoleBinding ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+subjects:
+ - kind: ServiceAccount
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+roleRef:
+ kind: Role
+ name: helm-emqxs
+ apiGroup: rbac.authorization.k8s.io
+
+---
+# ============== ConfigMap - bootstrap configuration ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-bootstrap-config
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+data:
+  # Main configuration file - overrides the defaults
+ emqx.conf: |
+    # Node settings
+ node {
+ name = "emqx@${POD_NAME}.helm-emqxs-headless.sc-my-uav-260202.svc.cluster.local"
+ cookie = "emqx-cluster-cookie-secret"
+ data_dir = "/opt/emqx/data"
+ }
+
+    # Cluster settings
+ cluster {
+ name = emqxcl
+      # Single node: manual is recommended; multi-node: k8s
+ discovery_strategy = manual
+ k8s {
+ apiserver = "https://kubernetes.default.svc.cluster.local:443"
+ service_name = "helm-emqxs-headless"
+ # may be changed to hostname
+ address_type = dns
+ namespace = "sc-my-uav-260202"
+ suffix = "svc.cluster.local"
+ }
+ }
+
+ # Logging
+ log {
+ console {
+ enable = true
+ level = info
+ }
+ file {
+ enable = true
+ level = warning
+ path = "/opt/emqx/log"
+ }
+ }
+
+ # Dashboard
+ dashboard {
+ listeners.http {
+ bind = "0.0.0.0:18083"
+ }
+ default_username = "admin"
+ default_password = "public"
+ }
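+ # Note: "public" is only the first-boot default; the init-dashboard sidecar
+ # defined in the StatefulSet below rotates it to the value stored in the
+ # emqx-credentials Secret.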
+
+ # Listeners
+ listeners.tcp.default {
+ bind = "0.0.0.0:1883"
+ max_connections = 1024000
+ }
+
+ listeners.ws.default {
+ bind = "0.0.0.0:8083"
+ max_connections = 1024000
+ websocket.mqtt_path = "/mqtt"
+ }
+
+ listeners.ssl.default {
+ bind = "0.0.0.0:8883"
+ max_connections = 512000
+ }
+
+ # Authentication - uses the built-in database
+ authentication = [
+ {
+ mechanism = password_based
+ backend = built_in_database
+ user_id_type = username
+ password_hash_algorithm {
+ name = sha256
+ salt_position = suffix
+ }
+ # Bootstrap file path - seeds the initial users
+ bootstrap_file = "/opt/emqx/data/bootstrap_users.json"
+ bootstrap_type = plain
+ }
+ ]
+
+ # Authorization
+ authorization {
+ no_match = deny
+ deny_action = disconnect
+
+ sources = [
+ {
+ type = built_in_database
+ enable = true
+ }
+ ]
+ }
+
+ # MQTT protocol settings
+ mqtt {
+ max_packet_size = "1MB"
+ max_clientid_len = 65535
+ max_topic_levels = 128
+ max_qos_allowed = 2
+ max_topic_alias = 65535
+ retain_available = true
+ wildcard_subscription = true
+ shared_subscription = true
+ }
+
+---
+# ============== ConfigMap - Users & ACL (strict JSON format) ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-bootstrap-users
+ namespace: sc-my-uav-260202
+data:
+ bootstrap_users.json: |
+ [
+ { "user_id": "admin", "password": "odD8#Ve7.B", "is_superuser": true },
+ { "user_id": "cmlc", "password": "odD8#Ve7.B", "is_superuser": false }
+ ]
+
+ # [Revision] Since jq is available, a standard JSON array is used here - the least error-prone format
+ bootstrap_acl.json: |
+ [
+ {
+ "username": "admin",
+ "rules": [
+ {"action": "all", "permission": "allow", "topic": "#"}
+ ]
+ },
+ {
+ "username": "cmlc",
+ "rules": [
+ {"action": "publish", "permission": "allow", "topic": "#"},
+ {"action": "subscribe", "permission": "allow", "topic": "#"}
+ ]
+ }
+ ]
+
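+# Note: unlike bootstrap_users.json, which EMQX loads itself through the
+# authentication bootstrap_file setting, this ACL file is not read by EMQX
+# directly - the init-dashboard sidecar below pushes it into the built-in
+# database via the REST API.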
+---
+# ============== ConfigMap - Init Script (revised) ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-init-dashboard
+ namespace: sc-my-uav-260202
+data:
+ init-dashboard.sh: |
+ #!/bin/bash
+ set -e
+
+ DASHBOARD_USER="admin"
+ DASHBOARD_PASS="${DASHBOARD_ADMIN_PASSWORD}"
+ EMQX_API="http://localhost:18083/api/v5"
+ ACL_FILE="/bootstrap/bootstrap_acl.json"
+
+ # Helper: print a timestamped log line
+ log() {
+ echo "[$(date +'%H:%M:%S')] $1"
+ }
+
+ log "======================================"
+ log "初始化 Dashboard 与 ACL (Debug Version)"
+ log "======================================"
+
+ # ----------------------------------------------------------------
+ # 1. 等待 EMQX API 就绪
+ # ----------------------------------------------------------------
+ log "[1/4] 等待 EMQX API 就绪..."
+ for i in $(seq 1 60); do
+ if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
+ log "✓ EMQX API 已就绪"
+ break
+ fi
+ if [ $i -eq 60 ]; then
+ log "✗ EMQX API 启动超时"
+ exit 1
+ fi
+ sleep 5
+ done
+
+ # ----------------------------------------------------------------
+ # 2. Update the Dashboard password
+ # ----------------------------------------------------------------
+ log "[2/3] Checking/updating the Dashboard password..."
+
+ # Fetch a token (try the default password first)
+ LOGIN_RESP=$(curl -s -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"public\"}")
+
+ TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+ if [ -n "$TOKEN" ]; then
+ log " Default password detected - updating..."
+ curl -s -f -X POST "${EMQX_API}/users/${DASHBOARD_USER}/change_pwd" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "{\"old_pwd\":\"public\",\"new_pwd\":\"${DASHBOARD_PASS}\"}"
+ log " ✓ Dashboard password updated"
+ else
+ log " ℹ Could not log in with the default password - skipping (it may already have been changed)"
+ fi
+
+ # ----------------------------------------------------------------
+ # 3. Import ACL rules
+ # ----------------------------------------------------------------
+ log "[3/3] Importing ACL rules..."
+
+ # Log in again to fetch a fresh token
+ LOGIN_RESP=$(curl -sS -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}")
+
+ TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+ if [ -z "$TOKEN" ]; then
+ log " ✗ Could not obtain a token - check the password settings"
+ exit 0
+ fi
+
+ if [ -f "$ACL_FILE" ]; then
+ log " Parsing ACL file: $ACL_FILE"
+
+ if ! jq -e . "$ACL_FILE" >/dev/null 2>&1; then
+ log " ✗ ACL file is not valid JSON - skipping"
+ exit 0
+ fi
+
+ jq -c '.[]' "$ACL_FILE" | while read -r user_config; do
+ USERNAME=$(echo "$user_config" | jq -r '.username // empty')
+
+ # PUT/POST both require username + rules (username is mandatory)
+ REQ_BODY=$(echo "$user_config" | jq -c '{username: .username, rules: .rules}')
+
+ if [ -z "$USERNAME" ]; then
+ log " ✗ ACL entry is missing username - skipping"
+ continue
+ fi
+
+ log " Configuring ACL rules for user ${USERNAME}..."
+
+ # 1) Try PUT first (overwrite/update)
+ http_code=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+ -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "$REQ_BODY")
+
+ if [ "$http_code" = "204" ]; then
+ log " ✓ PUT update succeeded"
+ elif [ "$http_code" = "404" ]; then
+ # 2) Not found - create with POST
+ http_code2=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+ -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "$REQ_BODY")
+
+ if [ "$http_code2" = "204" ]; then
+ log " ✓ POST create succeeded"
+ else
+ log " ✗ POST failed (HTTP ${http_code2}): $(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+ exit 1
+ fi
+ else
+ log " ✗ PUT failed (HTTP ${http_code}): $(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+ exit 1
+ fi
+
+ # 3) Verify after import (optional, but strongly recommended)
+ verify_code=$(curl -sS -o /tmp/emqx_acl_verify.json -w '%{http_code}' \
+ -H "Authorization: Bearer ${TOKEN}" \
+ "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}")
+
+ if [ "$verify_code" = "200" ]; then
+ log " ✓ Verified: $(cat /tmp/emqx_acl_verify.json | jq -c '.')"
+ else
+ log " ✗ Verification failed (HTTP ${verify_code}): $(cat /tmp/emqx_acl_verify.json 2>/dev/null || true)"
+ exit 1
+ fi
+ done
+
+ log " ✓ ACL rules import complete"
+ else
+ log " ℹ No ACL file found"
+ fi
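+
+ # Manual spot-check (a sketch for illustration; run inside the sidecar
+ # container, substituting the real dashboard password):
+ #   TOKEN=$(curl -s -X POST "${EMQX_API}/login" -H 'Content-Type: application/json' \
+ #     -d '{"username":"admin","password":"<dashboard password>"}' | jq -r .token)
+ #   curl -s -H "Authorization: Bearer ${TOKEN}" \
+ #     "${EMQX_API}/authorization/sources/built_in_database/rules/users" | jq .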
+
+---
+# ============== StatefulSet ==============
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ replicas: 1
+ serviceName: helm-emqxs-headless
+ podManagementPolicy: Parallel
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+
+ template:
+ metadata:
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - 260202
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: cmii.app
+ operator: In
+ values:
+ - helm-emqxs
+ topologyKey: kubernetes.io/hostname
+
+ imagePullSecrets:
+ - name: harborsecret
+
+ serviceAccountName: helm-emqxs
+
+ securityContext:
+ fsGroup: 1000
+ runAsUser: 1000
+
+ # InitContainer - prepares the bootstrap files
+ initContainers:
+ - name: prepare-bootstrap
+ # dynamically selected tools image
+ image: 192.168.1.4:8033/cmii/tools:1.0
+ imagePullPolicy: IfNotPresent
+ # =========================================================
+ # Permissions: must run as root so the chown below succeeds
+ # =========================================================
+ securityContext:
+ runAsUser: 0
+ command:
+ - /bin/sh
+ - -c
+ - |
+ echo "准备bootstrap文件..."
+
+ # 创建数据目录
+ mkdir -p /opt/emqx/data
+
+ # 复制bootstrap文件到数据目录
+ # 只在文件不存在时复制,避免覆盖已有数据
+ if [ ! -f /opt/emqx/data/bootstrap_users.json ]; then
+ cp /bootstrap-src/bootstrap_users.json /opt/emqx/data/
+ echo "✓ 已复制用户bootstrap文件"
+ else
+ echo "ℹ 用户bootstrap文件已存在,跳过"
+ fi
+
+ # 设置权限 (现在有root权限,可以成功)
+ chown -R 1000:1000 /opt/emqx/data
+
+ echo "✓ Bootstrap准备完成"
+ volumeMounts:
+ - name: emqx-data
+ mountPath: /opt/emqx/data
+ - name: bootstrap-users
+ mountPath: /bootstrap-src
+
+ containers:
+ # Main container - EMQX
+ - name: emqx
+ # dynamically selected emqx image
+ image: 192.168.1.4:8033/cmii/emqx:5.8.8
+ imagePullPolicy: IfNotPresent
+
+ env:
+ # Pod metadata
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: EMQX_DATA_DIR
+ value: "/opt/emqx/data"
+
+ ports:
+ - name: mqtt
+ containerPort: 1883
+ - name: mqttssl
+ containerPort: 8883
+ - name: ws
+ containerPort: 8083
+ - name: dashboard
+ containerPort: 18083
+ - name: ekka
+ containerPort: 4370
+
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "512Mi"
+ limits:
+ cpu: "2000m"
+ memory: "2Gi"
+
+ livenessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ timeoutSeconds: 10
+ failureThreshold: 3
+
+ readinessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+
+ startupProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ failureThreshold: 30
+
+ volumeMounts:
+ - name: emqx-data
+ mountPath: /opt/emqx/data
+ # Mount the single config file via subPath so the directory is not shadowed
+ - name: bootstrap-config
+ mountPath: /opt/emqx/etc/emqx.conf
+ subPath: emqx.conf
+
+ # Sidecar - initializes the Dashboard password and ACL
+ - name: init-dashboard
+ # dynamically selected tools image
+ image: 192.168.1.4:8033/cmii/tools:1.0
+ imagePullPolicy: IfNotPresent
+
+ command:
+ - /bin/sh
+ - -c
+ - |
+ # Wait for the main container to come up
+ echo "Waiting for EMQX to start..."
+ sleep 20
+
+ # Run the initialization
+ /bin/sh /scripts/init-dashboard.sh
+
+ # Keep the sidecar running
+ echo "Initialization complete - entering daemon mode..."
+ while true; do sleep 3600; done
+
+ env:
+ - name: DASHBOARD_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: emqx-credentials
+ key: dashboard-admin-password
+
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "64Mi"
+ limits:
+ cpu: "200m"
+ memory: "128Mi"
+
+ volumeMounts:
+ - name: init-script
+ mountPath: /scripts
+ - name: bootstrap-users
+ mountPath: /bootstrap
+
+ volumes:
+ - name: bootstrap-config
+ configMap:
+ name: emqx-bootstrap-config
+ - name: bootstrap-users
+ configMap:
+ name: emqx-bootstrap-users
+ - name: init-script
+ configMap:
+ name: emqx-init-dashboard
+ defaultMode: 0755
+ - name: emqx-data
+ persistentVolumeClaim:
+ claimName: helm-emqxs
+
+---
+# ============== Service - Headless ==============
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs-headless
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ targetPort: 1883
+ - name: mqttssl
+ port: 8883
+ targetPort: 8883
+ - name: ws
+ port: 8083
+ targetPort: 8083
+ - name: dashboard
+ port: 18083
+ targetPort: 18083
+ - name: ekka
+ port: 4370
+ targetPort: 4370
+
+---
+# ============== Service - NodePort ==============
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: NodePort
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ targetPort: 1883
+ nodePort: 31883
+ - name: dashboard
+ port: 18083
+ targetPort: 18083
+ nodePort: 38085
+ - name: ws
+ port: 8083
+ targetPort: 8083
+ nodePort: 38083
+ - name: mqttssl
+ port: 8883
+ targetPort: 8883
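+
+# Quick connectivity check (a sketch, assuming the mosquitto clients are
+# installed and <node-ip> is any cluster node reachable on the NodePort):
+#   mosquitto_sub -h <node-ip> -p 31883 -u cmlc -P 'odD8#Ve7.B' -t 'test/#' -v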
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-frontend.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-frontend.yaml
new file mode 100644
index 0000000..2c0f9fb
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-frontend.yaml
@@ -0,0 +1,114 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: nginx-cm
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: frontend
+data:
+ nginx.conf: |
+ server {
+ listen 9528;
+ server_name localhost;
+ gzip on;
+
+ location / {
+ root /home/cmii-platform/dist;
+ index index.html index.htm;
+ }
+
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ root html;
+ }
+ }
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-platform-lite
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/app-version: 2.0
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ template:
+ metadata:
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-platform-lite
+ image: 192.168.1.4:8033/cmii/cmii-uav-platform-lite:2.0.0-2026012703-noicp
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: sc-my-uav-260202
+ - name: APPLICATION_NAME
+ value: cmii-uav-platform-lite
+ ports:
+ - name: platform-9528
+ containerPort: 9528
+ protocol: TCP
+ resources:
+ limits:
+ cpu: "1"
+ memory: 1Gi
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ volumeMounts:
+ - name: nginx-conf
+ mountPath: /etc/nginx/conf.d/nginx.conf
+ subPath: nginx.conf
+ - name: tenant-prefix
+ subPath: ingress-config.js
+ mountPath: /home/cmii-platform/dist/ingress-config.js
+ volumes:
+ - name: nginx-conf
+ configMap:
+ name: nginx-cm
+ items:
+ - key: nginx.conf
+ path: nginx.conf
+ - name: tenant-prefix
+ configMap:
+ name: tenant-prefix-lite
+ items:
+ - key: ingress-config.js
+ path: ingress-config.js
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-platform-lite
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/version: 2.0
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ ports:
+ - name: web-svc-port
+ port: 9528
+ protocol: TCP
+ targetPort: 9528
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-ingress.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-ingress.yaml
new file mode 100644
index 0000000..ce8c9b9
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-ingress.yaml
@@ -0,0 +1,832 @@
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: frontend-applications-ingress
+ namespace: sc-my-uav-260202
+ labels:
+ type: frontend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ rewrite ^(/supervision)$ $1/ redirect;
+ rewrite ^(/supervisionh5)$ $1/ redirect;
+ rewrite ^(/pangu)$ $1/ redirect;
+ rewrite ^(/ai-brain)$ $1/ redirect;
+ rewrite ^(/armypeople)$ $1/ redirect;
+ rewrite ^(/awareness)$ $1/ redirect;
+ rewrite ^(/base)$ $1/ redirect;
+ rewrite ^(/blockchain)$ $1/ redirect;
+ rewrite ^(/classification)$ $1/ redirect;
+ rewrite ^(/cmsportal)$ $1/ redirect;
+ rewrite ^(/detection)$ $1/ redirect;
+ rewrite ^(/dikongzhixingh5)$ $1/ redirect;
+ rewrite ^(/dispatchh5)$ $1/ redirect;
+ rewrite ^(/emergency)$ $1/ redirect;
+ rewrite ^(/eventsh5)$ $1/ redirect;
+ rewrite ^(/flight-control)$ $1/ redirect;
+ rewrite ^(/hljtt)$ $1/ redirect;
+ rewrite ^(/hyper)$ $1/ redirect;
+ rewrite ^(/iot)$ $1/ redirect;
+ rewrite ^(/jiangsuwenlv)$ $1/ redirect;
+ rewrite ^(/lite)$ $1/ redirect;
+ rewrite ^(/logistics)$ $1/ redirect;
+ rewrite ^(/media)$ $1/ redirect;
+ rewrite ^(/mianyangbackend)$ $1/ redirect;
+ rewrite ^(/multiterminal)$ $1/ redirect;
+ rewrite ^(/mws)$ $1/ redirect;
+ rewrite ^(/oms)$ $1/ redirect;
+ rewrite ^(/open)$ $1/ redirect;
+ rewrite ^(/pilot2cloud)$ $1/ redirect;
+ rewrite ^(/qingdao)$ $1/ redirect;
+ rewrite ^(/qinghaitourism)$ $1/ redirect;
+ rewrite ^(/renyike)$ $1/ redirect;
+ rewrite ^(/scanner)$ $1/ redirect;
+ rewrite ^(/security)$ $1/ redirect;
+ rewrite ^(/securityh5)$ $1/ redirect;
+ rewrite ^(/seniclive)$ $1/ redirect;
+ rewrite ^(/share)$ $1/ redirect;
+ rewrite ^(/smauth)$ $1/ redirect;
+ rewrite ^(/smsecret)$ $1/ redirect;
+ rewrite ^(/splice)$ $1/ redirect;
+ rewrite ^(/threedsimulation)$ $1/ redirect;
+ rewrite ^(/traffic)$ $1/ redirect;
+ rewrite ^(/uas)$ $1/ redirect;
+ rewrite ^(/uasms)$ $1/ redirect;
+ rewrite ^(/visualization)$ $1/ redirect;
+ rewrite ^(/uavmsmanager)$ $1/ redirect;
+ rewrite ^(/secenter)$ $1/ redirect;
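+ # Note: the rewrites above redirect bare app prefixes (e.g. /lite) to a
+ # trailing-slash form, which keeps each SPA's relative asset paths resolving.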
+spec:
+ rules:
+ - host: fake-domain.sc-my-uav-260202.io
+ http:
+ paths:
+ - path: /260202/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /260202/supervision/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervision
+ servicePort: 9528
+ - path: /260202/supervisionh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervisionh5
+ servicePort: 9528
+ - path: /260202/pangu/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /260202/ai-brain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-ai-brain
+ servicePort: 9528
+ - path: /260202/armypeople/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-armypeople
+ servicePort: 9528
+ - path: /260202/awareness/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-awareness
+ servicePort: 9528
+ - path: /260202/base/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-base
+ servicePort: 9528
+ - path: /260202/blockchain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-blockchain
+ servicePort: 9528
+ - path: /260202/classification/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-classification
+ servicePort: 9528
+ - path: /260202/cmsportal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-cms-portal
+ servicePort: 9528
+ - path: /260202/detection/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-detection
+ servicePort: 9528
+ - path: /260202/dikongzhixingh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dikongzhixingh5
+ servicePort: 9528
+ - path: /260202/dispatchh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dispatchh5
+ servicePort: 9528
+ - path: /260202/emergency/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-emergency-rescue
+ servicePort: 9528
+ - path: /260202/eventsh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-eventsh5
+ servicePort: 9528
+ - path: /260202/flight-control/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-flight-control
+ servicePort: 9528
+ - path: /260202/hljtt/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hljtt
+ servicePort: 9528
+ - path: /260202/hyper/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hyperspectral
+ servicePort: 9528
+ - path: /260202/iot/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-iot-manager
+ servicePort: 9528
+ - path: /260202/jiangsuwenlv/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-jiangsuwenlv
+ servicePort: 9528
+ - path: /260202/lite/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-lite
+ servicePort: 9528
+ - path: /260202/logistics/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-logistics
+ servicePort: 9528
+ - path: /260202/media/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-media
+ servicePort: 9528
+ - path: /260202/mianyangbackend/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mianyangbackend
+ servicePort: 9528
+ - path: /260202/multiterminal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-multiterminal
+ servicePort: 9528
+ - path: /260202/mws/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mws
+ servicePort: 9528
+ - path: /260202/oms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-oms
+ servicePort: 9528
+ - path: /260202/open/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-open
+ servicePort: 9528
+ - path: /260202/pilot2cloud/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-pilot2-to-cloud
+ servicePort: 9528
+ - path: /260202/qingdao/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qingdao
+ servicePort: 9528
+ - path: /260202/qinghaitourism/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qinghaitourism
+ servicePort: 9528
+ - path: /260202/renyike/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-renyike
+ servicePort: 9528
+ - path: /260202/scanner/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-scanner
+ servicePort: 9528
+ - path: /260202/security/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-security
+ servicePort: 9528
+ - path: /260202/securityh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-securityh5
+ servicePort: 9528
+ - path: /260202/seniclive/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-seniclive
+ servicePort: 9528
+ - path: /260202/share/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-share
+ servicePort: 9528
+ - path: /260202/smauth/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smauth
+ servicePort: 9528
+ - path: /260202/smsecret/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smsecret
+ servicePort: 9528
+ - path: /260202/splice/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-splice
+ servicePort: 9528
+ - path: /260202/threedsimulation/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-threedsimulation
+ servicePort: 9528
+ - path: /260202/traffic/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-traffic
+ servicePort: 9528
+ - path: /260202/uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uas
+ servicePort: 9528
+ - path: /260202/uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uaskny
+ servicePort: 9528
+ - path: /260202/uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasms
+ servicePort: 9528
+ - path: /260202/uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasmskny
+ servicePort: 9528
+ - path: /260202/visualization/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-visualization
+ servicePort: 9528
+ - path: /260202/uavmsmanager/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-manager
+ servicePort: 9528
+ - path: /260202/secenter/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-security-center
+ servicePort: 9528
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: backend-applications-ingress
+ namespace: sc-my-uav-260202
+ labels:
+ type: backend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+spec:
+ rules:
+ - host: cmii-admin-data.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-data
+ servicePort: 8080
+ - host: cmii-admin-gateway.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - host: cmii-admin-user.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-user
+ servicePort: 8080
+ - host: cmii-app-release.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-app-release
+ servicePort: 8080
+ - host: cmii-open-gateway.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - host: cmii-sky-converge.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
+ - host: cmii-suav-supervision.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-supervision
+ servicePort: 8080
+ - host: cmii-uas-datahub.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-datahub
+ servicePort: 8080
+ - host: cmii-uas-gateway.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - host: cmii-uas-lifecycle.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-lifecycle
+ servicePort: 8080
+ - host: cmii-uav-advanced5g.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-advanced5g
+ servicePort: 8080
+ - host: cmii-uav-airspace.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-airspace
+ servicePort: 8080
+ - host: cmii-uav-alarm.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-alarm
+ servicePort: 8080
+ - host: cmii-uav-autowaypoint.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-autowaypoint
+ servicePort: 8080
+ - host: cmii-uav-brain.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-brain
+ servicePort: 8080
+ - host: cmii-uav-bridge.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-bridge
+ servicePort: 8080
+ - host: cmii-uav-cloud-live.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cloud-live
+ servicePort: 8080
+ - host: cmii-uav-clusters.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-clusters
+ servicePort: 8080
+ - host: cmii-uav-cms.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cms
+ servicePort: 8080
+ - host: cmii-uav-data-post-process.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-data-post-process
+ servicePort: 8080
+ - host: cmii-uav-depotautoreturn.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-depotautoreturn
+ servicePort: 8080
+ - host: cmii-uav-developer.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-developer
+ servicePort: 8080
+ - host: cmii-uav-device.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-device
+ servicePort: 8080
+ - host: cmii-uav-emergency.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-emergency
+ servicePort: 8080
+ - host: cmii-uav-fwdd.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-fwdd
+ servicePort: 8080
+ - host: cmii-uav-gateway.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - host: cmii-uav-gis-server.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gis-server
+ servicePort: 8080
+ - host: cmii-uav-grid-datasource.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-datasource
+ servicePort: 8080
+ - host: cmii-uav-grid-engine.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-engine
+ servicePort: 8080
+ - host: cmii-uav-grid-manage.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-manage
+ servicePort: 8080
+ - host: cmii-uav-industrial-portfolio.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-industrial-portfolio
+ servicePort: 8080
+ - host: cmii-uav-integration.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-integration
+ servicePort: 8080
+ - host: cmii-uav-iot-dispatcher.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-dispatcher
+ servicePort: 8080
+ - host: cmii-uav-iot-manager.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-manager
+ servicePort: 8080
+ - host: cmii-uav-kpi-monitor.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-kpi-monitor
+ servicePort: 8080
+ - host: cmii-uav-logger.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-logger
+ servicePort: 8080
+ - host: cmii-uav-material-warehouse.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-material-warehouse
+ servicePort: 8080
+ - host: cmii-uav-mission.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mission
+ servicePort: 8080
+ - host: cmii-uav-mqtthandler.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mqtthandler
+ servicePort: 8080
+ - host: cmii-uav-multilink.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-multilink
+ servicePort: 8080
+ - host: cmii-uav-notice.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-notice
+ servicePort: 8080
+ - host: cmii-uav-oauth.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-oauth
+ servicePort: 8080
+ - host: cmii-uav-process.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-process
+ servicePort: 8080
+ - host: cmii-uav-sec-awareness.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sec-awareness
+ servicePort: 8080
+ - host: cmii-uav-security-trace.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-security-trace
+ servicePort: 8080
+ - host: cmii-uav-sense-adapter.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sense-adapter
+ servicePort: 8080
+ - host: cmii-uav-surveillance.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-surveillance
+ servicePort: 8080
+ - host: cmii-uav-sync.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sync
+ servicePort: 8080
+ - host: cmii-uav-tcp-server.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tcp-server
+ servicePort: 8080
+ - host: cmii-uav-threedsimulation.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-threedsimulation
+ servicePort: 8080
+ - host: cmii-uav-tower.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tower
+ servicePort: 8080
+ - host: cmii-uav-user.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-user
+ servicePort: 8080
+ - host: cmii-uav-watchdog.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-watchdog
+ servicePort: 8080
+ - host: cmii-uav-waypoint.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-waypoint
+ servicePort: 8080
+ - host: cmii-uavms-pyfusion.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-pyfusion
+ servicePort: 8080
+ - host: cmii-uavms-security-center.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-security-center
+ servicePort: 8080
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: all-gateways-ingress
+ namespace: sc-my-uav-260202
+ labels:
+ type: api-gateway
+ octopus.control: all-ingress-config-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+spec:
+ rules:
+ - host: fake-domain.sc-my-uav-260202.io
+ http:
+ paths:
+ - path: /260202/oms/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - path: /260202/open/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - path: /260202/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - path: /260202/uas/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - path: /260202/converge/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-mongo.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-mongo.yaml
new file mode 100644
index 0000000..ee114fe
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-mongo.yaml
@@ -0,0 +1,78 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mongo
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ ports:
+ - port: 27017
+ name: server-27017
+ targetPort: 27017
+ nodePort: 37017
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mongo
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ serviceName: helm-mongo
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: helm-mongo
+ image: 192.168.1.4:8033/cmii/mongo:5.0
+ resources: {}
+ ports:
+ - containerPort: 27017
+ name: mongo27017
+ protocol: TCP
+ env:
+ - name: MONGO_INITDB_ROOT_USERNAME
+ value: cmlc
+ - name: MONGO_INITDB_ROOT_PASSWORD
+ value: REdPza8#oVlt
+ volumeMounts:
+ - name: mongo-data
+ mountPath: /data/db
+ readOnly: false
+ subPath: default/helm-mongo/data/db
+ volumes:
+ - name: mongo-data
+ persistentVolumeClaim:
+ claimName: helm-mongo
+---
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-mysql.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-mysql.yaml
new file mode 100644
index 0000000..1ee6339
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-mysql.yaml
@@ -0,0 +1,410 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ annotations: {}
+secrets:
+ - name: helm-mysql
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ mysql-root-password: "UXpmWFFoZDNiUQ=="
+ mysql-password: "S0F0cm5PckFKNw=="
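+# (Both values are plain base64, e.g. produced with: echo -n '<password>' | base64)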
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ my.cnf: |-
+
+ [mysqld]
+ port=3306
+ basedir=/opt/bitnami/mysql
+ datadir=/bitnami/mysql/data
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ log-error=/bitnami/mysql/data/error.log
+ general_log_file = /bitnami/mysql/data/general.log
+ slow_query_log_file = /bitnami/mysql/data/slow.log
+ innodb_data_file_path = ibdata1:512M:autoextend
+ innodb_buffer_pool_size = 512M
+ innodb_buffer_pool_instances = 2
+ innodb_log_file_size = 512M
+ innodb_log_files_in_group = 4
+ log-bin = /bitnami/mysql/data/mysql-bin
+ max_binlog_size=1G
+ transaction_isolation = REPEATABLE-READ
+ default_storage_engine = innodb
+ character-set-server = utf8mb4
+ collation-server=utf8mb4_bin
+ binlog_format = ROW
+ binlog_rows_query_log_events=on
+ binlog_cache_size=4M
+ binlog_expire_logs_seconds = 1296000
+ max_binlog_cache_size=2G
+ gtid_mode = on
+ enforce_gtid_consistency = 1
+ sync_binlog = 1
+ innodb_flush_log_at_trx_commit = 1
+ innodb_flush_method = O_DIRECT
+ log_slave_updates=1
+ relay_log_recovery = 1
+ relay-log-purge = 1
+ default_time_zone = '+08:00'
+ lower_case_table_names=1
+ log_bin_trust_function_creators=1
+ group_concat_max_len=67108864
+ innodb_io_capacity = 4000
+ innodb_io_capacity_max = 8000
+ innodb_flush_sync = 0
+ innodb_flush_neighbors = 0
+ innodb_write_io_threads = 8
+ innodb_read_io_threads = 8
+ innodb_purge_threads = 4
+ innodb_page_cleaners = 4
+ innodb_open_files = 65535
+ innodb_max_dirty_pages_pct = 50
+ innodb_lru_scan_depth = 4000
+ innodb_checksum_algorithm = crc32
+ innodb_lock_wait_timeout = 10
+ innodb_rollback_on_timeout = 1
+ innodb_print_all_deadlocks = 1
+ innodb_file_per_table = 1
+ innodb_online_alter_log_max_size = 4G
+ innodb_stats_on_metadata = 0
+ innodb_thread_concurrency = 0
+ innodb_sync_spin_loops = 100
+ innodb_spin_wait_delay = 30
+ lock_wait_timeout = 3600
+ slow_query_log = 1
+ long_query_time = 10
+ log_queries_not_using_indexes =1
+ log_throttle_queries_not_using_indexes = 60
+ min_examined_row_limit = 100
+ log_slow_admin_statements = 1
+ log_slow_slave_statements = 1
+ default_authentication_plugin=mysql_native_password
+ skip-name-resolve=1
+ explicit_defaults_for_timestamp=1
+ plugin_dir=/opt/bitnami/mysql/plugin
+ max_allowed_packet=128M
+ max_connections = 2000
+ max_connect_errors = 1000000
+ table_definition_cache=2000
+ table_open_cache_instances=64
+ tablespace_definition_cache=1024
+ thread_cache_size=256
+ interactive_timeout = 600
+ wait_timeout = 600
+ tmpdir=/opt/bitnami/mysql/tmp
+ bind-address=0.0.0.0
+ performance_schema = 1
+ performance_schema_instrument = '%memory%=on'
+ performance_schema_instrument = '%lock%=on'
+ innodb_monitor_enable=ALL
+
+ [mysql]
+ no-auto-rehash
+
+ [mysqldump]
+ quick
+ max_allowed_packet = 32M
+
+ [client]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ default-character-set=UTF8
+ plugin_dir=/opt/bitnami/mysql/plugin
+
+ [manager]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql-init-scripts
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ create_users_grants_core.sql: |-
+ create user zyly@'%' identified by 'Cmii@451315';
+ grant select on *.* to zyly@'%';
+ create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
+ grant all on *.* to zyly_qc@'%';
+ create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
+ grant all on *.* to k8s_admin@'%';
+ create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
+ grant all on *.* to audit_dba@'%';
+ create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
+ GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
+ create user monitor@'%' identified by 'PL3#nGtrWbf-';
+ grant REPLICATION CLIENT on *.* to monitor@'%';
+ flush privileges;
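+# Note: with the Bitnami MySQL image, scripts mounted under
+# /docker-entrypoint-initdb.d run only on the first start against an empty
+# data directory; they are skipped once data already exists.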
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: cmii-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.app: mysql
+ cmii.type: middleware
+ octopus.control: mysql-db-wdd
+spec:
+ ports:
+ - name: mysql
+ protocol: TCP
+ port: 13306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.app: mysql
+ cmii.type: middleware
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql-headless
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ ports:
+ - name: mysql
+ port: 3306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: NodePort
+ ports:
+ - name: mysql
+ port: 3306
+ protocol: TCP
+ targetPort: mysql
+ nodePort: 33306
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ serviceName: helm-mysql
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ annotations:
+ checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-mysql
+ affinity: {}
+ nodeSelector:
+ mysql-deploy: "true"
+ securityContext:
+ fsGroup: 1001
+ initContainers:
+ - name: change-volume-permissions
+ image: 192.168.1.4:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ chown -R 1001:1001 /bitnami/mysql
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ containers:
+ - name: mysql
+ image: 192.168.1.4:8033/cmii/mysql:8.1.0-debian-11-r42
+ imagePullPolicy: "IfNotPresent"
+ securityContext:
+ runAsUser: 1001
+ env:
+ - name: BITNAMI_DEBUG
+ value: "true"
+ - name: MYSQL_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-mysql
+ key: mysql-root-password
+ - name: MYSQL_DATABASE
+ value: "cmii"
+ ports:
+ - name: mysql
+ containerPort: 3306
+ livenessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ readinessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ startupProbe:
+ failureThreshold: 60
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d
+ - name: config
+ mountPath: /opt/bitnami/mysql/conf/my.cnf
+ subPath: my.cnf
+ volumes:
+ - name: config
+ configMap:
+ name: helm-mysql
+ - name: custom-init-scripts
+ configMap:
+ name: helm-mysql-init-scripts
+ - name: mysql-data
+ hostPath:
+ path: /var/lib/docker/mysql-pv/sc-my-uav-260202/
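+# Note: because the data volume is a hostPath, the nodeSelector above
+# (mysql-deploy: "true") pins the pod to the node that holds the data;
+# label exactly one node accordingly, e.g.:
+#   kubectl label node <node-name> mysql-deploy=true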
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-nacos.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-nacos.yaml
new file mode 100644
index 0000000..aa20e4b
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-nacos.yaml
@@ -0,0 +1,130 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-nacos-cm
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: 2.0
+data:
+ mysql.db.name: "cmii_nacos_config"
+ mysql.db.host: "helm-mysql"
+ mysql.port: "3306"
+ mysql.user: "k8s_admin"
+ mysql.password: "fP#UaH6qQ3)8"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-nacos
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: 2.0
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ ports:
+ - port: 8848
+ name: server
+ targetPort: 8848
+ nodePort: 38848
+ - port: 9848
+ name: server12
+ targetPort: 9848
+ - port: 9849
+ name: server23
+ targetPort: 9849
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-nacos
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: 2.0
+spec:
+ serviceName: helm-nacos
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/version: 2.0
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: nacos-server
+ image: 192.168.1.4:8033/cmii/nacos-server:v2.1.2
+ ports:
+ - containerPort: 8848
+ name: dashboard
+ - containerPort: 9848
+ name: tcp-9848
+ - containerPort: 9849
+ name: tcp-9849
+ env:
+ - name: NACOS_AUTH_ENABLE
+ value: "false"
+ - name: NACOS_REPLICAS
+ value: "1"
+ - name: MYSQL_SERVICE_DB_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.name
+ - name: MYSQL_SERVICE_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.port
+ - name: MYSQL_SERVICE_USER
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.user
+ - name: MYSQL_SERVICE_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.password
+ - name: MYSQL_SERVICE_HOST
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.host
+ - name: NACOS_SERVER_PORT
+ value: "8848"
+ - name: NACOS_APPLICATION_PORT
+ value: "8848"
+ - name: PREFER_HOST_MODE
+ value: "hostname"
+ - name: MODE
+ value: standalone
+ - name: SPRING_DATASOURCE_PLATFORM
+ value: mysql
+---
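+# Note: Nacos expects the cmii_nacos_config database to already exist in MySQL
+# with the Nacos schema loaded; it does not appear to be created by the init
+# scripts in this project, so provision it before the first start.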
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-nfs-test.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-nfs-test.yaml
new file mode 100644
index 0000000..315bde1
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-nfs-test.yaml
@@ -0,0 +1,38 @@
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: test-claim
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: nfs-prod-distribute
+ resources:
+ requests:
+ storage: 1Mi
+---
+kind: Pod
+apiVersion: v1
+metadata:
+ name: test-pod
+spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: test-pod
+ image: 192.168.1.4:8033/cmii/busybox:latest
+ command:
+ - "/bin/sh"
+ args:
+ - "-c"
+ - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
+ volumeMounts:
+ - name: nfs-pvc
+ mountPath: "/mnt"
+ restartPolicy: "Never"
+ volumes:
+ - name: nfs-pvc
+ persistentVolumeClaim:
+ claimName: test-claim # must match the PVC name above
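+
+# Usage (illustrative): apply this file and confirm that NFS-CREATE-SUCCESS
+# appears under the NFS export (/var/lib/docker/nfs_data on 192.168.1.6):
+#   kubectl apply -f k8s-nfs-test.yaml
+#   kubectl get pod test-pod   # should reach Completed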
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-nfs.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-nfs.yaml
new file mode 100644
index 0000000..58d6b15
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-nfs.yaml
@@ -0,0 +1,114 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system # set the namespace to match your environment; same applies below
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: nfs-client-provisioner-runner
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: run-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+# name: nfs-client-provisioner-runner
+ name: cluster-admin
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: Role
+ name: leader-locking-nfs-client-provisioner
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: nfs-prod-distribute
+provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment
+parameters:
+ archiveOnDelete: "false"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nfs-client-provisioner
+ labels:
+ app: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system # must match the namespace used by the RBAC objects above
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nfs-client-provisioner
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ app: nfs-client-provisioner
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: nfs-client-provisioner
+ containers:
+ - name: nfs-client-provisioner
+ image: 192.168.1.4:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
+ volumeMounts:
+ - name: nfs-client-root
+ mountPath: /persistentvolumes
+ env:
+ - name: PROVISIONER_NAME
+ value: cmlc-nfs-storage
+ - name: NFS_SERVER
+ value: 192.168.1.6
+ - name: NFS_PATH
+ value: /var/lib/docker/nfs_data
+ volumes:
+ - name: nfs-client-root
+ nfs:
+ server: 192.168.1.6
+ path: /var/lib/docker/nfs_data
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-pvc.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-pvc.yaml
new file mode 100644
index 0000000..df38dc9
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-pvc.yaml
@@ -0,0 +1,76 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: nfs-backend-log-pvc
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: nfs-backend-log-pvc
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: 2.0
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 100Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-emqxs
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+    app.kubernetes.io/version: "2.0"
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-mongo
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-mongo
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+    app.kubernetes.io/version: "2.0"
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 30Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-rabbitmq
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+    app.kubernetes.io/version: "2.0"
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-rabbitmq.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-rabbitmq.yaml
new file mode 100644
index 0000000..26e743e
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-rabbitmq.yaml
@@ -0,0 +1,328 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+automountServiceAccountToken: true
+secrets:
+ - name: helm-rabbitmq
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+type: Opaque
+data:
+ rabbitmq-password: "blljUk45MXIuX2hq"
+ rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-rabbitmq-config
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+data:
+ rabbitmq.conf: |-
+ ## Username and password
+ ##
+ default_user = admin
+ default_pass = nYcRN91r._hj
+ ## Clustering
+ ##
+ cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+ cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+ cluster_formation.node_cleanup.interval = 10
+ cluster_formation.node_cleanup.only_log_warning = true
+ cluster_partition_handling = autoheal
+ # queue master locator
+ queue_master_locator = min-masters
+ # enable guest user
+ loopback_users.guest = false
+ #default_vhost = default-vhost
+ #disk_free_limit.absolute = 50MB
+ #load_definitions = /app/load_definition.json
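+  # note: default_user/default_pass in rabbitmq.conf duplicate the credentials in
+  # the helm-rabbitmq Secret above; keep both in sync when rotating the password.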
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+subjects:
+  - kind: ServiceAccount
+    name: helm-rabbitmq
+    namespace: sc-my-uav-260202
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: helm-rabbitmq-endpoint-reader
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq-headless
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ clusterIP: None
+ ports:
+ - name: epmd
+ port: 4369
+ targetPort: epmd
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ - name: dist
+ port: 25672
+ targetPort: dist
+ - name: dashboard
+ port: 15672
+      targetPort: dashboard # the pod port is named "dashboard"; no "stats" port exists
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-my-uav-260202
+ publishNotReadyAddresses: true
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ type: NodePort
+ ports:
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ nodePort: 35672
+ - name: dashboard
+ port: 15672
+ targetPort: dashboard
+ nodePort: 36675
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-my-uav-260202
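+# note: nodePorts 35672 and 36675 fall outside the default service-node-port range
+# (30000-32767); this assumes the cluster runs with an extended
+# --service-node-port-range.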
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ serviceName: helm-rabbitmq-headless
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-my-uav-260202
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+ annotations:
+ checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
+ checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-rabbitmq
+ affinity: {}
+ securityContext:
+ fsGroup: 5001
+ runAsUser: 5001
+ terminationGracePeriodSeconds: 120
+ initContainers:
+ - name: volume-permissions
+ image: 192.168.1.4:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ args:
+ - -ec
+ - |
+ mkdir -p "/bitnami/rabbitmq/mnesia"
+ chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
+ securityContext:
+ runAsUser: 0
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ containers:
+ - name: rabbitmq
+ image: 192.168.1.4:8033/cmii/rabbitmq:3.9.12-debian-10-r3
+ imagePullPolicy: "Always"
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: K8S_SERVICE_NAME
+ value: "helm-rabbitmq-headless"
+ - name: K8S_ADDRESS_TYPE
+ value: hostname
+ - name: RABBITMQ_FORCE_BOOT
+ value: "no"
+ - name: RABBITMQ_NODE_NAME
+ value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: K8S_HOSTNAME_SUFFIX
+ value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: RABBITMQ_MNESIA_DIR
+ value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
+ - name: RABBITMQ_LDAP_ENABLE
+ value: "no"
+ - name: RABBITMQ_LOGS
+ value: "-"
+ - name: RABBITMQ_ULIMIT_NOFILES
+ value: "65536"
+ - name: RABBITMQ_USE_LONGNAME
+ value: "true"
+ - name: RABBITMQ_ERL_COOKIE
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-erlang-cookie
+ - name: RABBITMQ_LOAD_DEFINITIONS
+ value: "no"
+ - name: RABBITMQ_SECURE_PASSWORD
+ value: "yes"
+ - name: RABBITMQ_USERNAME
+ value: "admin"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-password
+ - name: RABBITMQ_PLUGINS
+ value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
+ ports:
+ - name: amqp
+ containerPort: 5672
+ - name: dist
+ containerPort: 25672
+ - name: dashboard
+ containerPort: 15672
+ - name: epmd
+ containerPort: 4369
+ livenessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q ping
+ initialDelaySeconds: 120
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
+ /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
+ else
+ rabbitmqctl stop_app
+ fi
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: configuration
+ mountPath: /bitnami/rabbitmq/conf
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ volumes:
+ - name: configuration
+ configMap:
+ name: helm-rabbitmq-config
+ items:
+ - key: rabbitmq.conf
+ path: rabbitmq.conf
+ - name: data
+ persistentVolumeClaim:
+ claimName: helm-rabbitmq
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-redis.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-redis.yaml
new file mode 100644
index 0000000..db47fad
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-redis.yaml
@@ -0,0 +1,585 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: true
+metadata:
+ name: helm-redis
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-redis
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ redis-password: "TWNhY2hlQDQ1MjI="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-configuration
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+data:
+ redis.conf: |-
+ # User-supplied common configuration:
+ # Enable AOF https://redis.io/topics/persistence#append-only-file
+ appendonly yes
+ # Disable RDB persistence, AOF persistence already enabled.
+ save ""
+ # End of common configuration
+ master.conf: |-
+ dir /data
+ # User-supplied master configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of master configuration
+ replica.conf: |-
+ dir /data
+ slave-read-only yes
+ # User-supplied replica configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of replica configuration
+---
+# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-health
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+data:
+ ping_readiness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+ ping_liveness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+---
+# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-scripts
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+data:
+ start-master.sh: |
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
+ exec redis-server "${ARGS[@]}"
+ start-replica.sh: |
+ #!/bin/bash
+
+ get_port() {
+ hostname="$1"
+ type="$2"
+
+ port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
+ port=${!port_var}
+
+ if [ -z "$port" ]; then
+ case $type in
+ "SENTINEL")
+ echo 26379
+ ;;
+ "REDIS")
+ echo 6379
+ ;;
+ esac
+ else
+ echo $port
+ fi
+ }
+
+ get_full_hostname() {
+ hostname="$1"
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ }
+
+ REDISPORT=$(get_port "$HOSTNAME" "REDIS")
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+
+ echo "" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
+ exec redis-server "${ARGS[@]}"
+---
+# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-headless
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+---
+# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-master
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ type: ClusterIP
+
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-replicas
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ type: ClusterIP
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/component: replica
+---
+# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-master
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ affinity: {}
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ imagePullSecrets:
+ - name: harborsecret
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 192.168.1.4:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-master.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: master
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ # One second longer than command timeout should prevent generation of zombie processes.
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc/
+ - name: tmp
+ mountPath: /tmp
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: tmp
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
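+        # note: redis-data is an emptyDir, so the AOF enabled in
+        # helm-redis-configuration does not survive pod rescheduling; a
+        # PersistentVolumeClaim would be needed for durable persistence.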
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-replicas
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/component: replica
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 192.168.1.4:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-replica.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: slave
+ - name: REDIS_MASTER_HOST
+ value: helm-redis-master-0.helm-redis-headless.sc-my-uav-260202.svc.cluster.local
+ - name: REDIS_MASTER_PORT_NUMBER
+ value: "6379"
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local_and_master.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local_and_master.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+
diff --git a/agent-common/real_project/sc-my-uav-260202/k8s-srs.yaml b/agent-common/real_project/sc-my-uav-260202/k8s-srs.yaml
new file mode 100644
index 0000000..dd5e2cc
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/k8s-srs.yaml
@@ -0,0 +1,496 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-srs-cm
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: live-srs
+ cmii.type: live
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+data:
+ srs.rtc.conf: |-
+ listen 31935;
+ max_connections 4096;
+ srs_log_tank console;
+ srs_log_level info;
+ srs_log_file /home/srs.log;
+ daemon off;
+ http_api {
+ enabled on;
+ listen 1985;
+ crossdomain on;
+ }
+ stats {
+ network 0;
+ }
+ http_server {
+ enabled on;
+ listen 8080;
+ dir /home/hls;
+ }
+ srt_server {
+ enabled on;
+ listen 30556;
+ maxbw 1000000000;
+ connect_timeout 4000;
+ peerlatency 600;
+ recvlatency 600;
+ }
+ rtc_server {
+ enabled on;
+ listen 30090;
+ candidate $CANDIDATE;
+ }
+ vhost __defaultVhost__ {
+ http_hooks {
+ enabled on;
+ on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
+ }
+ http_remux {
+ enabled on;
+ }
+ rtc {
+ enabled on;
+ rtmp_to_rtc on;
+ rtc_to_rtmp on;
+ keep_bframe off;
+ }
+ tcp_nodelay on;
+ min_latency on;
+ play {
+ gop_cache off;
+ mw_latency 100;
+ mw_msgs 10;
+ }
+ publish {
+ firstpkt_timeout 8000;
+ normal_timeout 4000;
+ mr on;
+ }
+ dvr {
+ enabled off;
+ dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
+ dvr_plan session;
+ }
+ hls {
+ enabled on;
+ hls_path /home/hls;
+ hls_fragment 10;
+ hls_window 60;
+ hls_m3u8_file [app]/[stream].m3u8;
+ hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
+ hls_cleanup on;
+ hls_entry_prefix http://36.133.66.183:8088;
+ }
+ }
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc-exporter
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ nodePort: 31935
+ - name: rtc
+ protocol: UDP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: rtc-tcp
+ protocol: TCP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: srt
+ protocol: UDP
+ port: 30556
+ targetPort: 30556
+ nodePort: 30556
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ nodePort: 30080
+ selector:
+ srs-role: rtc
+ type: NodePort
+ sessionAffinity: None
+ externalTrafficPolicy: Cluster
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8080
+ targetPort: 8080
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srsrtc-svc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: helm-live-srs-rtc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-srs
+ cmii.type: live
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+ srs-role: rtc
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ srs-role: rtc
+ template:
+ metadata:
+ labels:
+ srs-role: rtc
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-srs-cm
+ items:
+ - key: srs.rtc.conf
+ path: docker.conf
+ defaultMode: 420
+ - name: srs-vol
+ emptyDir:
+ sizeLimit: 8Gi
+ containers:
+ - name: srs-rtc
+ image: 192.168.1.4:8033/cmii/srs:v5.0.195
+ ports:
+ - name: srs-rtmp
+ containerPort: 31935
+ protocol: TCP
+ - name: srs-api
+ containerPort: 1985
+ protocol: TCP
+ - name: srs-flv
+ containerPort: 8080
+ protocol: TCP
+ - name: srs-webrtc
+ containerPort: 30090
+ protocol: UDP
+ - name: srs-webrtc-tcp
+ containerPort: 30090
+ protocol: TCP
+ - name: srs-srt
+ containerPort: 30556
+ protocol: UDP
+ env:
+ - name: CANDIDATE
+ value: 36.133.66.183
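+            # SRS substitutes $CANDIDATE in srs.rtc.conf with this value, so the
+            # advertised WebRTC candidate is this node's public IP.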
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /usr/local/srs/conf/docker.conf
+ subPath: docker.conf
+ - name: srs-vol
+ mountPath: /home/dvr
+ subPath: sc-my-uav-260202/helm-live/dvr
+ - name: srs-vol
+ mountPath: /home/hls
+ subPath: sc-my-uav-260202/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ - name: oss-adaptor
+ image: 192.168.1.4:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
+ env:
+ - name: OSS_ENDPOINT
+ value: 'http://helm-minio:9000'
+ - name: OSS_AK
+ value: cmii
+ - name: OSS_SK
+ value: 'B#923fC7mk'
+ - name: OSS_BUCKET
+ value: live-cluster-hls
+ - name: SRS_OP
+ value: 'http://helm-live-op-svc-v2:8080'
+ - name: MYSQL_ENDPOINT
+ value: 'helm-mysql:3306'
+ - name: MYSQL_USERNAME
+ value: k8s_admin
+ - name: MYSQL_PASSWORD
+ value: fP#UaH6qQ3)8
+ - name: MYSQL_DATABASE
+ value: cmii_live_srs_op
+ - name: MYSQL_TABLE
+ value: live_segment
+ - name: LOG_LEVEL
+ value: info
+ - name: OSS_META
+ value: 'yes'
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-vol
+ mountPath: /cmii/share/hls
+ subPath: sc-my-uav-260202/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ serviceName: helm-live-srsrtc-svc
+ podManagementPolicy: OrderedReady
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ partition: 0
+ revisionHistoryLimit: 10
+---
+# live-srs section
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: helm-live-op-v2
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+ helm.sh/chart: cmlc-live-live-op-2.0.0
+ live-role: op-v2
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ live-role: op-v2
+ template:
+ metadata:
+ labels:
+ live-role: op-v2
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-op-cm-v2
+ items:
+ - key: live.op.conf
+ path: bootstrap.yaml
+ defaultMode: 420
+ containers:
+ - name: helm-live-op-v2
+ image: 192.168.1.4:8033/cmii/cmii-live-operator:5.2.0
+ ports:
+ - name: operator
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 4800m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /cmii/bootstrap.yaml
+ subPath: bootstrap.yaml
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 25%
+ maxSurge: 25%
+ revisionHistoryLimit: 10
+ progressDeadlineSeconds: 600
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc-v2
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ nodePort: 30333
+ selector:
+ live-role: op-v2
+ type: NodePort
+ sessionAffinity: None
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ live-role: op
+ type: ClusterIP
+ sessionAffinity: None
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-op-cm-v2
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+data:
+ live.op.conf: |-
+ server:
+ port: 8080
+ spring:
+ main:
+ allow-bean-definition-overriding: true
+ allow-circular-references: true
+ application:
+ name: cmii-live-operator
+ platform:
+ info:
+ name: cmii-live-operator
+ description: cmii-live-operator
+ version: 2.0
+ scanPackage: com.cmii.live.op
+ cloud:
+ nacos:
+ config:
+ username: nacos
+ password: KingKong@95461234
+ server-addr: helm-nacos:8848
+ extension-configs:
+ - data-id: cmii-live-operator.yml
+ group: 2.0
+ refresh: true
+ shared-configs:
+ - data-id: cmii-backend-system.yml
+ group: 2.0
+ refresh: true
+ discovery:
+ enabled: false
+
+ live:
+ engine:
+ type: srs
+ endpoint: 'http://helm-live-srs-svc:1985'
+ proto:
+ rtmp: 'rtmp://36.133.66.183:31935'
+ rtsp: 'rtsp://36.133.66.183:30554'
+ srt: 'srt://36.133.66.183:30556'
+ flv: 'http://36.133.66.183:30500'
+ hls: 'http://36.133.66.183:30500'
+ rtc: 'webrtc://36.133.66.183:30080'
+ replay: 'https://36.133.66.183:30333'
+ minio:
+ endpoint: http://helm-minio:9000
+ access-key: cmii
+ secret-key: B#923fC7mk
+ bucket: live-cluster-hls
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-backend.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-backend.yaml
new file mode 100644
index 0000000..e237eaa
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-backend.yaml
@@ -0,0 +1,350 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-sky-converge
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+                      - "260202"
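+      # note: this nodeAffinity restricts scheduling to nodes labeled
+      # uavcloud.env=260202.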
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-sky-converge
+ image: 192.168.1.4:8033/cmii/cmii-sky-converge:2.0.0-012601
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: sc-my-uav-260202
+ - name: APPLICATION_NAME
+ value: cmii-sky-converge
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+            - name: BIZ_CONFIG_GROUP
+              value: "2.0"
+            - name: SYS_CONFIG_GROUP
+              value: "2.0"
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.1.4:8033/cmii/cmii-sky-converge:2.0.0-012601
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: sc-my-uav-260202/cmii-sky-converge
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-sky-converge
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-fly-center
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+                      - "260202"
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-fly-center
+ image: 192.168.1.4:8033/cmii/cmii-fly-center:2.0.0-0126
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: sc-my-uav-260202
+ - name: APPLICATION_NAME
+ value: cmii-fly-center
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+            - name: BIZ_CONFIG_GROUP
+              value: "2.0"
+            - name: SYS_CONFIG_GROUP
+              value: "2.0"
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.1.4:8033/cmii/cmii-fly-center:2.0.0-0126
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: sc-my-uav-260202/cmii-fly-center
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-fly-center
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-configmap.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-configmap.yaml
new file mode 100644
index 0000000..b97a516
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-configmap.yaml
@@ -0,0 +1,672 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervision
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "supervision",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-securityh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "securityh5",
+ AppClientId: "APP_N3ImO0Ubfu9peRHD"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-jiangsuwenlv
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "jiangsuwenlv",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qingdao
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "qingdao",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smauth
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "smauth",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-cmsportal
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "cmsportal",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mws
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "mws",
+ AppClientId: "APP_uKniXPELlRERBBwK"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-traffic
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "traffic",
+ AppClientId: "APP_Jc8i2wOQ1t73QEJS"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hljtt
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "hljtt",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-blockchain
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "blockchain",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-armypeople
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "armypeople",
+ AppClientId: "APP_UIegse6Lfou9pO1U"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-detection
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "detection",
+ AppClientId: "APP_FDHW2VLVDWPnnOCy"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-seniclive
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "seniclive",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dispatchh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "dispatchh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hyper
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "hyper",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-scanner
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "scanner",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-secenter
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "secenter",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-renyike
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "renyike",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-base
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "base",
+ AppClientId: "APP_9LY41OaKSqk2btY0"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-open
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "open",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qinghaitourism
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "qinghaitourism",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uavmsmanager
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "uavmsmanager",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-flight-control
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "flight-control",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-iot
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "iot",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervisionh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "supervisionh5",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pangu
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-ai-brain
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "ai-brain",
+ AppClientId: "APP_rafnuCAmBESIVYMH"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-oms
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "oms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uas
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "uas",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pilot2cloud
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "pilot2cloud",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dikongzhixingh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "dikongzhixingh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-awareness
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "awareness",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-security
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "security",
+ AppClientId: "APP_JUSEMc7afyWXxvE7"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-share
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "share",
+ AppClientId: "APP_4lVSVI0ZGxTssir8"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-splice
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "splice",
+ AppClientId: "APP_zE0M3sTRXrCIJS8Y"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-visualization
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "visualization",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uasms
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "uasms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smsecret
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "smsecret",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-classification
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "classification",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-emergency
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "emergency",
+ AppClientId: "APP_aGsTAY1uMZrpKdfk"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-media
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "media",
+ AppClientId: "APP_4AU8lbifESQO4FD6"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-eventsh5
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "eventsh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mianyangbackend
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "mianyangbackend",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-logistics
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "logistics",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-multiterminal
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "multiterminal",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-threedsimulation
+ namespace: sc-my-uav-260202
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260202",
+ CloudHOST: "36.133.66.183:8088",
+ ApplicationShortName: "threedsimulation",
+ AppClientId: "empty"
+ }
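+# NOTE: Each frontend is expected to mount its ConfigMap's ingress-config.js
+# into the served web root so the page can read __GlobalIngressConfig at load
+# time. A minimal sanity check (a sketch, assuming kubectl access; the
+# backslash-escaped dot is required for jsonpath keys that contain dots):
+#
+#   kubectl -n sc-my-uav-260202 get cm tenant-prefix-splice \
+#     -o jsonpath='{.data.ingress-config\.js}'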
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-dashboard.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-dashboard.yaml
new file mode 100644
index 0000000..035b36a
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-dashboard.yaml
@@ -0,0 +1,315 @@
+---
+# ------------------- Dashboard Namespace ------------------- #
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: kubernetes-dashboard
+
+---
+# ------------------- Service Account ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Service (NodePort 39999) ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ type: NodePort
+ ports:
+ - port: 443
+ targetPort: 8443
+ nodePort: 39999
+ selector:
+ k8s-app: kubernetes-dashboard
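+# With this NodePort the Dashboard should be reachable at
+# https://<any-node-ip>:39999 (self-signed certificate, since the Deployment
+# below runs with --auto-generate-certificates).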
+
+---
+# ------------------- Dashboard Secrets ------------------- #
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-certs
+ namespace: kubernetes-dashboard
+type: Opaque
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-csrf
+ namespace: kubernetes-dashboard
+type: Opaque
+data:
+ csrf: ""
+
+---
+# ------------------- Dashboard Role (FIXED) ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+rules:
+  # [FIX] Allow creating Secrets to resolve the startup panic
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create"]
+  # Allow operations on the specific Secrets
+ - apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+ verbs: ["get", "update", "delete"]
+  # ConfigMap permissions
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ resourceNames: ["kubernetes-dashboard-settings"]
+ verbs: ["get", "update"]
+  # Metrics permissions
+ - apiGroups: [""]
+ resources: ["services"]
+ resourceNames: ["heapster", "dashboard-metrics-scraper"]
+ verbs: ["proxy"]
+ - apiGroups: [""]
+ resources: ["services/proxy"]
+ resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+ verbs: ["get"]
+
+---
+# ------------------- Dashboard RoleBinding ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kubernetes-dashboard-minimal
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: kubernetes-dashboard
+ template:
+ metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ spec:
+ containers:
+ - name: kubernetes-dashboard
+ image: 192.168.1.4:8033/cmii/dashboard:v2.7.0
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8443
+ protocol: TCP
+ args:
+ - --auto-generate-certificates
+ - --namespace=kubernetes-dashboard
+ volumeMounts:
+ - name: kubernetes-dashboard-certs
+ mountPath: /certs
+ - mountPath: /tmp
+ name: tmp-volume
+ livenessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /
+ port: 8443
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ volumes:
+ - name: kubernetes-dashboard-certs
+ secret:
+ secretName: kubernetes-dashboard-certs
+ - name: tmp-volume
+ emptyDir: {}
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ------------------- Metrics Scraper Service ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ ports:
+ - port: 8000
+ targetPort: 8000
+ selector:
+ k8s-app: dashboard-metrics-scraper
+
+---
+# ------------------- Metrics Scraper Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: dashboard-metrics-scraper
+ template:
+ metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ annotations:
+ seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
+ spec:
+ containers:
+ - name: dashboard-metrics-scraper
+ image: 192.168.1.4:8033/cmii/metrics-scraper:v1.0.8
+ ports:
+ - containerPort: 8000
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 8000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-volume
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ volumes:
+ - name: tmp-volume
+ emptyDir: {}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ==================================================================
+# Custom user configuration (ADMIN & READ-ONLY)
+# ==================================================================
+
+# ------------------- 1. Admin User (full privileges) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: admin-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: admin-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: admin-user
+ namespace: kubernetes-dashboard
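+# A sketch of obtaining a login token for admin-user (assumes kubectl >= 1.24,
+# where `kubectl create token` exists; on older clusters, read the
+# ServiceAccount's token Secret instead):
+#
+#   kubectl -n kubernetes-dashboard create token admin-user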
+
+---
+# ------------------- 2. Read-Only User (read-only + log access) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: read-only-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: dashboard-view-with-logs
+rules:
+ - apiGroups: [""]
+ resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumeclaims", "persistentvolumes", "namespaces"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["pods/log"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["apps"]
+ resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["batch"]
+ resources: ["cronjobs", "jobs"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["ingresses", "networkpolicies"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["events.k8s.io"]
+ resources: ["events"]
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: read-only-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: dashboard-view-with-logs
+subjects:
+ - kind: ServiceAccount
+ name: read-only-user
+ namespace: kubernetes-dashboard
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-emqx.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-emqx.yaml
new file mode 100644
index 0000000..e28b83f
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-emqx.yaml
@@ -0,0 +1,664 @@
+---
+# ============== Secret - credentials ==============
+apiVersion: v1
+kind: Secret
+metadata:
+ name: emqx-credentials
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+type: Opaque
+stringData:
+  # Dashboard admin password
+ dashboard-admin-password: "odD8#Ve7.B"
+  # MQTT user password
+ mqtt-admin-password: "odD8#Ve7.B"
+
+---
+# ============== ServiceAccount ==============
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+
+---
+# ============== Role - RBAC ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+rules:
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ - pods
+ verbs:
+ - get
+ - watch
+ - list
+
+---
+# ============== RoleBinding ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+subjects:
+ - kind: ServiceAccount
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+roleRef:
+ kind: Role
+ name: helm-emqxs
+ apiGroup: rbac.authorization.k8s.io
+
+---
+# ============== ConfigMap - bootstrap configuration ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-bootstrap-config
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+data:
+  # Main config file - overrides the defaults
+ emqx.conf: |
+    # Node settings
+ node {
+ name = "emqx@${POD_NAME}.helm-emqxs-headless.sc-my-uav-260202.svc.cluster.local"
+ cookie = "emqx-cluster-cookie-secret"
+ data_dir = "/opt/emqx/data"
+ }
+
+    # Cluster settings
+ cluster {
+ name = emqxcl
+      # manual is recommended for a single node; use k8s for multi-node clusters
+ discovery_strategy = manual
+ k8s {
+ apiserver = "https://kubernetes.default.svc.cluster.local:443"
+ service_name = "helm-emqxs-headless"
+        # address_type may be changed to hostname
+ address_type = dns
+ namespace = "sc-my-uav-260202"
+ suffix = "svc.cluster.local"
+ }
+ }
+
+    # Logging
+ log {
+ console {
+ enable = true
+ level = info
+ }
+ file {
+ enable = true
+ level = warning
+ path = "/opt/emqx/log"
+ }
+ }
+
+    # Dashboard
+ dashboard {
+ listeners.http {
+ bind = "0.0.0.0:18083"
+ }
+ default_username = "admin"
+ default_password = "public"
+ }
+
+    # Listeners
+ listeners.tcp.default {
+ bind = "0.0.0.0:1883"
+ max_connections = 1024000
+ }
+
+ listeners.ws.default {
+ bind = "0.0.0.0:8083"
+ max_connections = 1024000
+ websocket.mqtt_path = "/mqtt"
+ }
+
+ listeners.ssl.default {
+ bind = "0.0.0.0:8883"
+ max_connections = 512000
+ }
+
+    # Authentication - built-in database
+ authentication = [
+ {
+ mechanism = password_based
+ backend = built_in_database
+ user_id_type = username
+ password_hash_algorithm {
+ name = sha256
+ salt_position = suffix
+ }
+        # Bootstrap file path - seeds the initial users
+ bootstrap_file = "/opt/emqx/data/bootstrap_users.json"
+ bootstrap_type = plain
+ }
+ ]
+
+    # Authorization
+ authorization {
+ no_match = deny
+ deny_action = disconnect
+
+ sources = [
+ {
+ type = built_in_database
+ enable = true
+ }
+ ]
+ }
+
+    # MQTT protocol settings
+ mqtt {
+ max_packet_size = "1MB"
+ max_clientid_len = 65535
+ max_topic_levels = 128
+ max_qos_allowed = 2
+ max_topic_alias = 65535
+ retain_available = true
+ wildcard_subscription = true
+ shared_subscription = true
+ }
+
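+# A quick check that the rendered config was actually picked up (a sketch,
+# assuming the single StatefulSet pod is named helm-emqxs-0 and the image
+# ships the emqx ctl CLI):
+#
+#   kubectl -n sc-my-uav-260202 exec helm-emqxs-0 -c emqx -- emqx ctl status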
+---
+# ============== ConfigMap - Users & ACL (严格 JSON 格式) ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-bootstrap-users
+ namespace: sc-my-uav-260202
+data:
+ bootstrap_users.json: |
+ [
+ { "user_id": "admin", "password": "odD8#Ve7.B", "is_superuser": true },
+ { "user_id": "cmlc", "password": "odD8#Ve7.B", "is_superuser": false }
+ ]
+
+  # [CHANGE] Since jq is available, use a standard JSON array here - the least error-prone format
+ bootstrap_acl.json: |
+ [
+ {
+ "username": "admin",
+ "rules": [
+ {"action": "all", "permission": "allow", "topic": "#"}
+ ]
+ },
+ {
+ "username": "cmlc",
+ "rules": [
+ {"action": "publish", "permission": "allow", "topic": "#"},
+ {"action": "subscribe", "permission": "allow", "topic": "#"}
+ ]
+ }
+ ]
+
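+# Once EMQX is up, the seeded MQTT credentials can be smoke-tested through the
+# NodePort (a sketch, assuming the mosquitto clients are installed; 31883 is
+# the MQTT nodePort defined at the end of this file):
+#
+#   mosquitto_pub -h <node-ip> -p 31883 -u cmlc -P 'odD8#Ve7.B' \
+#     -t demo/topic -m hello
+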
+---
+# ============== ConfigMap - init script (fixed) ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-init-dashboard
+ namespace: sc-my-uav-260202
+data:
+ init-dashboard.sh: |
+ #!/bin/bash
+ set -e
+
+ DASHBOARD_USER="admin"
+ DASHBOARD_PASS="${DASHBOARD_ADMIN_PASSWORD}"
+ EMQX_API="http://localhost:18083/api/v5"
+ ACL_FILE="/bootstrap/bootstrap_acl.json"
+
+    # Helper: print a timestamped log line
+ log() {
+ echo "[$(date +'%H:%M:%S')] $1"
+ }
+
+ log "======================================"
+ log "初始化 Dashboard 与 ACL (Debug Version)"
+ log "======================================"
+
+ # ----------------------------------------------------------------
+    # 1. Wait for the EMQX API
+ # ----------------------------------------------------------------
+ log "[1/4] 等待 EMQX API 就绪..."
+ for i in $(seq 1 60); do
+ if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
+ log "✓ EMQX API 已就绪"
+ break
+ fi
+ if [ $i -eq 60 ]; then
+ log "✗ EMQX API 启动超时"
+ exit 1
+ fi
+ sleep 5
+ done
+
+ # ----------------------------------------------------------------
+    # 2. Update the Dashboard password
+ # ----------------------------------------------------------------
+ log "[2/4] 检查/更新 Dashboard 密码..."
+
+    # Obtain a token (try the default password first)
+ LOGIN_RESP=$(curl -s -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"public\"}")
+
+ TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+ if [ -n "$TOKEN" ]; then
+ log " 检测到默认密码,正在更新..."
+ curl -s -f -X POST "${EMQX_API}/users/${DASHBOARD_USER}/change_pwd" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "{\"old_pwd\":\"public\",\"new_pwd\":\"${DASHBOARD_PASS}\"}"
+ log " ✓ Dashboard 密码已更新"
+ else
+ log " ℹ 无法使用默认密码登录,跳过更新(可能已修改)"
+ fi
+
+ # ----------------------------------------------------------------
+    # 3. Import the ACL rules
+ # ----------------------------------------------------------------
+ echo "[3/3] 导入ACL规则..."
+
+    # Log in again to obtain a fresh token
+ LOGIN_RESP=$(curl -sS -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}")
+
+ TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+ if [ -z "$TOKEN" ]; then
+ echo " ✗ 无法获取Token,请检查密码设置"
+ exit 0
+ fi
+
+ if [ -f "$ACL_FILE" ]; then
+ echo " 正在解析 ACL 文件: $ACL_FILE"
+
+ if ! jq -e . "$ACL_FILE" >/dev/null 2>&1; then
+ echo " ✗ ACL 文件 JSON 格式错误,跳过处理"
+ exit 0
+ fi
+
+ jq -c '.[]' "$ACL_FILE" | while read -r user_config; do
+ USERNAME=$(echo "$user_config" | jq -r '.username // empty')
+
+        # Both PUT and POST require username + rules (username is mandatory)
+ REQ_BODY=$(echo "$user_config" | jq -c '{username: .username, rules: .rules}')
+
+ if [ -z "$USERNAME" ]; then
+ echo " ✗ ACL 条目缺少 username,跳过"
+ continue
+ fi
+
+ echo " 配置用户 ${USERNAME} 的ACL规则..."
+
+        # 1) Try PUT first (overwrite update)
+ http_code=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+ -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "$REQ_BODY")
+
+ if [ "$http_code" = "204" ]; then
+ echo " ✓ PUT 更新成功"
+ elif [ "$http_code" = "404" ]; then
+          # 2) If the rule set does not exist yet, create it with POST
+ http_code2=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+ -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "$REQ_BODY")
+
+ if [ "$http_code2" = "204" ]; then
+ echo " ✓ POST 创建成功"
+ else
+ echo " ✗ POST 失败 (HTTP ${http_code2}):$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+ exit 1
+ fi
+ else
+ echo " ✗ PUT 失败 (HTTP ${http_code}):$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+ exit 1
+ fi
+
+        # 3) Verify after import (optional, but strongly recommended)
+ verify_code=$(curl -sS -o /tmp/emqx_acl_verify.json -w '%{http_code}' \
+ -H "Authorization: Bearer ${TOKEN}" \
+ "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}")
+
+ if [ "$verify_code" = "200" ]; then
+ echo " ✓ 验证成功:$(cat /tmp/emqx_acl_verify.json | jq -c '.')"
+ else
+ echo " ✗ 验证失败 (HTTP ${verify_code}):$(cat /tmp/emqx_acl_verify.json 2>/dev/null || true)"
+ exit 1
+ fi
+ done
+
+ echo " ✓ ACL 规则导入完成"
+ else
+ echo " ℹ 未找到 ACL 文件"
+ fi
+
+---
+# ============== StatefulSet ==============
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ replicas: 1
+ serviceName: helm-emqxs-headless
+ podManagementPolicy: Parallel
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+
+ template:
+ metadata:
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - 260202
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: cmii.app
+ operator: In
+ values:
+ - helm-emqxs
+ topologyKey: kubernetes.io/hostname
+
+ imagePullSecrets:
+ - name: harborsecret
+
+ serviceAccountName: helm-emqxs
+
+ securityContext:
+ fsGroup: 1000
+ runAsUser: 1000
+
+      # InitContainer - prepare the bootstrap files
+ initContainers:
+ - name: prepare-bootstrap
+          # Dynamically selected tools image
+ image: 192.168.1.4:8033/cmii/tools:1.0
+ imagePullPolicy: IfNotPresent
+ # =========================================================
+          # Permissions: must run as root so the chown below can succeed
+ # =========================================================
+ securityContext:
+ runAsUser: 0
+ command:
+ - /bin/sh
+ - -c
+ - |
+ echo "准备bootstrap文件..."
+
+              # Create the data directory
+ mkdir -p /opt/emqx/data
+
+              # Copy the bootstrap file into the data directory
+              # Copy only when it is absent, to avoid overwriting existing data
+ if [ ! -f /opt/emqx/data/bootstrap_users.json ]; then
+ cp /bootstrap-src/bootstrap_users.json /opt/emqx/data/
+ echo "✓ 已复制用户bootstrap文件"
+ else
+ echo "ℹ 用户bootstrap文件已存在,跳过"
+ fi
+
+              # Fix ownership (running as root here, so this succeeds)
+ chown -R 1000:1000 /opt/emqx/data
+
+ echo "✓ Bootstrap准备完成"
+ volumeMounts:
+ - name: emqx-data
+ mountPath: /opt/emqx/data
+ - name: bootstrap-users
+ mountPath: /bootstrap-src
+
+ containers:
+        # Main container - EMQX
+ - name: emqx
+          # Dynamically selected emqx image
+ image: 192.168.1.4:8033/cmii/emqx:5.8.8
+ imagePullPolicy: IfNotPresent
+
+ env:
+            # Pod metadata
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: EMQX_DATA_DIR
+ value: "/opt/emqx/data"
+
+ ports:
+ - name: mqtt
+ containerPort: 1883
+ - name: mqttssl
+ containerPort: 8883
+ - name: ws
+ containerPort: 8083
+ - name: dashboard
+ containerPort: 18083
+ - name: ekka
+ containerPort: 4370
+
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "512Mi"
+ limits:
+ cpu: "2000m"
+ memory: "2Gi"
+
+ livenessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ timeoutSeconds: 10
+ failureThreshold: 3
+
+ readinessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+
+ startupProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ failureThreshold: 30
+
+ volumeMounts:
+ - name: emqx-data
+ mountPath: /opt/emqx/data
+            # Mount the single config file via subPath so the directory is not shadowed
+ - name: bootstrap-config
+ mountPath: /opt/emqx/etc/emqx.conf
+ subPath: emqx.conf
+
+        # Sidecar - initialize the Dashboard password and ACLs
+ - name: init-dashboard
+          # Dynamically selected tools image
+ image: 192.168.1.4:8033/cmii/tools:1.0
+ imagePullPolicy: IfNotPresent
+
+ command:
+ - /bin/sh
+ - -c
+ - |
+              # Wait for the main container to come up
+              echo "Waiting for EMQX to start..."
+ sleep 20
+
+              # Run the initialization script
+ /bin/sh /scripts/init-dashboard.sh
+
+              # Keep the sidecar alive
+              echo "Initialization complete; entering idle loop..."
+ while true; do sleep 3600; done
+
+ env:
+ - name: DASHBOARD_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: emqx-credentials
+ key: dashboard-admin-password
+
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "64Mi"
+ limits:
+ cpu: "200m"
+ memory: "128Mi"
+
+ volumeMounts:
+ - name: init-script
+ mountPath: /scripts
+ - name: bootstrap-users
+ mountPath: /bootstrap
+
+ volumes:
+ - name: bootstrap-config
+ configMap:
+ name: emqx-bootstrap-config
+ - name: bootstrap-users
+ configMap:
+ name: emqx-bootstrap-users
+ - name: init-script
+ configMap:
+ name: emqx-init-dashboard
+ defaultMode: 0755
+ - name: emqx-data
+ persistentVolumeClaim:
+ claimName: helm-emqxs
+
+---
+# ============== Service - Headless ==============
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs-headless
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ targetPort: 1883
+ - name: mqttssl
+ port: 8883
+ targetPort: 8883
+ - name: ws
+ port: 8083
+ targetPort: 8083
+ - name: dashboard
+ port: 18083
+ targetPort: 18083
+ - name: ekka
+ port: 4370
+ targetPort: 4370
+
+---
+# ============== Service - NodePort ==============
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: NodePort
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ targetPort: 1883
+ nodePort: 31883
+ - name: dashboard
+ port: 18083
+ targetPort: 18083
+ nodePort: 38085
+ - name: ws
+ port: 8083
+ targetPort: 8083
+ nodePort: 38083
+ - name: mqttssl
+ port: 8883
+ targetPort: 8883
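+# Client-facing endpoints exposed by this Service (<node-ip> is any cluster
+# node reachable from the client):
+#   MQTT:      tcp://<node-ip>:31883
+#   WebSocket: ws://<node-ip>:38083/mqtt   (path set by websocket.mqtt_path)
+#   Dashboard: http://<node-ip>:38085
+# mqttssl (8883) has no fixed nodePort, so Kubernetes allocates one from the
+# NodePort range.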
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-frontend.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-frontend.yaml
new file mode 100644
index 0000000..8366b35
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-frontend.yaml
@@ -0,0 +1,25 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: nginx-cm
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: frontend
+data:
+ nginx.conf: |
+ server {
+ listen 9528;
+ server_name localhost;
+ gzip on;
+
+ location / {
+ root /home/cmii-platform/dist;
+ index index.html index.htm;
+ }
+
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ root html;
+ }
+ }
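+# A minimal syntax check once a frontend pod is running (a sketch; assumes the
+# pod image ships the nginx binary and mounts this ConfigMap as its config):
+#
+#   kubectl -n sc-my-uav-260202 exec <frontend-pod> -- nginx -t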
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-ingress.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-ingress.yaml
new file mode 100644
index 0000000..28dd373
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-ingress.yaml
@@ -0,0 +1,826 @@
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: frontend-applications-ingress
+ namespace: sc-my-uav-260202
+ labels:
+ type: frontend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ rewrite ^(/supervision)$ $1/ redirect;
+ rewrite ^(/supervisionh5)$ $1/ redirect;
+ rewrite ^(/pangu)$ $1/ redirect;
+ rewrite ^(/ai-brain)$ $1/ redirect;
+ rewrite ^(/armypeople)$ $1/ redirect;
+ rewrite ^(/awareness)$ $1/ redirect;
+ rewrite ^(/base)$ $1/ redirect;
+ rewrite ^(/blockchain)$ $1/ redirect;
+ rewrite ^(/classification)$ $1/ redirect;
+ rewrite ^(/cmsportal)$ $1/ redirect;
+ rewrite ^(/detection)$ $1/ redirect;
+ rewrite ^(/dikongzhixingh5)$ $1/ redirect;
+ rewrite ^(/dispatchh5)$ $1/ redirect;
+ rewrite ^(/emergency)$ $1/ redirect;
+ rewrite ^(/eventsh5)$ $1/ redirect;
+ rewrite ^(/flight-control)$ $1/ redirect;
+ rewrite ^(/hljtt)$ $1/ redirect;
+ rewrite ^(/hyper)$ $1/ redirect;
+ rewrite ^(/iot)$ $1/ redirect;
+ rewrite ^(/jiangsuwenlv)$ $1/ redirect;
+ rewrite ^(/logistics)$ $1/ redirect;
+ rewrite ^(/media)$ $1/ redirect;
+ rewrite ^(/mianyangbackend)$ $1/ redirect;
+ rewrite ^(/multiterminal)$ $1/ redirect;
+ rewrite ^(/mws)$ $1/ redirect;
+ rewrite ^(/oms)$ $1/ redirect;
+ rewrite ^(/open)$ $1/ redirect;
+ rewrite ^(/pilot2cloud)$ $1/ redirect;
+ rewrite ^(/qingdao)$ $1/ redirect;
+ rewrite ^(/qinghaitourism)$ $1/ redirect;
+ rewrite ^(/renyike)$ $1/ redirect;
+ rewrite ^(/scanner)$ $1/ redirect;
+ rewrite ^(/security)$ $1/ redirect;
+ rewrite ^(/securityh5)$ $1/ redirect;
+ rewrite ^(/seniclive)$ $1/ redirect;
+ rewrite ^(/share)$ $1/ redirect;
+ rewrite ^(/smauth)$ $1/ redirect;
+ rewrite ^(/smsecret)$ $1/ redirect;
+ rewrite ^(/splice)$ $1/ redirect;
+ rewrite ^(/threedsimulation)$ $1/ redirect;
+ rewrite ^(/traffic)$ $1/ redirect;
+      rewrite ^(/uas)$ $1/ redirect;
+      rewrite ^(/uasms)$ $1/ redirect;
+ rewrite ^(/visualization)$ $1/ redirect;
+ rewrite ^(/uavmsmanager)$ $1/ redirect;
+ rewrite ^(/secenter)$ $1/ redirect;
+spec:
+ rules:
+ - host: fake-domain.sc-my-uav-260202.io
+ http:
+ paths:
+ - path: /260202/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /260202/supervision/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervision
+ servicePort: 9528
+ - path: /260202/supervisionh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervisionh5
+ servicePort: 9528
+ - path: /260202/pangu/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /260202/ai-brain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-ai-brain
+ servicePort: 9528
+ - path: /260202/armypeople/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-armypeople
+ servicePort: 9528
+ - path: /260202/awareness/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-awareness
+ servicePort: 9528
+ - path: /260202/base/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-base
+ servicePort: 9528
+ - path: /260202/blockchain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-blockchain
+ servicePort: 9528
+ - path: /260202/classification/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-classification
+ servicePort: 9528
+ - path: /260202/cmsportal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-cms-portal
+ servicePort: 9528
+ - path: /260202/detection/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-detection
+ servicePort: 9528
+ - path: /260202/dikongzhixingh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dikongzhixingh5
+ servicePort: 9528
+ - path: /260202/dispatchh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dispatchh5
+ servicePort: 9528
+ - path: /260202/emergency/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-emergency-rescue
+ servicePort: 9528
+ - path: /260202/eventsh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-eventsh5
+ servicePort: 9528
+ - path: /260202/flight-control/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-flight-control
+ servicePort: 9528
+ - path: /260202/hljtt/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hljtt
+ servicePort: 9528
+ - path: /260202/hyper/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hyperspectral
+ servicePort: 9528
+ - path: /260202/iot/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-iot-manager
+ servicePort: 9528
+ - path: /260202/jiangsuwenlv/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-jiangsuwenlv
+ servicePort: 9528
+ - path: /260202/logistics/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-logistics
+ servicePort: 9528
+ - path: /260202/media/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-media
+ servicePort: 9528
+ - path: /260202/mianyangbackend/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mianyangbackend
+ servicePort: 9528
+ - path: /260202/multiterminal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-multiterminal
+ servicePort: 9528
+ - path: /260202/mws/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mws
+ servicePort: 9528
+ - path: /260202/oms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-oms
+ servicePort: 9528
+ - path: /260202/open/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-open
+ servicePort: 9528
+ - path: /260202/pilot2cloud/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-pilot2-to-cloud
+ servicePort: 9528
+ - path: /260202/qingdao/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qingdao
+ servicePort: 9528
+ - path: /260202/qinghaitourism/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qinghaitourism
+ servicePort: 9528
+ - path: /260202/renyike/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-renyike
+ servicePort: 9528
+ - path: /260202/scanner/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-scanner
+ servicePort: 9528
+ - path: /260202/security/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-security
+ servicePort: 9528
+ - path: /260202/securityh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-securityh5
+ servicePort: 9528
+ - path: /260202/seniclive/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-seniclive
+ servicePort: 9528
+ - path: /260202/share/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-share
+ servicePort: 9528
+ - path: /260202/smauth/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smauth
+ servicePort: 9528
+ - path: /260202/smsecret/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smsecret
+ servicePort: 9528
+ - path: /260202/splice/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-splice
+ servicePort: 9528
+ - path: /260202/threedsimulation/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-threedsimulation
+ servicePort: 9528
+ - path: /260202/traffic/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-traffic
+ servicePort: 9528
+ - path: /260202/uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uas
+ servicePort: 9528
+ - path: /260202/uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uaskny
+ servicePort: 9528
+ - path: /260202/uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasms
+ servicePort: 9528
+ - path: /260202/uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasmskny
+ servicePort: 9528
+ - path: /260202/visualization/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-visualization
+ servicePort: 9528
+ - path: /260202/uavmsmanager/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-manager
+ servicePort: 9528
+ - path: /260202/secenter/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-security-center
+ servicePort: 9528
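+# How routing works here: rewrite-target /$1 replaces the matched path with
+# the first capture group, so a request for /260202/supervision/static/app.js
+# reaches cmii-suav-platform-supervision as /static/app.js. The
+# configuration-snippet above only appends a trailing slash to bare prefixes
+# (e.g. /supervision -> /supervision/).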
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: backend-applications-ingress
+ namespace: sc-my-uav-260202
+ labels:
+ type: backend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+spec:
+ rules:
+ - host: cmii-admin-data.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-data
+ servicePort: 8080
+ - host: cmii-admin-gateway.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - host: cmii-admin-user.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-user
+ servicePort: 8080
+ - host: cmii-app-release.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-app-release
+ servicePort: 8080
+ - host: cmii-open-gateway.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - host: cmii-sky-converge.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
+ - host: cmii-suav-supervision.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-supervision
+ servicePort: 8080
+ - host: cmii-uas-datahub.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-datahub
+ servicePort: 8080
+ - host: cmii-uas-gateway.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - host: cmii-uas-lifecycle.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-lifecycle
+ servicePort: 8080
+ - host: cmii-uav-advanced5g.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-advanced5g
+ servicePort: 8080
+ - host: cmii-uav-airspace.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-airspace
+ servicePort: 8080
+ - host: cmii-uav-alarm.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-alarm
+ servicePort: 8080
+ - host: cmii-uav-autowaypoint.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-autowaypoint
+ servicePort: 8080
+ - host: cmii-uav-brain.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-brain
+ servicePort: 8080
+ - host: cmii-uav-bridge.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-bridge
+ servicePort: 8080
+ - host: cmii-uav-cloud-live.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cloud-live
+ servicePort: 8080
+ - host: cmii-uav-clusters.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-clusters
+ servicePort: 8080
+ - host: cmii-uav-cms.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cms
+ servicePort: 8080
+ - host: cmii-uav-data-post-process.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-data-post-process
+ servicePort: 8080
+ - host: cmii-uav-depotautoreturn.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-depotautoreturn
+ servicePort: 8080
+ - host: cmii-uav-developer.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-developer
+ servicePort: 8080
+ - host: cmii-uav-device.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-device
+ servicePort: 8080
+ - host: cmii-uav-emergency.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-emergency
+ servicePort: 8080
+ - host: cmii-uav-fwdd.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-fwdd
+ servicePort: 8080
+ - host: cmii-uav-gateway.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - host: cmii-uav-gis-server.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gis-server
+ servicePort: 8080
+ - host: cmii-uav-grid-datasource.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-datasource
+ servicePort: 8080
+ - host: cmii-uav-grid-engine.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-engine
+ servicePort: 8080
+ - host: cmii-uav-grid-manage.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-manage
+ servicePort: 8080
+ - host: cmii-uav-industrial-portfolio.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-industrial-portfolio
+ servicePort: 8080
+ - host: cmii-uav-integration.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-integration
+ servicePort: 8080
+ - host: cmii-uav-iot-dispatcher.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-dispatcher
+ servicePort: 8080
+ - host: cmii-uav-iot-manager.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-manager
+ servicePort: 8080
+ - host: cmii-uav-kpi-monitor.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-kpi-monitor
+ servicePort: 8080
+ - host: cmii-uav-logger.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-logger
+ servicePort: 8080
+ - host: cmii-uav-material-warehouse.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-material-warehouse
+ servicePort: 8080
+ - host: cmii-uav-mission.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mission
+ servicePort: 8080
+ - host: cmii-uav-mqtthandler.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mqtthandler
+ servicePort: 8080
+ - host: cmii-uav-multilink.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-multilink
+ servicePort: 8080
+ - host: cmii-uav-notice.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-notice
+ servicePort: 8080
+ - host: cmii-uav-oauth.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-oauth
+ servicePort: 8080
+ - host: cmii-uav-process.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-process
+ servicePort: 8080
+ - host: cmii-uav-sec-awareness.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sec-awareness
+ servicePort: 8080
+ - host: cmii-uav-security-trace.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-security-trace
+ servicePort: 8080
+ - host: cmii-uav-sense-adapter.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sense-adapter
+ servicePort: 8080
+ - host: cmii-uav-surveillance.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-surveillance
+ servicePort: 8080
+ - host: cmii-uav-sync.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sync
+ servicePort: 8080
+ - host: cmii-uav-tcp-server.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tcp-server
+ servicePort: 8080
+ - host: cmii-uav-threedsimulation.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-threedsimulation
+ servicePort: 8080
+ - host: cmii-uav-tower.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tower
+ servicePort: 8080
+ - host: cmii-uav-user.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-user
+ servicePort: 8080
+ - host: cmii-uav-watchdog.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-watchdog
+ servicePort: 8080
+ - host: cmii-uav-waypoint.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-waypoint
+ servicePort: 8080
+ - host: cmii-uavms-pyfusion.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-pyfusion
+ servicePort: 8080
+ - host: cmii-uavms-security-center.uavcloud-260202.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-security-center
+ servicePort: 8080
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: all-gateways-ingress
+ namespace: sc-my-uav-260202
+ labels:
+ type: api-gateway
+ octopus.control: all-ingress-config-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ proxy_set_header upgradePrefix $http_upgrade;
+ proxy_set_header Connection "upgradePrefix";
+spec:
+ rules:
+ - host: fake-domain.sc-my-uav-260202.io
+ http:
+ paths:
+ - path: /260202/oms/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - path: /260202/open/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - path: /260202/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - path: /260202/uas/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - path: /260202/converge/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
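+# A smoke test for the gateway routing (a sketch; the Host header must match
+# the fake domain above, and /260202/api/?(.*) strips the prefix via
+# rewrite-target, so this reaches cmii-uav-gateway as /actuator/health --
+# assuming the gateway exposes that endpoint):
+#
+#   curl -H "Host: fake-domain.sc-my-uav-260202.io" \
+#     http://<ingress-node-ip>/260202/api/actuator/health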
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-mongo.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-mongo.yaml
new file mode 100644
index 0000000..ee114fe
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-mongo.yaml
@@ -0,0 +1,78 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mongo
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ ports:
+ - port: 27017
+ name: server-27017
+ targetPort: 27017
+ nodePort: 37017
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mongo
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ serviceName: helm-mongo
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: helm-mongo
+ image: 192.168.1.4:8033/cmii/mongo:5.0
+ resources: {}
+ ports:
+ - containerPort: 27017
+ name: mongo27017
+ protocol: TCP
+ env:
+ - name: MONGO_INITDB_ROOT_USERNAME
+ value: cmlc
+ - name: MONGO_INITDB_ROOT_PASSWORD
+ value: REdPza8#oVlt
+ volumeMounts:
+ - name: mongo-data
+ mountPath: /data/db
+ readOnly: false
+ subPath: default/helm-mongo/data/db
+ volumes:
+ - name: mongo-data
+ persistentVolumeClaim:
+ claimName: helm-mongo
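+# Connectivity sketch through the NodePort (assumes the mongosh client; the
+# root credentials come from the env vars above):
+#
+#   mongosh --host <node-ip> --port 37017 -u cmlc -p 'REdPza8#oVlt' \
+#     --authenticationDatabase admin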
+---
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-mysql.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-mysql.yaml
new file mode 100644
index 0000000..1ee6339
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-mysql.yaml
@@ -0,0 +1,410 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ annotations: {}
+secrets:
+ - name: helm-mysql
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ mysql-root-password: "UXpmWFFoZDNiUQ=="
+ mysql-password: "S0F0cm5PckFKNw=="
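+# The values above are base64-encoded, as Secret data fields require
+# (stringData would accept plaintext). To inspect one (a sketch):
+#
+#   kubectl -n sc-my-uav-260202 get secret helm-mysql \
+#     -o jsonpath='{.data.mysql-root-password}' | base64 -d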
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ my.cnf: |-
+
+ [mysqld]
+ port=3306
+ basedir=/opt/bitnami/mysql
+ datadir=/bitnami/mysql/data
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ log-error=/bitnami/mysql/data/error.log
+ general_log_file = /bitnami/mysql/data/general.log
+ slow_query_log_file = /bitnami/mysql/data/slow.log
+ innodb_data_file_path = ibdata1:512M:autoextend
+ innodb_buffer_pool_size = 512M
+ innodb_buffer_pool_instances = 2
+ innodb_log_file_size = 512M
+    innodb_log_files_in_group = 4
+ log-bin = /bitnami/mysql/data/mysql-bin
+ max_binlog_size=1G
+ transaction_isolation = REPEATABLE-READ
+ default_storage_engine = innodb
+ character-set-server = utf8mb4
+ collation-server=utf8mb4_bin
+ binlog_format = ROW
+ binlog_rows_query_log_events=on
+ binlog_cache_size=4M
+ binlog_expire_logs_seconds = 1296000
+ max_binlog_cache_size=2G
+ gtid_mode = on
+ enforce_gtid_consistency = 1
+ sync_binlog = 1
+ innodb_flush_log_at_trx_commit = 1
+ innodb_flush_method = O_DIRECT
+ log_slave_updates=1
+ relay_log_recovery = 1
+ relay-log-purge = 1
+ default_time_zone = '+08:00'
+ lower_case_table_names=1
+ log_bin_trust_function_creators=1
+ group_concat_max_len=67108864
+ innodb_io_capacity = 4000
+ innodb_io_capacity_max = 8000
+ innodb_flush_sync = 0
+ innodb_flush_neighbors = 0
+ innodb_write_io_threads = 8
+ innodb_read_io_threads = 8
+ innodb_purge_threads = 4
+ innodb_page_cleaners = 4
+ innodb_open_files = 65535
+ innodb_max_dirty_pages_pct = 50
+ innodb_lru_scan_depth = 4000
+ innodb_checksum_algorithm = crc32
+ innodb_lock_wait_timeout = 10
+ innodb_rollback_on_timeout = 1
+ innodb_print_all_deadlocks = 1
+ innodb_file_per_table = 1
+ innodb_online_alter_log_max_size = 4G
+ innodb_stats_on_metadata = 0
+ innodb_thread_concurrency = 0
+ innodb_sync_spin_loops = 100
+ innodb_spin_wait_delay = 30
+ lock_wait_timeout = 3600
+ slow_query_log = 1
+ long_query_time = 10
+ log_queries_not_using_indexes =1
+ log_throttle_queries_not_using_indexes = 60
+ min_examined_row_limit = 100
+ log_slow_admin_statements = 1
+ log_slow_slave_statements = 1
+ default_authentication_plugin=mysql_native_password
+ skip-name-resolve=1
+ explicit_defaults_for_timestamp=1
+ plugin_dir=/opt/bitnami/mysql/plugin
+ max_connections = 2000
+ max_connect_errors = 1000000
+ table_definition_cache=2000
+ table_open_cache_instances=64
+ tablespace_definition_cache=1024
+ thread_cache_size=256
+ interactive_timeout = 600
+ wait_timeout = 600
+ tmpdir=/opt/bitnami/mysql/tmp
+ max_allowed_packet=32M
+ bind-address=0.0.0.0
+ performance_schema = 1
+ performance_schema_instrument = '%memory%=on'
+ performance_schema_instrument = '%lock%=on'
+ innodb_monitor_enable=ALL
+
+ [mysql]
+ no-auto-rehash
+
+ [mysqldump]
+ quick
+ max_allowed_packet = 32M
+
+ [client]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ default-character-set=UTF8
+ plugin_dir=/opt/bitnami/mysql/plugin
+
+ [manager]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
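+
+# Once the pod is up, effective values can be checked against this file
+# (a sketch; MYSQL_ROOT_PASSWORD is injected from the helm-mysql Secret):
+#
+#   kubectl -n sc-my-uav-260202 exec -it helm-mysql-0 -- bash -c \
+#     'mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -e "show variables like \"max_connections\""'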
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql-init-scripts
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ create_users_grants_core.sql: |-
+ create user zyly@'%' identified by 'Cmii@451315';
+ grant select on *.* to zyly@'%';
+ create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
+ grant all on *.* to zyly_qc@'%';
+ create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
+ grant all on *.* to k8s_admin@'%';
+ create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
+ grant all on *.* to audit_dba@'%';
+ create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
+ GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
+ create user monitor@'%' identified by 'PL3#nGtrWbf-';
+ grant REPLICATION CLIENT on *.* to monitor@'%';
+ flush privileges;
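+
+# NOTE: scripts under /docker-entrypoint-initdb.d run only when the data
+# directory is initialized for the first time; on a pre-existing volume the
+# users above are not (re)created. A sketch for applying them by hand,
+# assuming the SQL is saved locally as create_users_grants_core.sql:
+#
+#   kubectl -n sc-my-uav-260202 exec -i helm-mysql-0 -- bash -c \
+#     'mysql -uroot -p"$MYSQL_ROOT_PASSWORD"' < create_users_grants_core.sql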
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: cmii-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.app: mysql
+ cmii.type: middleware
+ octopus.control: mysql-db-wdd
+spec:
+ ports:
+ - name: mysql
+ protocol: TCP
+ port: 13306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.app: mysql
+ cmii.type: middleware
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql-headless
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ ports:
+ - name: mysql
+ port: 3306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: NodePort
+ ports:
+ - name: mysql
+ port: 3306
+ protocol: TCP
+ targetPort: mysql
+ nodePort: 33306
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mysql
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ serviceName: helm-mysql
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ annotations:
+ checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-mysql
+ affinity: {}
+ nodeSelector:
+ mysql-deploy: "true"
+ securityContext:
+ fsGroup: 1001
+ initContainers:
+ - name: change-volume-permissions
+ image: 192.168.1.4:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ chown -R 1001:1001 /bitnami/mysql
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ containers:
+ - name: mysql
+ image: 192.168.1.4:8033/cmii/mysql:8.1.0-debian-11-r42
+ imagePullPolicy: "IfNotPresent"
+ securityContext:
+ runAsUser: 1001
+ env:
+ - name: BITNAMI_DEBUG
+ value: "true"
+ - name: MYSQL_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-mysql
+ key: mysql-root-password
+ - name: MYSQL_DATABASE
+ value: "cmii"
+ ports:
+ - name: mysql
+ containerPort: 3306
+ livenessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ readinessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ startupProbe:
+ failureThreshold: 60
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d
+ - name: config
+ mountPath: /opt/bitnami/mysql/conf/my.cnf
+ subPath: my.cnf
+ volumes:
+ - name: config
+ configMap:
+ name: helm-mysql
+ - name: custom-init-scripts
+ configMap:
+ name: helm-mysql-init-scripts
+ - name: mysql-data
+ hostPath:
+ path: /var/lib/docker/mysql-pv/sc-my-uav-260202/
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-nacos.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-nacos.yaml
new file mode 100644
index 0000000..aa20e4b
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-nacos.yaml
@@ -0,0 +1,130 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-nacos-cm
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/version: "2.0"
+data:
+ mysql.db.name: "cmii_nacos_config"
+ mysql.db.host: "helm-mysql"
+ mysql.port: "3306"
+ mysql.user: "k8s_admin"
+ mysql.password: "fP#UaH6qQ3)8"
+---
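+# 8848 is the Nacos HTTP port; 9848/9849 are the Nacos 2.x gRPC channels,
+# fixed offsets (+1000/+1001) from the main port, and must be exposed to
+# clients alongside it.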
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-nacos
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/version: "2.0"
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ ports:
+ - port: 8848
+ name: server
+ targetPort: 8848
+ nodePort: 38848
+ - port: 9848
+ name: server12
+ targetPort: 9848
+ - port: 9849
+ name: server23
+ targetPort: 9849
+---
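+# Nacos runs in standalone mode against the external MySQL above; the
+# cmii_nacos_config database must already contain the Nacos schema
+# (presumably seeded by the helm-mysql-init-scripts ConfigMap).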
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-nacos
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/version: "2.0"
+spec:
+ serviceName: helm-nacos
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: octopus
+        app.kubernetes.io/version: "2.0"
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: nacos-server
+ image: 192.168.1.4:8033/cmii/nacos-server:v2.1.2
+ ports:
+ - containerPort: 8848
+ name: dashboard
+ - containerPort: 9848
+ name: tcp-9848
+ - containerPort: 9849
+ name: tcp-9849
+ env:
+ - name: NACOS_AUTH_ENABLE
+ value: "false"
+ - name: NACOS_REPLICAS
+ value: "1"
+ - name: MYSQL_SERVICE_DB_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.name
+ - name: MYSQL_SERVICE_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.port
+ - name: MYSQL_SERVICE_USER
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.user
+ - name: MYSQL_SERVICE_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.password
+ - name: MYSQL_SERVICE_HOST
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.host
+ - name: NACOS_SERVER_PORT
+ value: "8848"
+ - name: NACOS_APPLICATION_PORT
+ value: "8848"
+ - name: PREFER_HOST_MODE
+ value: "hostname"
+ - name: MODE
+ value: standalone
+ - name: SPRING_DATASOURCE_PLATFORM
+ value: mysql
+---
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-nfs-test.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-nfs-test.yaml
new file mode 100644
index 0000000..315bde1
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-nfs-test.yaml
@@ -0,0 +1,38 @@
+---
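+# Smoke test for the NFS provisioner: the PVC below should bind dynamically
+# via nfs-prod-distribute, and the pod writes a marker file and exits 0.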
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: test-claim
+ annotations:
+    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name of the StorageClass in nfs-StorageClass.yaml
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: nfs-prod-distribute
+ resources:
+ requests:
+ storage: 1Mi
+---
+kind: Pod
+apiVersion: v1
+metadata:
+ name: test-pod
+spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: test-pod
+ image: 192.168.1.4:8033/cmii/busybox:latest
+ command:
+ - "/bin/sh"
+ args:
+ - "-c"
+ - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
+ volumeMounts:
+ - name: nfs-pvc
+ mountPath: "/mnt"
+ restartPolicy: "Never"
+ volumes:
+ - name: nfs-pvc
+ persistentVolumeClaim:
+        claimName: test-claim # must match the PVC name above
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-nfs.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-nfs.yaml
new file mode 100644
index 0000000..58d6b15
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-nfs.yaml
@@ -0,0 +1,114 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+  namespace: kube-system # set to the namespace where the provisioner is deployed; same below
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: nfs-client-provisioner-runner
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+---
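+# Note: the roleRef below binds the service account to cluster-admin rather
+# than the scoped nfs-client-provisioner-runner role (left commented out),
+# granting the provisioner full cluster privileges.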
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: run-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+# name: nfs-client-provisioner-runner
+ name: cluster-admin
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: Role
+ name: leader-locking-nfs-client-provisioner
+ apiGroup: rbac.authorization.k8s.io
+
+---
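+# archiveOnDelete "false" asks nfs-subdir-external-provisioner to delete the
+# backing directory when a PVC is removed instead of archiving it.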
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: nfs-prod-distribute
+provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
+parameters:
+  archiveOnDelete: "false"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nfs-client-provisioner
+ labels:
+ app: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+  namespace: kube-system # must match the namespace used by the RBAC objects above
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nfs-client-provisioner
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ app: nfs-client-provisioner
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: nfs-client-provisioner
+ containers:
+ - name: nfs-client-provisioner
+ image: 192.168.1.4:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
+ volumeMounts:
+ - name: nfs-client-root
+ mountPath: /persistentvolumes
+ env:
+ - name: PROVISIONER_NAME
+ value: cmlc-nfs-storage
+ - name: NFS_SERVER
+ value: 192.168.1.6
+ - name: NFS_PATH
+ value: /var/lib/docker/nfs_data
+ volumes:
+ - name: nfs-client-root
+ nfs:
+ server: 192.168.1.6
+ path: /var/lib/docker/nfs_data
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-pvc.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-pvc.yaml
new file mode 100644
index 0000000..df38dc9
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-pvc.yaml
@@ -0,0 +1,76 @@
+---
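+# All four PVCs below depend on the nfs-prod-distribute StorageClass from
+# k8s-nfs.yaml, so that manifest must be applied first.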
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: nfs-backend-log-pvc
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: nfs-backend-log-pvc
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+    app.kubernetes.io/version: "2.0"
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 100Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-emqxs
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-emqxs
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+    app.kubernetes.io/version: "2.0"
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-mongo
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-mongo
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+    app.kubernetes.io/version: "2.0"
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 30Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-rabbitmq
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+    app.kubernetes.io/version: "2.0"
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-rabbitmq.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-rabbitmq.yaml
new file mode 100644
index 0000000..26e743e
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-rabbitmq.yaml
@@ -0,0 +1,328 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+automountServiceAccountToken: true
+secrets:
+ - name: helm-rabbitmq
+---
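+# base64 here is encoding, not encryption: rabbitmq-password
+# (echo blljUk45MXIuX2hq | base64 -d) decodes to the same default_pass that
+# is hard-coded in the rabbitmq.conf ConfigMap below.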
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+type: Opaque
+data:
+ rabbitmq-password: "blljUk45MXIuX2hq"
+ rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-rabbitmq-config
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+data:
+ rabbitmq.conf: |-
+ ## Username and password
+ ##
+ default_user = admin
+ default_pass = nYcRN91r._hj
+ ## Clustering
+ ##
+ cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+ cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+ cluster_formation.node_cleanup.interval = 10
+ cluster_formation.node_cleanup.only_log_warning = true
+ cluster_partition_handling = autoheal
+ # queue master locator
+ queue_master_locator = min-masters
+ # enable guest user
+ loopback_users.guest = false
+ #default_vhost = default-vhost
+ #disk_free_limit.absolute = 50MB
+ #load_definitions = /app/load_definition.json
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+subjects:
+ - kind: ServiceAccount
+ name: helm-rabbitmq
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: helm-rabbitmq-endpoint-reader
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq-headless
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ clusterIP: None
+ ports:
+ - name: epmd
+ port: 4369
+ targetPort: epmd
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ - name: dist
+ port: 25672
+ targetPort: dist
+ - name: dashboard
+ port: 15672
+      targetPort: dashboard
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-my-uav-260202
+ publishNotReadyAddresses: true
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ type: NodePort
+ ports:
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ nodePort: 35672
+ - name: dashboard
+ port: 15672
+ targetPort: dashboard
+ nodePort: 36675
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-my-uav-260202
+---
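+# Single-node RabbitMQ with Kubernetes peer discovery; RABBITMQ_USE_LONGNAME
+# plus the headless service yields a stable node name of the form
+# rabbit@<pod>.helm-rabbitmq-headless.<namespace>.svc.cluster.local.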
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-rabbitmq
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ serviceName: helm-rabbitmq-headless
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: sc-my-uav-260202
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: rabbitmq
+ annotations:
+ checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
+ checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-rabbitmq
+ affinity: {}
+ securityContext:
+ fsGroup: 5001
+ runAsUser: 5001
+ terminationGracePeriodSeconds: 120
+ initContainers:
+ - name: volume-permissions
+ image: 192.168.1.4:8033/cmii/bitnami-shell:11-debian-11-r136
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ args:
+ - -ec
+ - |
+ mkdir -p "/bitnami/rabbitmq/mnesia"
+ chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
+ securityContext:
+ runAsUser: 0
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ containers:
+ - name: rabbitmq
+ image: 192.168.1.4:8033/cmii/rabbitmq:3.9.12-debian-10-r3
+ imagePullPolicy: "Always"
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: K8S_SERVICE_NAME
+ value: "helm-rabbitmq-headless"
+ - name: K8S_ADDRESS_TYPE
+ value: hostname
+ - name: RABBITMQ_FORCE_BOOT
+ value: "no"
+ - name: RABBITMQ_NODE_NAME
+ value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: K8S_HOSTNAME_SUFFIX
+ value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: RABBITMQ_MNESIA_DIR
+ value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
+ - name: RABBITMQ_LDAP_ENABLE
+ value: "no"
+ - name: RABBITMQ_LOGS
+ value: "-"
+ - name: RABBITMQ_ULIMIT_NOFILES
+ value: "65536"
+ - name: RABBITMQ_USE_LONGNAME
+ value: "true"
+ - name: RABBITMQ_ERL_COOKIE
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-erlang-cookie
+ - name: RABBITMQ_LOAD_DEFINITIONS
+ value: "no"
+ - name: RABBITMQ_SECURE_PASSWORD
+ value: "yes"
+ - name: RABBITMQ_USERNAME
+ value: "admin"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-password
+ - name: RABBITMQ_PLUGINS
+ value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
+ ports:
+ - name: amqp
+ containerPort: 5672
+ - name: dist
+ containerPort: 25672
+ - name: dashboard
+ containerPort: 15672
+ - name: epmd
+ containerPort: 4369
+ livenessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q ping
+ initialDelaySeconds: 120
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
+ /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
+ else
+ rabbitmqctl stop_app
+ fi
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: configuration
+ mountPath: /bitnami/rabbitmq/conf
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ volumes:
+ - name: configuration
+ configMap:
+ name: helm-rabbitmq-config
+ items:
+ - key: rabbitmq.conf
+ path: rabbitmq.conf
+ - name: data
+ persistentVolumeClaim:
+ claimName: helm-rabbitmq
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-redis.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-redis.yaml
new file mode 100644
index 0000000..db47fad
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-redis.yaml
@@ -0,0 +1,585 @@
+---
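+# Topology: one master and one replica StatefulSet. Note that redis-data is
+# an emptyDir in both, so despite "appendonly yes" the dataset does not
+# survive pod rescheduling; a PVC would be required for durable persistence.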
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: true
+metadata:
+ name: helm-redis
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-redis
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ redis-password: "TWNhY2hlQDQ1MjI="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-configuration
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+data:
+ redis.conf: |-
+ # User-supplied common configuration:
+ # Enable AOF https://redis.io/topics/persistence#append-only-file
+ appendonly yes
+ # Disable RDB persistence, AOF persistence already enabled.
+ save ""
+ # End of common configuration
+ master.conf: |-
+ dir /data
+ # User-supplied master configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of master configuration
+ replica.conf: |-
+ dir /data
+ slave-read-only yes
+ # User-supplied replica configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of replica configuration
+---
+# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-health
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+data:
+ ping_readiness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+ ping_liveness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+---
+# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-scripts
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+data:
+ start-master.sh: |
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
+ exec redis-server "${ARGS[@]}"
+ start-replica.sh: |
+ #!/bin/bash
+
+ get_port() {
+ hostname="$1"
+ type="$2"
+
+ port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
+ port=${!port_var}
+
+ if [ -z "$port" ]; then
+ case $type in
+ "SENTINEL")
+ echo 26379
+ ;;
+ "REDIS")
+ echo 6379
+ ;;
+ esac
+ else
+ echo $port
+ fi
+ }
+
+ get_full_hostname() {
+ hostname="$1"
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ }
+
+ REDISPORT=$(get_port "$HOSTNAME" "REDIS")
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+
+ echo "" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
+ exec redis-server "${ARGS[@]}"
+---
+# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-headless
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+---
+# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-master
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ type: ClusterIP
+
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-replicas
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ type: ClusterIP
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/component: replica
+---
+# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-master
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ affinity: {}
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ imagePullSecrets:
+ - name: harborsecret
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 192.168.1.4:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-master.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: master
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ # One second longer than command timeout should prevent generation of zombie processes.
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+            - name: redis-data
+              mountPath: /data
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc/
+ - name: tmp
+ mountPath: /tmp
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: tmp
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-replicas
+ namespace: sc-my-uav-260202
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/component: replica
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: sc-my-uav-260202
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 192.168.1.4:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-replica.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: slave
+ - name: REDIS_MASTER_HOST
+ value: helm-redis-master-0.helm-redis-headless.sc-my-uav-260202.svc.cluster.local
+ - name: REDIS_MASTER_PORT_NUMBER
+ value: "6379"
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local_and_master.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local_and_master.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+            - name: redis-data
+              mountPath: /data
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+
diff --git a/agent-common/real_project/sc-my-uav-260202/old/k8s-srs.yaml b/agent-common/real_project/sc-my-uav-260202/old/k8s-srs.yaml
new file mode 100644
index 0000000..dd5e2cc
--- /dev/null
+++ b/agent-common/real_project/sc-my-uav-260202/old/k8s-srs.yaml
@@ -0,0 +1,496 @@
+---
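+# The SRS listen ports are set equal to the NodePorts (31935/30090/30556) so
+# the ports SRS advertises match the externally reachable ones; $CANDIDATE is
+# expanded by SRS from the CANDIDATE env var and must be an IP that WebRTC
+# clients can reach.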
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-srs-cm
+ namespace: sc-my-uav-260202
+ labels:
+ cmii.app: live-srs
+ cmii.type: live
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+data:
+ srs.rtc.conf: |-
+ listen 31935;
+ max_connections 4096;
+ srs_log_tank console;
+ srs_log_level info;
+ srs_log_file /home/srs.log;
+ daemon off;
+ http_api {
+ enabled on;
+ listen 1985;
+ crossdomain on;
+ }
+ stats {
+ network 0;
+ }
+ http_server {
+ enabled on;
+ listen 8080;
+ dir /home/hls;
+ }
+ srt_server {
+ enabled on;
+ listen 30556;
+ maxbw 1000000000;
+ connect_timeout 4000;
+ peerlatency 600;
+ recvlatency 600;
+ }
+ rtc_server {
+ enabled on;
+ listen 30090;
+ candidate $CANDIDATE;
+ }
+ vhost __defaultVhost__ {
+ http_hooks {
+ enabled on;
+ on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
+ }
+ http_remux {
+ enabled on;
+ }
+ rtc {
+ enabled on;
+ rtmp_to_rtc on;
+ rtc_to_rtmp on;
+ keep_bframe off;
+ }
+ tcp_nodelay on;
+ min_latency on;
+ play {
+ gop_cache off;
+ mw_latency 100;
+ mw_msgs 10;
+ }
+ publish {
+ firstpkt_timeout 8000;
+ normal_timeout 4000;
+ mr on;
+ }
+ dvr {
+ enabled off;
+ dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
+ dvr_plan session;
+ }
+ hls {
+ enabled on;
+ hls_path /home/hls;
+ hls_fragment 10;
+ hls_window 60;
+ hls_m3u8_file [app]/[stream].m3u8;
+ hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
+ hls_cleanup on;
+ hls_entry_prefix http://36.133.66.183:8088;
+ }
+ }
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc-exporter
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ nodePort: 31935
+ - name: rtc
+ protocol: UDP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: rtc-tcp
+ protocol: TCP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: srt
+ protocol: UDP
+ port: 30556
+ targetPort: 30556
+ nodePort: 30556
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ nodePort: 30080
+ selector:
+ srs-role: rtc
+ type: NodePort
+ sessionAffinity: None
+ externalTrafficPolicy: Cluster
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8080
+ targetPort: 8080
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srsrtc-svc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: helm-live-srs-rtc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-srs
+ cmii.type: live
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+ srs-role: rtc
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ srs-role: rtc
+ template:
+ metadata:
+ labels:
+ srs-role: rtc
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-srs-cm
+ items:
+ - key: srs.rtc.conf
+ path: docker.conf
+ defaultMode: 420
+ - name: srs-vol
+ emptyDir:
+ sizeLimit: 8Gi
+ containers:
+ - name: srs-rtc
+ image: 192.168.1.4:8033/cmii/srs:v5.0.195
+ ports:
+ - name: srs-rtmp
+ containerPort: 31935
+ protocol: TCP
+ - name: srs-api
+ containerPort: 1985
+ protocol: TCP
+ - name: srs-flv
+ containerPort: 8080
+ protocol: TCP
+ - name: srs-webrtc
+ containerPort: 30090
+ protocol: UDP
+ - name: srs-webrtc-tcp
+ containerPort: 30090
+ protocol: TCP
+ - name: srs-srt
+ containerPort: 30556
+ protocol: UDP
+ env:
+ - name: CANDIDATE
+ value: 36.133.66.183
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /usr/local/srs/conf/docker.conf
+ subPath: docker.conf
+ - name: srs-vol
+ mountPath: /home/dvr
+ subPath: sc-my-uav-260202/helm-live/dvr
+ - name: srs-vol
+ mountPath: /home/hls
+ subPath: sc-my-uav-260202/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ - name: oss-adaptor
+ image: 192.168.1.4:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
+ env:
+ - name: OSS_ENDPOINT
+ value: 'http://helm-minio:9000'
+ - name: OSS_AK
+ value: cmii
+ - name: OSS_SK
+ value: 'B#923fC7mk'
+ - name: OSS_BUCKET
+ value: live-cluster-hls
+ - name: SRS_OP
+ value: 'http://helm-live-op-svc-v2:8080'
+ - name: MYSQL_ENDPOINT
+ value: 'helm-mysql:3306'
+ - name: MYSQL_USERNAME
+ value: k8s_admin
+ - name: MYSQL_PASSWORD
+ value: fP#UaH6qQ3)8
+ - name: MYSQL_DATABASE
+ value: cmii_live_srs_op
+ - name: MYSQL_TABLE
+ value: live_segment
+ - name: LOG_LEVEL
+ value: info
+ - name: OSS_META
+ value: 'yes'
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-vol
+ mountPath: /cmii/share/hls
+ subPath: sc-my-uav-260202/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ serviceName: helm-live-srsrtc-svc
+ podManagementPolicy: OrderedReady
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ partition: 0
+ revisionHistoryLimit: 10
+---
+# live-srs section
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: helm-live-op-v2
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+ helm.sh/chart: cmlc-live-live-op-2.0.0
+ live-role: op-v2
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ live-role: op-v2
+ template:
+ metadata:
+ labels:
+ live-role: op-v2
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-op-cm-v2
+ items:
+ - key: live.op.conf
+ path: bootstrap.yaml
+ defaultMode: 420
+ containers:
+ - name: helm-live-op-v2
+ image: 192.168.1.4:8033/cmii/cmii-live-operator:5.2.0
+ ports:
+ - name: operator
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 4800m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /cmii/bootstrap.yaml
+ subPath: bootstrap.yaml
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 25%
+ maxSurge: 25%
+ revisionHistoryLimit: 10
+ progressDeadlineSeconds: 600
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc-v2
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ nodePort: 30333
+ selector:
+ live-role: op-v2
+ type: NodePort
+ sessionAffinity: None
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ live-role: op
+ type: ClusterIP
+ sessionAffinity: None
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-op-cm-v2
+ namespace: sc-my-uav-260202
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+data:
+ live.op.conf: |-
+ server:
+ port: 8080
+ spring:
+ main:
+ allow-bean-definition-overriding: true
+ allow-circular-references: true
+ application:
+ name: cmii-live-operator
+ platform:
+ info:
+ name: cmii-live-operator
+ description: cmii-live-operator
+ version: 2.0
+ scanPackage: com.cmii.live.op
+ cloud:
+ nacos:
+ config:
+ username: nacos
+ password: KingKong@95461234
+ server-addr: helm-nacos:8848
+ extension-configs:
+ - data-id: cmii-live-operator.yml
+ group: 2.0
+ refresh: true
+ shared-configs:
+ - data-id: cmii-backend-system.yml
+ group: 2.0
+ refresh: true
+ discovery:
+ enabled: false
+
+ live:
+ engine:
+ type: srs
+ endpoint: 'http://helm-live-srs-svc:1985'
+ proto:
+ rtmp: 'rtmp://36.133.66.183:31935'
+ rtsp: 'rtsp://36.133.66.183:30554'
+ srt: 'srt://36.133.66.183:30556'
+ flv: 'http://36.133.66.183:30500'
+ hls: 'http://36.133.66.183:30500'
+ rtc: 'webrtc://36.133.66.183:30080'
+ replay: 'https://36.133.66.183:30333'
+ minio:
+ endpoint: http://helm-minio:9000
+ access-key: cmii
+ secret-key: B#923fC7mk
+ bucket: live-cluster-hls
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-backend.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-backend.yaml
new file mode 100644
index 0000000..4f39a46
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-backend.yaml
@@ -0,0 +1,350 @@
+---
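+# Both Deployments below share one template: nodeAffinity pins pods to nodes
+# labeled uavcloud.env=260304, configuration is pulled from Nacos
+# (helm-nacos:8848), and logs go to the shared NFS PVC under a per-app subPath.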
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-fly-center
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+                      - "260304"
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-fly-center
+ image: 192.168.5.41:8033/cmii/cmii-fly-center:1.5.0-xa-030301-arm
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xj-aks-uav-260304
+ - name: APPLICATION_NAME
+ value: cmii-fly-center
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+              value: "2.0"
+            - name: SYS_CONFIG_GROUP
+              value: "2.0"
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.5.41:8033/cmii/cmii-fly-center:1.5.0-xa-030301-arm
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-fly-center
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xj-aks-uav-260304/cmii-fly-center
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-fly-center
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-fly-center
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-sky-converge
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+    app.kubernetes.io/app-version: "2.0"
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ template:
+ metadata:
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+                      - "260304"
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-sky-converge
+ image: 192.168.5.41:8033/cmii/cmii-sky-converge:1.4.0-xa-0303-arm
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xj-aks-uav-260304
+ - name: APPLICATION_NAME
+ value: cmii-sky-converge
+ - name: CUST_JAVA_OPTS
+ value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
+ - name: NACOS_REGISTRY
+ value: "helm-nacos:8848"
+ - name: NACOS_DISCOVERY_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: NACOS_DISCOVERY_PORT
+ value: "8080"
+ - name: BIZ_CONFIG_GROUP
+              value: "2.0"
+            - name: SYS_CONFIG_GROUP
+              value: "2.0"
+ - name: NACOS_USERNAME
+ value: "developer"
+ - name: NACOS_PASSWORD
+ value: "Deve@9128201"
+ - name: IMAGE_NAME
+ value: 192.168.5.41:8033/cmii/cmii-sky-converge:1.4.0-xa-0303-arm
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: LIMIT_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: limits.cpu
+ - name: LIMIT_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: limits.memory
+ - name: REQUEST_CPU
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: requests.cpu
+ - name: REQUEST_MEMORY
+ valueFrom:
+ resourceFieldRef:
+ containerName: cmii-sky-converge
+ resource: requests.memory
+ ports:
+ - name: pod-port
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /cmii/health
+ port: pod-port
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 3
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 5
+ volumeMounts:
+ - name: nfs-backend-log-volume
+ mountPath: /cmii/logs
+ readOnly: false
+ subPath: xj-aks-uav-260304/cmii-sky-converge
+ volumes:
+ - name: nfs-backend-log-volume
+ persistentVolumeClaim:
+ claimName: nfs-backend-log-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-sky-converge
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ octopus/control: backend-app-1.0.0
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/app-version: 2.0
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: backend
+ cmii.app: cmii-sky-converge
+ ports:
+ - name: backend-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-configmap.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-configmap.yaml
new file mode 100644
index 0000000..af7dd6e
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-configmap.yaml
@@ -0,0 +1,686 @@
+---
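+# One tenant-prefix-* ConfigMap per front-end application, each injecting an
+# ingress-config.js consumed by the web bundle; AppClientId "empty" appears to
+# be a literal placeholder for apps without a registered client id.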
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-detection
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "detection",
+ AppClientId: "APP_FDHW2VLVDWPnnOCy"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-jiangsuwenlv
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "jiangsuwenlv",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qingdao
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "qingdao",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uasms
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "uasms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-secenter
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "secenter",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-renyike
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "renyike",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-armypeople
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "armypeople",
+ AppClientId: "APP_UIegse6Lfou9pO1U"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-traffic
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "traffic",
+ AppClientId: "APP_Jc8i2wOQ1t73QEJS"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-cmsportal
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "cmsportal",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-visualization
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "visualization",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pilot2cloud
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "pilot2cloud",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-blockchain
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "blockchain",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smauth
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "smauth",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-iot
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "iot",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-lite
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "lite",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-multiterminal
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "multiterminal",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-share
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "share",
+ AppClientId: "APP_4lVSVI0ZGxTssir8"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-splice
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "splice",
+ AppClientId: "APP_zE0M3sTRXrCIJS8Y"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-qinghaitourism
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "qinghaitourism",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hljtt
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "hljtt",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uas
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "uas",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dispatchh5
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "dispatchh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-uavmsmanager
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "uavmsmanager",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-ai-brain
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "ai-brain",
+ AppClientId: "APP_rafnuCAmBESIVYMH"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-base
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "base",
+ AppClientId: "APP_9LY41OaKSqk2btY0"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-dikongzhixingh5
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "dikongzhixingh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervisionh5
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "supervisionh5",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-seniclive
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "seniclive",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-classification
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "classification",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-eventsh5
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "eventsh5",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mianyangbackend
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "mianyangbackend",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-awareness
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "awareness",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-flight-control
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "flight-control",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-supervision
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "supervision",
+ AppClientId: "APP_qqSu82THfexI8PLM"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-emergency
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "emergency",
+ AppClientId: "APP_aGsTAY1uMZrpKdfk"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-oms
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "oms",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-security
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "security",
+ AppClientId: "APP_JUSEMc7afyWXxvE7"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-securityh5
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "securityh5",
+ AppClientId: "APP_N3ImO0Ubfu9peRHD"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-logistics
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "logistics",
+ AppClientId: "APP_PvdfRRRBPL8xbIwl"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-media
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "media",
+ AppClientId: "APP_4AU8lbifESQO4FD6"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-mws
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "mws",
+ AppClientId: "APP_uKniXPELlRERBBwK"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-open
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "open",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-threedsimulation
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "threedsimulation",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-hyper
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "hyper",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-scanner
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "scanner",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-smsecret
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "smsecret",
+ AppClientId: "empty"
+ }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: tenant-prefix-pangu
+ namespace: xj-aks-uav-260304
+data:
+ ingress-config.js: |-
+ var __GlobalIngressConfig = {
+ TenantEnvironment: "260304",
+ CloudHOST: "120.205.80.23:8088",
+ ApplicationShortName: "",
+ AppClientId: "empty"
+ }
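
The tenant-prefix ConfigMaps above differ only in `ApplicationShortName` and `AppClientId`. A minimal generator sketch (not part of this repo; the helper name `emit_cm` and the sample invocations are illustrative) keeps the set consistent and avoids copy-paste drift:

```bash
#!/usr/bin/env bash
# Sketch: emit one tenant-prefix ConfigMap per (short-name, client-id) pair.
NS="xj-aks-uav-260304"
ENV_ID="260304"
HOST="120.205.80.23:8088"

emit_cm() {  # $1 = ApplicationShortName, $2 = AppClientId
  cat <<EOF
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-$1
  namespace: ${NS}
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "${ENV_ID}",
      CloudHOST: "${HOST}",
      ApplicationShortName: "$1",
      AppClientId: "$2"
    }
EOF
}

# Sample invocations taken from the list above:
emit_cm lite empty
emit_cm share APP_4lVSVI0ZGxTssir8
```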
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-dashboard.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-dashboard.yaml
new file mode 100644
index 0000000..7f8eb63
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-dashboard.yaml
@@ -0,0 +1,315 @@
+---
+# ------------------- Dashboard Namespace ------------------- #
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: kubernetes-dashboard
+
+---
+# ------------------- Service Account ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Service (NodePort 39999) ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ type: NodePort
+ ports:
+ - port: 443
+ targetPort: 8443
+ nodePort: 39999
+ selector:
+ k8s-app: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Secrets ------------------- #
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-certs
+ namespace: kubernetes-dashboard
+type: Opaque
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-csrf
+ namespace: kubernetes-dashboard
+type: Opaque
+data:
+ csrf: ""
+
+---
+# ------------------- Dashboard Role (FIXED) ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+rules:
+  # [FIX] Allow creating Secrets - resolves the dashboard panic
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create"]
+  # Allow operations on the specific Secrets below
+ - apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+ verbs: ["get", "update", "delete"]
+  # ConfigMap permissions
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ resourceNames: ["kubernetes-dashboard-settings"]
+ verbs: ["get", "update"]
+  # Metrics permissions
+ - apiGroups: [""]
+ resources: ["services"]
+ resourceNames: ["heapster", "dashboard-metrics-scraper"]
+ verbs: ["proxy"]
+ - apiGroups: [""]
+ resources: ["services/proxy"]
+ resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+ verbs: ["get"]
+
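
A quick verification sketch (not part of the manifest): once the RoleBinding below is applied, `kubectl auth can-i` can impersonate the ServiceAccount to confirm the `[FIX]` rule actually took effect.

```bash
# Should print "yes" when the secret-creation fix is active.
kubectl auth can-i create secrets \
  --as=system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard \
  -n kubernetes-dashboard
```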
+---
+# ------------------- Dashboard RoleBinding ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kubernetes-dashboard-minimal
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: kubernetes-dashboard
+ template:
+ metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ spec:
+ containers:
+ - name: kubernetes-dashboard
+ image: 192.168.5.41:8033/cmii/dashboard:v2.7.0
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8443
+ protocol: TCP
+ args:
+ - --auto-generate-certificates
+ - --namespace=kubernetes-dashboard
+ volumeMounts:
+ - name: kubernetes-dashboard-certs
+ mountPath: /certs
+ - mountPath: /tmp
+ name: tmp-volume
+ livenessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /
+ port: 8443
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ volumes:
+ - name: kubernetes-dashboard-certs
+ secret:
+ secretName: kubernetes-dashboard-certs
+ - name: tmp-volume
+ emptyDir: {}
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ------------------- Metrics Scraper Service ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ ports:
+ - port: 8000
+ targetPort: 8000
+ selector:
+ k8s-app: dashboard-metrics-scraper
+
+---
+# ------------------- Metrics Scraper Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: dashboard-metrics-scraper
+ template:
+ metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ annotations:
+ seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
+ spec:
+ containers:
+ - name: dashboard-metrics-scraper
+ image: 192.168.5.41:8033/cmii/metrics-scraper:v1.0.8
+ ports:
+ - containerPort: 8000
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 8000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-volume
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ volumes:
+ - name: tmp-volume
+ emptyDir: {}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ==================================================================
+# Custom user configuration (ADMIN & READ-ONLY)
+# ==================================================================
+
+# ------------------- 1. Admin User (full access) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: admin-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: admin-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: admin-user
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- 2. Read-Only User (read-only + log access) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: read-only-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: dashboard-view-with-logs
+rules:
+ - apiGroups: [""]
+ resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumeclaims", "persistentvolumes", "namespaces"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["pods/log"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["apps"]
+ resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["batch"]
+ resources: ["cronjobs", "jobs"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["ingresses", "networkpolicies"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["events.k8s.io"]
+ resources: ["events"]
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: read-only-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: dashboard-view-with-logs
+subjects:
+ - kind: ServiceAccount
+ name: read-only-user
+ namespace: kubernetes-dashboard
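
With the manifest applied, a login token for either account can be minted with kubectl (`create token` requires Kubernetes v1.24+; on older clusters, read the ServiceAccount's token Secret instead). The Dashboard itself is served over HTTPS on NodePort 39999, as defined above.

```bash
# Mint short-lived login tokens for the Dashboard (v1.24+ clusters):
kubectl -n kubernetes-dashboard create token admin-user
kubectl -n kubernetes-dashboard create token read-only-user
# Then log in at https://<node-ip>:39999/ and paste the token.
```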
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-emqx.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-emqx.yaml
new file mode 100644
index 0000000..7eb61e7
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-emqx.yaml
@@ -0,0 +1,664 @@
+---
+# ============== Secret - credential management ==============
+apiVersion: v1
+kind: Secret
+metadata:
+ name: emqx-credentials
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+type: Opaque
+stringData:
+  # Dashboard admin password
+  dashboard-admin-password: "odD8#Ve7.B"
+  # MQTT user password
+  mqtt-admin-password: "odD8#Ve7.B"
+
+---
+# ============== ServiceAccount ==============
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-emqxs
+ namespace: xj-aks-uav-260304
+
+---
+# ============== Role - RBAC ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: helm-emqxs
+ namespace: xj-aks-uav-260304
+rules:
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ - pods
+ verbs:
+ - get
+ - watch
+ - list
+
+---
+# ============== RoleBinding ==============
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: helm-emqxs
+ namespace: xj-aks-uav-260304
+subjects:
+ - kind: ServiceAccount
+ name: helm-emqxs
+ namespace: xj-aks-uav-260304
+roleRef:
+ kind: Role
+ name: helm-emqxs
+ apiGroup: rbac.authorization.k8s.io
+
+---
+# ============== ConfigMap - bootstrap configuration ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-bootstrap-config
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+data:
+  # Main configuration file - overrides the defaults
+ emqx.conf: |
+    # Node configuration
+ node {
+ name = "emqx@${POD_NAME}.helm-emqxs-headless.xj-aks-uav-260304.svc.cluster.local"
+ cookie = "emqx-cluster-cookie-secret"
+ data_dir = "/opt/emqx/data"
+ }
+
+    # Cluster configuration
+ cluster {
+ name = emqxcl
+      # Single node: manual is recommended; multi-node: k8s
+ discovery_strategy = manual
+ k8s {
+ apiserver = "https://kubernetes.default.svc.cluster.local:443"
+ service_name = "helm-emqxs-headless"
+        # address_type can be changed to hostname
+ address_type = dns
+ namespace = "xj-aks-uav-260304"
+ suffix = "svc.cluster.local"
+ }
+ }
+
+    # Logging configuration
+ log {
+ console {
+ enable = true
+ level = info
+ }
+ file {
+ enable = true
+ level = warning
+ path = "/opt/emqx/log"
+ }
+ }
+
+    # Dashboard configuration
+ dashboard {
+ listeners.http {
+ bind = "0.0.0.0:18083"
+ }
+ default_username = "admin"
+ default_password = "public"
+ }
+
+    # Listener configuration
+ listeners.tcp.default {
+ bind = "0.0.0.0:1883"
+ max_connections = 1024000
+ }
+
+ listeners.ws.default {
+ bind = "0.0.0.0:8083"
+ max_connections = 1024000
+ websocket.mqtt_path = "/mqtt"
+ }
+
+ listeners.ssl.default {
+ bind = "0.0.0.0:8883"
+ max_connections = 512000
+ }
+
+    # Authentication - uses the built-in database
+ authentication = [
+ {
+ mechanism = password_based
+ backend = built_in_database
+ user_id_type = username
+ password_hash_algorithm {
+ name = sha256
+ salt_position = suffix
+ }
+        # Bootstrap file path - used to seed the initial users
+ bootstrap_file = "/opt/emqx/data/bootstrap_users.json"
+ bootstrap_type = plain
+ }
+ ]
+
+    # Authorization configuration
+ authorization {
+ no_match = deny
+ deny_action = disconnect
+
+ sources = [
+ {
+ type = built_in_database
+ enable = true
+ }
+ ]
+ }
+
+    # MQTT protocol settings
+ mqtt {
+ max_packet_size = "1MB"
+ max_clientid_len = 65535
+ max_topic_levels = 128
+ max_qos_allowed = 2
+ max_topic_alias = 65535
+ retain_available = true
+ wildcard_subscription = true
+ shared_subscription = true
+ }
+
+---
+# ============== ConfigMap - Users & ACL (strict JSON format) ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-bootstrap-users
+ namespace: xj-aks-uav-260304
+data:
+ bootstrap_users.json: |
+ [
+ { "user_id": "admin", "password": "odD8#Ve7.B", "is_superuser": true },
+ { "user_id": "cmlc", "password": "odD8#Ve7.B", "is_superuser": false }
+ ]
+
+  # [CHANGED] Since jq is available, use a standard JSON array here - the least error-prone format
+ bootstrap_acl.json: |
+ [
+ {
+ "username": "admin",
+ "rules": [
+ {"action": "all", "permission": "allow", "topic": "#"}
+ ]
+ },
+ {
+ "username": "cmlc",
+ "rules": [
+ {"action": "publish", "permission": "allow", "topic": "#"},
+ {"action": "subscribe", "permission": "allow", "topic": "#"}
+ ]
+ }
+ ]
+
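
Because `bootstrap_type = plain` and the ACL import both assume well-formed JSON, it is worth validating the payloads after applying the ConfigMap. A quick check with jq (sketch; assumes jq and kubectl on the operator's machine):

```bash
# Validate both bootstrap payloads straight from the live ConfigMap;
# jq -e exits non-zero on malformed JSON.
kubectl -n xj-aks-uav-260304 get configmap emqx-bootstrap-users \
  -o jsonpath='{.data.bootstrap_users\.json}' | jq -e . >/dev/null && echo "users OK"
kubectl -n xj-aks-uav-260304 get configmap emqx-bootstrap-users \
  -o jsonpath='{.data.bootstrap_acl\.json}' | jq -e . >/dev/null && echo "ACL OK"
```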
+---
+# ============== ConfigMap - init script (revised) ==============
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: emqx-init-dashboard
+ namespace: xj-aks-uav-260304
+data:
+ init-dashboard.sh: |
+ #!/bin/bash
+ set -e
+
+ DASHBOARD_USER="admin"
+ DASHBOARD_PASS="${DASHBOARD_ADMIN_PASSWORD}"
+ EMQX_API="http://localhost:18083/api/v5"
+ ACL_FILE="/bootstrap/bootstrap_acl.json"
+
+    # Helper: print a timestamped log line
+ log() {
+ echo "[$(date +'%H:%M:%S')] $1"
+ }
+
+ log "======================================"
+ log "初始化 Dashboard 与 ACL (Debug Version)"
+ log "======================================"
+
+ # ----------------------------------------------------------------
+    # 1. Wait for the EMQX API to become ready
+ # ----------------------------------------------------------------
+ log "[1/4] 等待 EMQX API 就绪..."
+ for i in $(seq 1 60); do
+ if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
+ log "✓ EMQX API 已就绪"
+ break
+ fi
+ if [ $i -eq 60 ]; then
+ log "✗ EMQX API 启动超时"
+ exit 1
+ fi
+ sleep 5
+ done
+
+ # ----------------------------------------------------------------
+    # 2. Update the Dashboard password
+ # ----------------------------------------------------------------
+ log "[2/4] 检查/更新 Dashboard 密码..."
+
+    # Obtain a token (try the default password first)
+ LOGIN_RESP=$(curl -s -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"public\"}")
+
+ TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+ if [ -n "$TOKEN" ]; then
+ log " 检测到默认密码,正在更新..."
+ curl -s -f -X POST "${EMQX_API}/users/${DASHBOARD_USER}/change_pwd" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "{\"old_pwd\":\"public\",\"new_pwd\":\"${DASHBOARD_PASS}\"}"
+ log " ✓ Dashboard 密码已更新"
+ else
+ log " ℹ 无法使用默认密码登录,跳过更新(可能已修改)"
+ fi
+
+ # ----------------------------------------------------------------
+    # 3. Import the ACL rules
+ # ----------------------------------------------------------------
+ echo "[3/3] 导入ACL规则..."
+
+    # Log in again to get a fresh token
+ LOGIN_RESP=$(curl -sS -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}")
+
+ TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
+
+ if [ -z "$TOKEN" ]; then
+ echo " ✗ 无法获取Token,请检查密码设置"
+ exit 0
+ fi
+
+ if [ -f "$ACL_FILE" ]; then
+ echo " 正在解析 ACL 文件: $ACL_FILE"
+
+ if ! jq -e . "$ACL_FILE" >/dev/null 2>&1; then
+ echo " ✗ ACL 文件 JSON 格式错误,跳过处理"
+ exit 0
+ fi
+
+ jq -c '.[]' "$ACL_FILE" | while read -r user_config; do
+ USERNAME=$(echo "$user_config" | jq -r '.username // empty')
+
+        # ✅ Both PUT and POST need username + rules (username is a required field)
+ REQ_BODY=$(echo "$user_config" | jq -c '{username: .username, rules: .rules}')
+
+ if [ -z "$USERNAME" ]; then
+ echo " ✗ ACL 条目缺少 username,跳过"
+ continue
+ fi
+
+ echo " 配置用户 ${USERNAME} 的ACL规则..."
+
+        # 1) Prefer PUT (overwrite-style update)
+ http_code=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+ -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "$REQ_BODY")
+
+ if [ "$http_code" = "204" ]; then
+ echo " ✓ PUT 更新成功"
+ elif [ "$http_code" = "404" ]; then
+          # 2) If the user does not exist yet, create it with POST
+ http_code2=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
+ -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "$REQ_BODY")
+
+ if [ "$http_code2" = "204" ]; then
+ echo " ✓ POST 创建成功"
+ else
+ echo " ✗ POST 失败 (HTTP ${http_code2}):$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+ exit 1
+ fi
+ else
+ echo " ✗ PUT 失败 (HTTP ${http_code}):$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
+ exit 1
+ fi
+
+        # 3) Verify after import (optional, but strongly recommended)
+ verify_code=$(curl -sS -o /tmp/emqx_acl_verify.json -w '%{http_code}' \
+ -H "Authorization: Bearer ${TOKEN}" \
+ "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}")
+
+ if [ "$verify_code" = "200" ]; then
+ echo " ✓ 验证成功:$(cat /tmp/emqx_acl_verify.json | jq -c '.')"
+ else
+ echo " ✗ 验证失败 (HTTP ${verify_code}):$(cat /tmp/emqx_acl_verify.json 2>/dev/null || true)"
+ exit 1
+ fi
+ done
+
+ echo " ✓ ACL 规则导入完成"
+ else
+ echo " ℹ 未找到 ACL 文件"
+ fi
+
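
The same EMQX v5 endpoints the script drives can be exercised by hand, which helps when debugging the sidecar. A rough sequence using a local port-forward (the password is the value from `emqx-credentials` above):

```bash
# Manual walk-through of the init script's API calls.
kubectl -n xj-aks-uav-260304 port-forward svc/helm-emqxs 18083:18083 &
sleep 2
TOKEN=$(curl -s -X POST http://localhost:18083/api/v5/login \
  -H 'Content-Type: application/json' \
  -d '{"username":"admin","password":"odD8#Ve7.B"}' | jq -r '.token')
# Inspect the ACL rules installed for the cmlc user:
curl -s -H "Authorization: Bearer ${TOKEN}" \
  "http://localhost:18083/api/v5/authorization/sources/built_in_database/rules/users/cmlc" | jq .
```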
+---
+# ============== StatefulSet ==============
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-emqxs
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ replicas: 1
+ serviceName: helm-emqxs-headless
+ podManagementPolicy: Parallel
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+
+ template:
+ metadata:
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: uavcloud.env
+ operator: In
+ values:
+ - 260304
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: cmii.app
+ operator: In
+ values:
+ - helm-emqxs
+ topologyKey: kubernetes.io/hostname
+
+ imagePullSecrets:
+ - name: harborsecret
+
+ serviceAccountName: helm-emqxs
+
+ securityContext:
+ fsGroup: 1000
+ runAsUser: 1000
+
+      # Init container - prepares the bootstrap files
+ initContainers:
+ - name: prepare-bootstrap
+          # Dynamically selected tools image
+ image: 192.168.5.41:8033/cmii/tools:1.0
+ imagePullPolicy: IfNotPresent
+ # =========================================================
+          # Permissions: must run as root for chown to succeed
+ # =========================================================
+ securityContext:
+ runAsUser: 0
+ command:
+ - /bin/sh
+ - -c
+ - |
+ echo "准备bootstrap文件..."
+
+              # Create the data directory
+ mkdir -p /opt/emqx/data
+
+              # Copy the bootstrap file into the data directory
+              # Copy only when the file is absent, to avoid overwriting existing data
+ if [ ! -f /opt/emqx/data/bootstrap_users.json ]; then
+ cp /bootstrap-src/bootstrap_users.json /opt/emqx/data/
+ echo "✓ 已复制用户bootstrap文件"
+ else
+ echo "ℹ 用户bootstrap文件已存在,跳过"
+ fi
+
+              # Set ownership (we have root here, so this succeeds)
+ chown -R 1000:1000 /opt/emqx/data
+
+ echo "✓ Bootstrap准备完成"
+ volumeMounts:
+ - name: emqx-data
+ mountPath: /opt/emqx/data
+ - name: bootstrap-users
+ mountPath: /bootstrap-src
+
+ containers:
+        # Main container - EMQX
+ - name: emqx
+          # Dynamically selected emqx image
+ image: 192.168.5.41:8033/cmii/emqx:5.8.8
+ imagePullPolicy: IfNotPresent
+
+ env:
+            # Pod metadata
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: EMQX_DATA_DIR
+ value: "/opt/emqx/data"
+
+ ports:
+ - name: mqtt
+ containerPort: 1883
+ - name: mqttssl
+ containerPort: 8883
+ - name: ws
+ containerPort: 8083
+ - name: dashboard
+ containerPort: 18083
+ - name: ekka
+ containerPort: 4370
+
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "512Mi"
+ limits:
+ cpu: "2000m"
+ memory: "2Gi"
+
+ livenessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ timeoutSeconds: 10
+ failureThreshold: 3
+
+ readinessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+
+ startupProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ failureThreshold: 30
+
+ volumeMounts:
+ - name: emqx-data
+ mountPath: /opt/emqx/data
+            # Mount the single config file via subPath so the directory is not shadowed
+ - name: bootstrap-config
+ mountPath: /opt/emqx/etc/emqx.conf
+ subPath: emqx.conf
+
+        # Sidecar - initializes the Dashboard password and the ACL
+ - name: init-dashboard
+          # Dynamically selected tools image
+ image: 192.168.5.41:8033/cmii/tools:1.0
+ imagePullPolicy: IfNotPresent
+
+ command:
+ - /bin/sh
+ - -c
+ - |
+              # Wait for the main container to come up
+              echo "Waiting for EMQX to start..."
+ sleep 20
+
+              # Run the initialization
+ /bin/sh /scripts/init-dashboard.sh
+
+              # Keep the container running
+              echo "Initialization complete; entering daemon mode..."
+ while true; do sleep 3600; done
+
+ env:
+ - name: DASHBOARD_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: emqx-credentials
+ key: dashboard-admin-password
+
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "64Mi"
+ limits:
+ cpu: "200m"
+ memory: "128Mi"
+
+ volumeMounts:
+ - name: init-script
+ mountPath: /scripts
+ - name: bootstrap-users
+ mountPath: /bootstrap
+
+ volumes:
+ - name: bootstrap-config
+ configMap:
+ name: emqx-bootstrap-config
+ - name: bootstrap-users
+ configMap:
+ name: emqx-bootstrap-users
+ - name: init-script
+ configMap:
+ name: emqx-init-dashboard
+ defaultMode: 0755
+ - name: emqx-data
+ persistentVolumeClaim:
+ claimName: helm-emqxs
+
+---
+# ============== Service - Headless ==============
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs-headless
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ targetPort: 1883
+ - name: mqttssl
+ port: 8883
+ targetPort: 8883
+ - name: ws
+ port: 8083
+ targetPort: 8083
+ - name: dashboard
+ port: 18083
+ targetPort: 18083
+ - name: ekka
+ port: 4370
+ targetPort: 4370
+
+---
+# ============== Service - NodePort ==============
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: NodePort
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ targetPort: 1883
+ nodePort: 31883
+ - name: dashboard
+ port: 18083
+ targetPort: 18083
+ nodePort: 38085
+ - name: ws
+ port: 8083
+ targetPort: 8083
+ nodePort: 38083
+ - name: mqttssl
+ port: 8883
+ targetPort: 8883
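
A quick end-to-end smoke test of the NodePort listeners, assuming the mosquitto command-line clients are installed and `<node-ip>` is replaced with any cluster node address (the `cmlc` user and password come from the bootstrap users above):

```bash
# Receive one message, then publish it, through NodePort 31883.
mosquitto_sub -h <node-ip> -p 31883 -t 'smoke/#' -u cmlc -P 'odD8#Ve7.B' -C 1 &
mosquitto_pub -h <node-ip> -p 31883 -t smoke/test -m 'hello' -u cmlc -P 'odD8#Ve7.B'
```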
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-frontend.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-frontend.yaml
new file mode 100644
index 0000000..e2a44a4
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-frontend.yaml
@@ -0,0 +1,114 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: nginx-cm
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: frontend
+data:
+ nginx.conf: |
+ server {
+ listen 9528;
+ server_name localhost;
+ gzip on;
+
+ location / {
+ root /home/cmii-platform/dist;
+ index index.html index.htm;
+ }
+
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ root html;
+ }
+ }
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cmii-uav-platform-lite
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/app-version: 2.0
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ template:
+ metadata:
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: cmii-uav-platform-lite
+ image: 192.168.5.41:8033/cmii/cmii-uav-platform-lite:2.0.0-2026012703-noicp
+ imagePullPolicy: Always
+ env:
+ - name: K8S_NAMESPACE
+ value: xj-aks-uav-260304
+ - name: APPLICATION_NAME
+ value: cmii-uav-platform-lite
+ ports:
+ - name: platform-9528
+ containerPort: 9528
+ protocol: TCP
+ resources:
+ limits:
+ cpu: "1"
+ memory: 1Gi
+ requests:
+ cpu: 50m
+ memory: 50Mi
+ volumeMounts:
+ - name: nginx-conf
+ mountPath: /etc/nginx/conf.d/nginx.conf
+ subPath: nginx.conf
+ - name: tenant-prefix
+ subPath: ingress-config.js
+ mountPath: /home/cmii-platform/dist/ingress-config.js
+ volumes:
+ - name: nginx-conf
+ configMap:
+ name: nginx-cm
+ items:
+ - key: nginx.conf
+ path: nginx.conf
+ - name: tenant-prefix
+ configMap:
+ name: tenant-prefix-lite
+ items:
+ - key: ingress-config.js
+ path: ingress-config.js
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmii-uav-platform-lite
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ octopus.control: frontend-app-wdd
+ app.kubernetes.io/version: 2.0
+spec:
+ type: ClusterIP
+ selector:
+ cmii.type: frontend
+ cmii.app: cmii-uav-platform-lite
+ ports:
+ - name: web-svc-port
+ port: 9528
+ protocol: TCP
+ targetPort: 9528
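
Both subPath mounts can be verified inside the running pod. A sketch assuming the image ships an nginx binary, as the `/etc/nginx/conf.d` mount suggests (the pod name is resolved via the `cmii.app` label):

```bash
POD=$(kubectl -n xj-aks-uav-260304 get pod -l cmii.app=cmii-uav-platform-lite \
  -o jsonpath='{.items[0].metadata.name}')
# The tenant config should contain ApplicationShortName: "lite":
kubectl -n xj-aks-uav-260304 exec "$POD" -- cat /home/cmii-platform/dist/ingress-config.js
# And the server block from nginx-cm should parse cleanly:
kubectl -n xj-aks-uav-260304 exec "$POD" -- nginx -t
```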
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-ingress.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-ingress.yaml
new file mode 100644
index 0000000..4b97d2d
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-ingress.yaml
@@ -0,0 +1,832 @@
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: frontend-applications-ingress
+ namespace: xj-aks-uav-260304
+ labels:
+ type: frontend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ rewrite ^(/supervision)$ $1/ redirect;
+ rewrite ^(/supervisionh5)$ $1/ redirect;
+ rewrite ^(/pangu)$ $1/ redirect;
+ rewrite ^(/ai-brain)$ $1/ redirect;
+ rewrite ^(/armypeople)$ $1/ redirect;
+ rewrite ^(/awareness)$ $1/ redirect;
+ rewrite ^(/base)$ $1/ redirect;
+ rewrite ^(/blockchain)$ $1/ redirect;
+ rewrite ^(/classification)$ $1/ redirect;
+ rewrite ^(/cmsportal)$ $1/ redirect;
+ rewrite ^(/detection)$ $1/ redirect;
+ rewrite ^(/dikongzhixingh5)$ $1/ redirect;
+ rewrite ^(/dispatchh5)$ $1/ redirect;
+ rewrite ^(/emergency)$ $1/ redirect;
+ rewrite ^(/eventsh5)$ $1/ redirect;
+ rewrite ^(/flight-control)$ $1/ redirect;
+ rewrite ^(/hljtt)$ $1/ redirect;
+ rewrite ^(/hyper)$ $1/ redirect;
+ rewrite ^(/iot)$ $1/ redirect;
+ rewrite ^(/jiangsuwenlv)$ $1/ redirect;
+ rewrite ^(/lite)$ $1/ redirect;
+ rewrite ^(/logistics)$ $1/ redirect;
+ rewrite ^(/media)$ $1/ redirect;
+ rewrite ^(/mianyangbackend)$ $1/ redirect;
+ rewrite ^(/multiterminal)$ $1/ redirect;
+ rewrite ^(/mws)$ $1/ redirect;
+ rewrite ^(/oms)$ $1/ redirect;
+ rewrite ^(/open)$ $1/ redirect;
+ rewrite ^(/pilot2cloud)$ $1/ redirect;
+ rewrite ^(/qingdao)$ $1/ redirect;
+ rewrite ^(/qinghaitourism)$ $1/ redirect;
+ rewrite ^(/renyike)$ $1/ redirect;
+ rewrite ^(/scanner)$ $1/ redirect;
+ rewrite ^(/security)$ $1/ redirect;
+ rewrite ^(/securityh5)$ $1/ redirect;
+ rewrite ^(/seniclive)$ $1/ redirect;
+ rewrite ^(/share)$ $1/ redirect;
+ rewrite ^(/smauth)$ $1/ redirect;
+ rewrite ^(/smsecret)$ $1/ redirect;
+ rewrite ^(/splice)$ $1/ redirect;
+ rewrite ^(/threedsimulation)$ $1/ redirect;
+ rewrite ^(/traffic)$ $1/ redirect;
+      rewrite ^(/uas)$ $1/ redirect;
+      rewrite ^(/uasms)$ $1/ redirect;
+ rewrite ^(/visualization)$ $1/ redirect;
+ rewrite ^(/uavmsmanager)$ $1/ redirect;
+ rewrite ^(/secenter)$ $1/ redirect;
+spec:
+ rules:
+ - host: fake-domain.xj-aks-uav-260304.io
+ http:
+ paths:
+ - path: /260304/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /260304/supervision/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervision
+ servicePort: 9528
+ - path: /260304/supervisionh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-platform-supervisionh5
+ servicePort: 9528
+ - path: /260304/pangu/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform
+ servicePort: 9528
+ - path: /260304/ai-brain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-ai-brain
+ servicePort: 9528
+ - path: /260304/armypeople/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-armypeople
+ servicePort: 9528
+ - path: /260304/awareness/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-awareness
+ servicePort: 9528
+ - path: /260304/base/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-base
+ servicePort: 9528
+ - path: /260304/blockchain/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-blockchain
+ servicePort: 9528
+ - path: /260304/classification/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-classification
+ servicePort: 9528
+ - path: /260304/cmsportal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-cms-portal
+ servicePort: 9528
+ - path: /260304/detection/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-detection
+ servicePort: 9528
+ - path: /260304/dikongzhixingh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dikongzhixingh5
+ servicePort: 9528
+ - path: /260304/dispatchh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-dispatchh5
+ servicePort: 9528
+ - path: /260304/emergency/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-emergency-rescue
+ servicePort: 9528
+ - path: /260304/eventsh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-eventsh5
+ servicePort: 9528
+ - path: /260304/flight-control/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-flight-control
+ servicePort: 9528
+ - path: /260304/hljtt/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hljtt
+ servicePort: 9528
+ - path: /260304/hyper/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-hyperspectral
+ servicePort: 9528
+ - path: /260304/iot/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-iot-manager
+ servicePort: 9528
+ - path: /260304/jiangsuwenlv/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-jiangsuwenlv
+ servicePort: 9528
+ - path: /260304/lite/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-lite
+ servicePort: 9528
+ - path: /260304/logistics/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-logistics
+ servicePort: 9528
+ - path: /260304/media/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-media
+ servicePort: 9528
+ - path: /260304/mianyangbackend/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mianyangbackend
+ servicePort: 9528
+ - path: /260304/multiterminal/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-multiterminal
+ servicePort: 9528
+ - path: /260304/mws/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-mws
+ servicePort: 9528
+ - path: /260304/oms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-oms
+ servicePort: 9528
+ - path: /260304/open/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-open
+ servicePort: 9528
+ - path: /260304/pilot2cloud/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-pilot2-to-cloud
+ servicePort: 9528
+ - path: /260304/qingdao/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qingdao
+ servicePort: 9528
+ - path: /260304/qinghaitourism/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-qinghaitourism
+ servicePort: 9528
+ - path: /260304/renyike/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-renyike
+ servicePort: 9528
+ - path: /260304/scanner/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-scanner
+ servicePort: 9528
+ - path: /260304/security/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-security
+ servicePort: 9528
+ - path: /260304/securityh5/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-securityh5
+ servicePort: 9528
+ - path: /260304/seniclive/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-seniclive
+ servicePort: 9528
+ - path: /260304/share/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-share
+ servicePort: 9528
+ - path: /260304/smauth/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smauth
+ servicePort: 9528
+ - path: /260304/smsecret/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-smsecret
+ servicePort: 9528
+ - path: /260304/splice/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-splice
+ servicePort: 9528
+ - path: /260304/threedsimulation/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-threedsimulation
+ servicePort: 9528
+ - path: /260304/traffic/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-traffic
+ servicePort: 9528
+ - path: /260304/uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uas
+ servicePort: 9528
+          # NOTE: duplicate of the /260304/uas/ path above; with two identical paths only one backend (uas or uaskny) can receive traffic
+          - path: /260304/uas/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uaskny
+ servicePort: 9528
+ - path: /260304/uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasms
+ servicePort: 9528
+          # NOTE: duplicate of the /260304/uasms/ path above; with two identical paths only one backend (uasms or uasmskny) can receive traffic
+          - path: /260304/uasms/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-uasmskny
+ servicePort: 9528
+ - path: /260304/visualization/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-platform-visualization
+ servicePort: 9528
+ - path: /260304/uavmsmanager/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-manager
+ servicePort: 9528
+ - path: /260304/secenter/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-platform-security-center
+ servicePort: 9528
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: backend-applications-ingress
+ namespace: xj-aks-uav-260304
+ labels:
+ type: backend
+ octopus.control: all-ingress-config-wdd
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+spec:
+ rules:
+ - host: cmii-admin-data.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-data
+ servicePort: 8080
+ - host: cmii-admin-gateway.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - host: cmii-admin-user.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-user
+ servicePort: 8080
+ - host: cmii-app-release.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-app-release
+ servicePort: 8080
+ - host: cmii-open-gateway.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - host: cmii-sky-converge.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
+ - host: cmii-suav-supervision.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-suav-supervision
+ servicePort: 8080
+ - host: cmii-uas-datahub.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-datahub
+ servicePort: 8080
+ - host: cmii-uas-gateway.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - host: cmii-uas-lifecycle.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-lifecycle
+ servicePort: 8080
+ - host: cmii-uav-advanced5g.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-advanced5g
+ servicePort: 8080
+ - host: cmii-uav-airspace.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-airspace
+ servicePort: 8080
+ - host: cmii-uav-alarm.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-alarm
+ servicePort: 8080
+ - host: cmii-uav-autowaypoint.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-autowaypoint
+ servicePort: 8080
+ - host: cmii-uav-brain.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-brain
+ servicePort: 8080
+ - host: cmii-uav-bridge.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-bridge
+ servicePort: 8080
+ - host: cmii-uav-cloud-live.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cloud-live
+ servicePort: 8080
+ - host: cmii-uav-clusters.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-clusters
+ servicePort: 8080
+ - host: cmii-uav-cms.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-cms
+ servicePort: 8080
+ - host: cmii-uav-data-post-process.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-data-post-process
+ servicePort: 8080
+ - host: cmii-uav-depotautoreturn.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-depotautoreturn
+ servicePort: 8080
+ - host: cmii-uav-developer.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-developer
+ servicePort: 8080
+ - host: cmii-uav-device.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-device
+ servicePort: 8080
+ - host: cmii-uav-emergency.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-emergency
+ servicePort: 8080
+ - host: cmii-uav-fwdd.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-fwdd
+ servicePort: 8080
+ - host: cmii-uav-gateway.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - host: cmii-uav-gis-server.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gis-server
+ servicePort: 8080
+ - host: cmii-uav-grid-datasource.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-datasource
+ servicePort: 8080
+ - host: cmii-uav-grid-engine.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-engine
+ servicePort: 8080
+ - host: cmii-uav-grid-manage.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-grid-manage
+ servicePort: 8080
+ - host: cmii-uav-industrial-portfolio.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-industrial-portfolio
+ servicePort: 8080
+ - host: cmii-uav-integration.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-integration
+ servicePort: 8080
+ - host: cmii-uav-iot-dispatcher.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-dispatcher
+ servicePort: 8080
+ - host: cmii-uav-iot-manager.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-iot-manager
+ servicePort: 8080
+ - host: cmii-uav-kpi-monitor.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-kpi-monitor
+ servicePort: 8080
+ - host: cmii-uav-logger.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-logger
+ servicePort: 8080
+ - host: cmii-uav-material-warehouse.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-material-warehouse
+ servicePort: 8080
+ - host: cmii-uav-mission.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mission
+ servicePort: 8080
+ - host: cmii-uav-mqtthandler.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-mqtthandler
+ servicePort: 8080
+ - host: cmii-uav-multilink.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-multilink
+ servicePort: 8080
+ - host: cmii-uav-notice.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-notice
+ servicePort: 8080
+ - host: cmii-uav-oauth.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-oauth
+ servicePort: 8080
+ - host: cmii-uav-process.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-process
+ servicePort: 8080
+ - host: cmii-uav-sec-awareness.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sec-awareness
+ servicePort: 8080
+ - host: cmii-uav-security-trace.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-security-trace
+ servicePort: 8080
+ - host: cmii-uav-sense-adapter.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sense-adapter
+ servicePort: 8080
+ - host: cmii-uav-surveillance.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-surveillance
+ servicePort: 8080
+ - host: cmii-uav-sync.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-sync
+ servicePort: 8080
+ - host: cmii-uav-tcp-server.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tcp-server
+ servicePort: 8080
+ - host: cmii-uav-threedsimulation.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-threedsimulation
+ servicePort: 8080
+ - host: cmii-uav-tower.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-tower
+ servicePort: 8080
+ - host: cmii-uav-user.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-user
+ servicePort: 8080
+ - host: cmii-uav-watchdog.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-watchdog
+ servicePort: 8080
+ - host: cmii-uav-waypoint.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-waypoint
+ servicePort: 8080
+ - host: cmii-uavms-pyfusion.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-pyfusion
+ servicePort: 8080
+ - host: cmii-uavms-security-center.uavcloud-260304.io
+ http:
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uavms-security-center
+ servicePort: 8080
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: all-gateways-ingress
+ namespace: xj-aks-uav-260304
+ labels:
+ type: api-gateway
+ octopus.control: all-ingress-config-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ kubernetes.io/ingress.class: "nginx"
+ nginx.ingress.kubernetes.io/enable-cors: "true"
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ proxy_set_header upgradePrefix $http_upgrade;
+ proxy_set_header Connection "upgradePrefix";
+spec:
+ rules:
+ - host: fake-domain.xj-aks-uav-260304.io
+ http:
+ paths:
+ - path: /260304/oms/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-admin-gateway
+ servicePort: 8080
+ - path: /260304/open/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-open-gateway
+ servicePort: 8080
+ - path: /260304/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uav-gateway
+ servicePort: 8080
+ - path: /260304/uas/api/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-uas-gateway
+ servicePort: 8080
+ - path: /260304/converge/?(.*)
+ pathType: ImplementationSpecific
+ backend:
+ serviceName: cmii-sky-converge
+ servicePort: 8080
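
With `rewrite-target: /$1` and capture groups like `/260304/api/?(.*)`, the tenant prefix is stripped before the request reaches the backend. A hand test (the ingress controller address and the trailing path are placeholders):

```bash
# The gateway should receive "/<rest>" for a request to "/260304/api/<rest>".
curl -sv -H 'Host: fake-domain.xj-aks-uav-260304.io' \
  "http://<ingress-controller-ip>/260304/api/<rest>"
```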
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-mongo.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-mongo.yaml
new file mode 100644
index 0000000..bd9d471
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-mongo.yaml
@@ -0,0 +1,78 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mongo
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ ports:
+ - port: 27017
+ name: server-27017
+ targetPort: 27017
+ nodePort: 37017
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mongo
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+spec:
+ serviceName: helm-mongo
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-mongo
+ cmii.type: middleware
+ helm.sh/chart: mongo-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: 2.0
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: helm-mongo
+ image: 192.168.5.41:8033/cmii/mongo:5.0
+ resources: {}
+ ports:
+ - containerPort: 27017
+ name: mongo27017
+ protocol: TCP
+ env:
+ - name: MONGO_INITDB_ROOT_USERNAME
+ value: cmlc
+ - name: MONGO_INITDB_ROOT_PASSWORD
+ value: REdPza8#oVlt
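+            # NOTE: MongoDB root credentials are inlined as plain env values here rather than read from a Secret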
+ volumeMounts:
+ - name: mongo-data
+ mountPath: /data/db
+ readOnly: false
+ subPath: default/helm-mongo/data/db
+ volumes:
+ - name: mongo-data
+ persistentVolumeClaim:
+ claimName: helm-mongo
+---
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-mysql.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-mysql.yaml
new file mode 100644
index 0000000..64de174
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-mysql.yaml
@@ -0,0 +1,410 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-mysql
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ annotations: {}
+secrets:
+ - name: helm-mysql
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-mysql
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
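+  # Secret "data" values are base64-encoded, as the Kubernetes API requires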
+ mysql-root-password: "UXpmWFFoZDNiUQ=="
+ mysql-password: "S0F0cm5PckFKNw=="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ my.cnf: |-
+
+ [mysqld]
+ port=3306
+ basedir=/opt/bitnami/mysql
+ datadir=/bitnami/mysql/data
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ log-error=/bitnami/mysql/data/error.log
+ general_log_file = /bitnami/mysql/data/general.log
+ slow_query_log_file = /bitnami/mysql/data/slow.log
+ innodb_data_file_path = ibdata1:512M:autoextend
+ innodb_buffer_pool_size = 512M
+ innodb_buffer_pool_instances = 2
+ innodb_log_file_size = 512M
+    innodb_log_files_in_group = 4
+ log-bin = /bitnami/mysql/data/mysql-bin
+ max_binlog_size=1G
+ transaction_isolation = REPEATABLE-READ
+ default_storage_engine = innodb
+ character-set-server = utf8mb4
+ collation-server=utf8mb4_bin
+ binlog_format = ROW
+ binlog_rows_query_log_events=on
+ binlog_cache_size=4M
+ binlog_expire_logs_seconds = 1296000
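+    # 1296000 seconds = 15 days of binlog retention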
+ max_binlog_cache_size=2G
+ gtid_mode = on
+ enforce_gtid_consistency = 1
+ sync_binlog = 1
+ innodb_flush_log_at_trx_commit = 1
+ innodb_flush_method = O_DIRECT
+ log_slave_updates=1
+ relay_log_recovery = 1
+ relay-log-purge = 1
+ default_time_zone = '+08:00'
+ lower_case_table_names=1
+ log_bin_trust_function_creators=1
+ group_concat_max_len=67108864
+ innodb_io_capacity = 4000
+ innodb_io_capacity_max = 8000
+ innodb_flush_sync = 0
+ innodb_flush_neighbors = 0
+ innodb_write_io_threads = 8
+ innodb_read_io_threads = 8
+ innodb_purge_threads = 4
+ innodb_page_cleaners = 4
+ innodb_open_files = 65535
+ innodb_max_dirty_pages_pct = 50
+ innodb_lru_scan_depth = 4000
+ innodb_checksum_algorithm = crc32
+ innodb_lock_wait_timeout = 10
+ innodb_rollback_on_timeout = 1
+ innodb_print_all_deadlocks = 1
+ innodb_file_per_table = 1
+ innodb_online_alter_log_max_size = 4G
+ innodb_stats_on_metadata = 0
+ innodb_thread_concurrency = 0
+ innodb_sync_spin_loops = 100
+ innodb_spin_wait_delay = 30
+ lock_wait_timeout = 3600
+ slow_query_log = 1
+ long_query_time = 10
+ log_queries_not_using_indexes =1
+ log_throttle_queries_not_using_indexes = 60
+ min_examined_row_limit = 100
+ log_slow_admin_statements = 1
+ log_slow_slave_statements = 1
+ default_authentication_plugin=mysql_native_password
+ skip-name-resolve=1
+ explicit_defaults_for_timestamp=1
+ plugin_dir=/opt/bitnami/mysql/plugin
+ max_connections = 2000
+ max_connect_errors = 1000000
+ table_definition_cache=2000
+ table_open_cache_instances=64
+ tablespace_definition_cache=1024
+ thread_cache_size=256
+ interactive_timeout = 600
+ wait_timeout = 600
+ tmpdir=/opt/bitnami/mysql/tmp
+ max_allowed_packet=32M
+ bind-address=0.0.0.0
+ performance_schema = 1
+ performance_schema_instrument = '%memory%=on'
+ performance_schema_instrument = '%lock%=on'
+ innodb_monitor_enable=ALL
+
+ [mysql]
+ no-auto-rehash
+
+ [mysqldump]
+ quick
+ max_allowed_packet = 32M
+
+ [client]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ default-character-set=UTF8
+ plugin_dir=/opt/bitnami/mysql/plugin
+
+ [manager]
+ port=3306
+ socket=/opt/bitnami/mysql/tmp/mysql.sock
+ pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-mysql-init-scripts
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: primary
+data:
+ create_users_grants_core.sql: |-
+ create user zyly@'%' identified by 'Cmii@451315';
+ grant select on *.* to zyly@'%';
+ create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
+ grant all on *.* to zyly_qc@'%';
+ create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
+ grant all on *.* to k8s_admin@'%';
+ create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
+ grant all on *.* to audit_dba@'%';
+ create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
+ GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
+ create user monitor@'%' identified by 'PL3#nGtrWbf-';
+ grant REPLICATION CLIENT on *.* to monitor@'%';
+ flush privileges;
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: cmii-mysql
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xj-aks-uav-260304
+ cmii.app: mysql
+ cmii.type: middleware
+ octopus.control: mysql-db-wdd
+spec:
+ ports:
+ - name: mysql
+ protocol: TCP
+ port: 13306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/component: primary
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xj-aks-uav-260304
+ cmii.app: mysql
+ cmii.type: middleware
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql-headless
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ ports:
+ - name: mysql
+ port: 3306
+ targetPort: mysql
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xj-aks-uav-260304
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-mysql
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ annotations: {}
+spec:
+ type: NodePort
+ ports:
+ - name: mysql
+ port: 3306
+ protocol: TCP
+ targetPort: mysql
+ nodePort: 33306
+ selector:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xj-aks-uav-260304
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-mysql
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: mysql-db
+ app.kubernetes.io/release: xj-aks-uav-260304
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ serviceName: helm-mysql
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ annotations:
+ checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
+ labels:
+ app.kubernetes.io/name: mysql-db
+ octopus.control: mysql-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: mysql
+ app.kubernetes.io/component: primary
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-mysql
+ affinity: {}
+ nodeSelector:
+ mysql-deploy: "true"
+ securityContext:
+ fsGroup: 1001
+ initContainers:
+ - name: change-volume-permissions
+ image: 192.168.5.41:8033/cmii/os-shell:12-debian-12-r51
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ chown -R 1001:1001 /bitnami/mysql
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ containers:
+ - name: mysql
+ image: 192.168.5.41:8033/cmii/mysql:8.1.0-debian-11-r42
+ imagePullPolicy: "IfNotPresent"
+ securityContext:
+ runAsUser: 1001
+ env:
+ - name: BITNAMI_DEBUG
+ value: "true"
+ - name: MYSQL_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-mysql
+ key: mysql-root-password
+ - name: MYSQL_DATABASE
+ value: "cmii"
+ ports:
+ - name: mysql
+ containerPort: 3306
+ livenessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
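+                # prefer the mounted password file when present; otherwise use the env var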
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ readinessProbe:
+ failureThreshold: 5
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 3
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ startupProbe:
+ failureThreshold: 60
+ initialDelaySeconds: 120
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ password_aux="${MYSQL_ROOT_PASSWORD:-}"
+ if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
+ password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
+ fi
+ mysqladmin status -uroot -p"${password_aux}"
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: mysql-data
+ mountPath: /bitnami/mysql
+ - name: custom-init-scripts
+ mountPath: /docker-entrypoint-initdb.d
+ - name: config
+ mountPath: /opt/bitnami/mysql/conf/my.cnf
+ subPath: my.cnf
+ volumes:
+ - name: config
+ configMap:
+ name: helm-mysql
+ - name: custom-init-scripts
+ configMap:
+ name: helm-mysql-init-scripts
+ - name: mysql-data
+ hostPath:
+ path: /var/lib/docker/mysql-pv/xj-aks-uav-260304/
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-nacos.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-nacos.yaml
new file mode 100644
index 0000000..162fde2
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-nacos.yaml
@@ -0,0 +1,130 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-nacos-cm
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: 2.0
+data:
+ mysql.db.name: "cmii_nacos_config"
+ mysql.db.host: "helm-mysql"
+ mysql.port: "3306"
+ mysql.user: "k8s_admin"
+ mysql.password: "fP#UaH6qQ3)8"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-nacos
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: 2.0
+spec:
+ type: NodePort
+ selector:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ ports:
+ - port: 8848
+ name: server
+ targetPort: 8848
+ nodePort: 38848
+ - port: 9848
+ name: server12
+ targetPort: 9848
+ - port: 9849
+ name: server23
+ targetPort: 9849
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-nacos
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/version: 2.0
+spec:
+ serviceName: helm-nacos
+ replicas: 1
+ selector:
+ matchLabels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ template:
+ metadata:
+ labels:
+ cmii.app: helm-nacos
+ cmii.type: middleware
+ octopus.control: nacos-wdd
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/version: 2.0
+ annotations:
+ pod.alpha.kubernetes.io/initialized: "true"
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ containers:
+ - name: nacos-server
+ image: 192.168.5.41:8033/cmii/nacos-server:v2.1.2
+ ports:
+ - containerPort: 8848
+ name: dashboard
+ - containerPort: 9848
+ name: tcp-9848
+ - containerPort: 9849
+ name: tcp-9849
+ env:
+ - name: NACOS_AUTH_ENABLE
+ value: "false"
+ - name: NACOS_REPLICAS
+ value: "1"
+ - name: MYSQL_SERVICE_DB_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.name
+ - name: MYSQL_SERVICE_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.port
+ - name: MYSQL_SERVICE_USER
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.user
+ - name: MYSQL_SERVICE_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.password
+ - name: MYSQL_SERVICE_HOST
+ valueFrom:
+ configMapKeyRef:
+ name: helm-nacos-cm
+ key: mysql.db.host
+ - name: NACOS_SERVER_PORT
+ value: "8848"
+ - name: NACOS_APPLICATION_PORT
+ value: "8848"
+ - name: PREFER_HOST_MODE
+ value: "hostname"
+ - name: MODE
+ value: standalone
+ - name: SPRING_DATASOURCE_PLATFORM
+ value: mysql
+---
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-nfs-test.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-nfs-test.yaml
new file mode 100644
index 0000000..12dab6c
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-nfs-test.yaml
@@ -0,0 +1,38 @@
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: test-claim
+ annotations:
+    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: nfs-prod-distribute
+ resources:
+ requests:
+ storage: 1Mi
+---
+kind: Pod
+apiVersion: v1
+metadata:
+ name: test-pod
+spec:
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: test-pod
+ image: 192.168.5.41:8033/cmii/busybox:latest
+ command:
+ - "/bin/sh"
+ args:
+ - "-c"
+ - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
+ volumeMounts:
+ - name: nfs-pvc
+ mountPath: "/mnt"
+ restartPolicy: "Never"
+ volumes:
+ - name: nfs-pvc
+ persistentVolumeClaim:
+        claimName: test-claim # must match the PVC name above
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-nfs.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-nfs.yaml
new file mode 100644
index 0000000..5e982e4
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-nfs.yaml
@@ -0,0 +1,114 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+  namespace: kube-system # adjust to your environment; the same applies below
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: nfs-client-provisioner-runner
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: run-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+# name: nfs-client-provisioner-runner
+ name: cluster-admin
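+  # NOTE: this binds the full cluster-admin role instead of the scoped nfs-client-provisioner-runner role commented out above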
+ apiGroup: rbac.authorization.k8s.io
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: leader-locking-nfs-client-provisioner
+subjects:
+ - kind: ServiceAccount
+ name: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+ namespace: kube-system
+roleRef:
+ kind: Role
+ name: leader-locking-nfs-client-provisioner
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: nfs-prod-distribute
+provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment
+parameters:
+  archiveOnDelete: "false"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nfs-client-provisioner
+ labels:
+ app: nfs-client-provisioner
+ # replace with namespace where provisioner is deployed
+  namespace: kube-system # must match the namespace used in the RBAC objects above
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nfs-client-provisioner
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ app: nfs-client-provisioner
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: nfs-client-provisioner
+ containers:
+ - name: nfs-client-provisioner
+ image: 192.168.5.41:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
+ volumeMounts:
+ - name: nfs-client-root
+ mountPath: /persistentvolumes
+ env:
+ - name: PROVISIONER_NAME
+ value: cmlc-nfs-storage
+ - name: NFS_SERVER
+ value: 192.168.5.141
+ - name: NFS_PATH
+ value: /var/lib/docker/nfs_data
+ volumes:
+ - name: nfs-client-root
+ nfs:
+ server: 192.168.5.141
+ path: /var/lib/docker/nfs_data
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-pvc.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-pvc.yaml
new file mode 100644
index 0000000..813046a
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-pvc.yaml
@@ -0,0 +1,76 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: nfs-backend-log-pvc
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: middleware-base
+ cmii.app: nfs-backend-log-pvc
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: 2.0
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 100Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-emqxs
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-emqxs
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: 2.0
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-mongo
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-mongo
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: 2.0
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 30Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: helm-rabbitmq
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.type: middleware-base
+ cmii.app: helm-rabbitmq
+ helm.sh/chart: all-persistence-volume-claims-1.1.0
+ app.kubernetes.io/version: 2.0
+spec:
+ storageClassName: nfs-prod-distribute
+ accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ resources:
+ requests:
+ storage: 20Gi
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-rabbitmq.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-rabbitmq.yaml
new file mode 100644
index 0000000..96ea7b1
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-rabbitmq.yaml
@@ -0,0 +1,328 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-rabbitmq
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: rabbitmq
+automountServiceAccountToken: true
+secrets:
+ - name: helm-rabbitmq
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-rabbitmq
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: rabbitmq
+type: Opaque
+data:
+ rabbitmq-password: "blljUk45MXIuX2hq"
+ rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-rabbitmq-config
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: rabbitmq
+data:
+ rabbitmq.conf: |-
+ ## Username and password
+ ##
+ default_user = admin
+ default_pass = nYcRN91r._hj
+ ## Clustering
+ ##
+ cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+ cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+ cluster_formation.node_cleanup.interval = 10
+ cluster_formation.node_cleanup.only_log_warning = true
+ cluster_partition_handling = autoheal
+ # queue master locator
+ queue_master_locator = min-masters
+ # enable guest user
+ loopback_users.guest = false
+ #default_vhost = default-vhost
+ #disk_free_limit.absolute = 50MB
+ #load_definitions = /app/load_definition.json
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: rabbitmq
+rules:
+ - apiGroups: [""]
+ resources: ["endpoints"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-rabbitmq-endpoint-reader
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: rabbitmq
+subjects:
+ - kind: ServiceAccount
+ name: helm-rabbitmq
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: helm-rabbitmq-endpoint-reader
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq-headless
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ clusterIP: None
+ ports:
+ - name: epmd
+ port: 4369
+ targetPort: epmd
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ - name: dist
+ port: 25672
+ targetPort: dist
+ - name: dashboard
+ port: 15672
+      targetPort: dashboard
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: xj-aks-uav-260304
+ publishNotReadyAddresses: true
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-rabbitmq
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ type: NodePort
+ ports:
+ - name: amqp
+ port: 5672
+ targetPort: amqp
+ nodePort: 35672
+ - name: dashboard
+ port: 15672
+ targetPort: dashboard
+ nodePort: 36675
+ selector:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: xj-aks-uav-260304
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-rabbitmq
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: rabbitmq
+spec:
+ serviceName: helm-rabbitmq-headless
+ podManagementPolicy: OrderedReady
+ replicas: 1
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: helm-rabbitmq
+ app.kubernetes.io/release: xj-aks-uav-260304
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: helm-rabbitmq
+ helm.sh/chart: rabbitmq-8.26.1
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: rabbitmq
+ annotations:
+ checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
+ checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-rabbitmq
+ affinity: {}
+ securityContext:
+ fsGroup: 5001
+ runAsUser: 5001
+ terminationGracePeriodSeconds: 120
+ initContainers:
+ - name: volume-permissions
+ image: 192.168.5.41:8033/cmii/os-shell:12-debian-12-r51
+ imagePullPolicy: "Always"
+ command:
+ - /bin/bash
+ args:
+ - -ec
+ - |
+ mkdir -p "/bitnami/rabbitmq/mnesia"
+ chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
+ securityContext:
+ runAsUser: 0
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ containers:
+ - name: rabbitmq
+ image: 192.168.5.41:8033/cmii/rabbitmq:3.13.7-debian-12-r5
+ imagePullPolicy: "Always"
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: K8S_SERVICE_NAME
+ value: "helm-rabbitmq-headless"
+ - name: K8S_ADDRESS_TYPE
+ value: hostname
+ - name: RABBITMQ_FORCE_BOOT
+ value: "no"
+ - name: RABBITMQ_NODE_NAME
+ value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: K8S_HOSTNAME_SUFFIX
+ value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
+ - name: RABBITMQ_MNESIA_DIR
+ value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
+ - name: RABBITMQ_LDAP_ENABLE
+ value: "no"
+ - name: RABBITMQ_LOGS
+ value: "-"
+ - name: RABBITMQ_ULIMIT_NOFILES
+ value: "65536"
+ - name: RABBITMQ_USE_LONGNAME
+ value: "true"
+ - name: RABBITMQ_ERL_COOKIE
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-erlang-cookie
+ - name: RABBITMQ_LOAD_DEFINITIONS
+ value: "no"
+ - name: RABBITMQ_SECURE_PASSWORD
+ value: "yes"
+ - name: RABBITMQ_USERNAME
+ value: "admin"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-rabbitmq
+ key: rabbitmq-password
+ - name: RABBITMQ_PLUGINS
+ value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
+ ports:
+ - name: amqp
+ containerPort: 5672
+ - name: dist
+ containerPort: 25672
+ - name: dashboard
+ containerPort: 15672
+ - name: epmd
+ containerPort: 4369
+ livenessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q ping
+ initialDelaySeconds: 120
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
+ /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
+ else
+ rabbitmqctl stop_app
+ fi
+ resources:
+ limits: {}
+ requests: {}
+ volumeMounts:
+ - name: configuration
+ mountPath: /bitnami/rabbitmq/conf
+ - name: data
+ mountPath: /bitnami/rabbitmq/mnesia
+ volumes:
+ - name: configuration
+ configMap:
+ name: helm-rabbitmq-config
+ items:
+ - key: rabbitmq.conf
+ path: rabbitmq.conf
+ - name: data
+ persistentVolumeClaim:
+ claimName: helm-rabbitmq
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-redis.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-redis.yaml
new file mode 100644
index 0000000..00c722d
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-redis.yaml
@@ -0,0 +1,585 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+automountServiceAccountToken: true
+metadata:
+ name: helm-redis
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: helm-redis
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+type: Opaque
+data:
+ redis-password: "TWNhY2hlQDQ1MjI="
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-configuration
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+data:
+ redis.conf: |-
+ # User-supplied common configuration:
+ # Enable AOF https://redis.io/topics/persistence#append-only-file
+ appendonly yes
+ # Disable RDB persistence, AOF persistence already enabled.
+ save ""
+ # End of common configuration
+ master.conf: |-
+ dir /data
+ # User-supplied master configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of master configuration
+ replica.conf: |-
+ dir /data
+ slave-read-only yes
+ # User-supplied replica configuration:
+ rename-command FLUSHDB ""
+ rename-command FLUSHALL ""
+ # End of replica configuration
+---
+# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-health
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+data:
+ ping_readiness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_local.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h localhost \
+ -p $REDIS_PORT \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_liveness_master.sh: |-
+ #!/bin/bash
+
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
+ response=$(
+ timeout -s 3 $1 \
+ redis-cli \
+ -h $REDIS_MASTER_HOST \
+ -p $REDIS_MASTER_PORT_NUMBER \
+ ping
+ )
+ if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
+ echo "$response"
+ exit 1
+ fi
+ ping_readiness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+ ping_liveness_local_and_master.sh: |-
+ script_dir="$(dirname "$0")"
+ exit_status=0
+ "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
+ "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
+ exit $exit_status
+---
+# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-redis-scripts
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+data:
+ start-master.sh: |
+ #!/bin/bash
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
+ exec redis-server "${ARGS[@]}"
+ start-replica.sh: |
+ #!/bin/bash
+
+ get_port() {
+ hostname="$1"
+ type="$2"
+
+ port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
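+      # indirect expansion: reads the variable whose name is stored in $port_var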
+ port=${!port_var}
+
+ if [ -z "$port" ]; then
+ case $type in
+ "SENTINEL")
+ echo 26379
+ ;;
+ "REDIS")
+ echo 6379
+ ;;
+ esac
+ else
+ echo $port
+ fi
+ }
+
+ get_full_hostname() {
+ hostname="$1"
+ echo "${hostname}.${HEADLESS_SERVICE}"
+ }
+
+ REDISPORT=$(get_port "$HOSTNAME" "REDIS")
+
+ [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
+ [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
+ if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
+ fi
+ if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+ cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+ fi
+
+ echo "" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
+ echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
+ ARGS=("--port" "${REDIS_PORT}")
+ ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
+ ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+ ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
+ ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
+ exec redis-server "${ARGS[@]}"
+---
+# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-headless
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xj-aks-uav-260304
+---
+# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-master
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ type: ClusterIP
+
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xj-aks-uav-260304
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-redis-replicas
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ type: ClusterIP
+ ports:
+ - name: tcp-redis
+ port: 6379
+ targetPort: redis
+ nodePort: null
+ selector:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/component: replica
+---
+# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-master
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xj-aks-uav-260304
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ cmii.type: middleware
+ cmii.app: redis
+ app.kubernetes.io/component: master
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ affinity: {}
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ imagePullSecrets:
+ - name: harborsecret
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 192.168.5.41:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-master.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: master
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ # One second longer than command timeout should prevent generation of zombie processes.
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc/
+ - name: tmp
+ mountPath: /tmp
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: tmp
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+---
+# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-redis-replicas
+ namespace: xj-aks-uav-260304
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-db
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/component: replica
+ serviceName: helm-redis-headless
+ updateStrategy:
+ rollingUpdate: {}
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-db
+ octopus.control: redis-db-wdd
+ app.kubernetes.io/release: xj-aks-uav-260304
+ app.kubernetes.io/managed-by: octopus
+ app.kubernetes.io/component: replica
+ annotations:
+ checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
+ checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
+ checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
+ checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ securityContext:
+ fsGroup: 1001
+ serviceAccountName: helm-redis
+ terminationGracePeriodSeconds: 30
+ containers:
+ - name: redis
+ image: 192.168.5.41:8033/cmii/redis:6.2.14-debian-11-r1
+ imagePullPolicy: "Always"
+ securityContext:
+ runAsUser: 1001
+ command:
+ - /bin/bash
+ args:
+ - -c
+ - /opt/bitnami/scripts/start-scripts/start-replica.sh
+ env:
+ - name: BITNAMI_DEBUG
+ value: "false"
+ - name: REDIS_REPLICATION_MODE
+ value: slave
+ - name: REDIS_MASTER_HOST
+ value: helm-redis-master-0.helm-redis-headless.xj-aks-uav-260304.svc.cluster.local
+ - name: REDIS_MASTER_PORT_NUMBER
+ value: "6379"
+ - name: ALLOW_EMPTY_PASSWORD
+ value: "no"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_MASTER_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: helm-redis
+ key: redis-password
+ - name: REDIS_TLS_ENABLED
+ value: "no"
+ - name: REDIS_PORT
+ value: "6379"
+ ports:
+ - name: redis
+ containerPort: 6379
+ livenessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 6
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_liveness_local_and_master.sh 5
+ readinessProbe:
+ initialDelaySeconds: 20
+ periodSeconds: 5
+ timeoutSeconds: 2
+ successThreshold: 1
+ failureThreshold: 5
+ exec:
+ command:
+ - sh
+ - -c
+ - /health/ping_readiness_local_and_master.sh 1
+ resources:
+ limits:
+ cpu: "2"
+ memory: 8Gi
+ requests:
+ cpu: "2"
+ memory: 8Gi
+ volumeMounts:
+ - name: start-scripts
+ mountPath: /opt/bitnami/scripts/start-scripts
+ - name: health
+ mountPath: /health
+ - name: redis-data
+ mountPath: /data
+ - name: config
+ mountPath: /opt/bitnami/redis/mounted-etc
+ - name: redis-tmp-conf
+ mountPath: /opt/bitnami/redis/etc
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: helm-redis-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: helm-redis-health
+ defaultMode: 0755
+ - name: config
+ configMap:
+ name: helm-redis-configuration
+ - name: redis-tmp-conf
+ emptyDir: {}
+ - name: redis-data
+ emptyDir: {}
+
diff --git a/agent-common/real_project/xj-aks-uav-260304/k8s-srs.yaml b/agent-common/real_project/xj-aks-uav-260304/k8s-srs.yaml
new file mode 100644
index 0000000..ce73868
--- /dev/null
+++ b/agent-common/real_project/xj-aks-uav-260304/k8s-srs.yaml
@@ -0,0 +1,496 @@
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-srs-cm
+ namespace: xj-aks-uav-260304
+ labels:
+ cmii.app: live-srs
+ cmii.type: live
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+data:
+ srs.rtc.conf: |-
+ listen 31935;
+ max_connections 4096;
+ srs_log_tank console;
+ srs_log_level info;
+ srs_log_file /home/srs.log;
+ daemon off;
+ http_api {
+ enabled on;
+ listen 1985;
+ crossdomain on;
+ }
+ stats {
+ network 0;
+ }
+ http_server {
+ enabled on;
+ listen 8080;
+ dir /home/hls;
+ }
+ srt_server {
+ enabled on;
+ listen 30556;
+ maxbw 1000000000;
+ connect_timeout 4000;
+ peerlatency 600;
+ recvlatency 600;
+ }
+ rtc_server {
+ enabled on;
+ listen 30090;
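+        # candidate is resolved by SRS from the CANDIDATE env var set on the container below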
+ candidate $CANDIDATE;
+ }
+ vhost __defaultVhost__ {
+ http_hooks {
+ enabled on;
+ on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
+ }
+ http_remux {
+ enabled on;
+ }
+ rtc {
+ enabled on;
+ rtmp_to_rtc on;
+ rtc_to_rtmp on;
+ keep_bframe off;
+ }
+ tcp_nodelay on;
+ min_latency on;
+ play {
+ gop_cache off;
+ mw_latency 100;
+ mw_msgs 10;
+ }
+ publish {
+ firstpkt_timeout 8000;
+ normal_timeout 4000;
+ mr on;
+ }
+ dvr {
+ enabled off;
+ dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
+ dvr_plan session;
+ }
+ hls {
+ enabled on;
+ hls_path /home/hls;
+ hls_fragment 10;
+ hls_window 60;
+ hls_m3u8_file [app]/[stream].m3u8;
+ hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
+ hls_cleanup on;
+ hls_entry_prefix http://120.205.80.23:8088;
+ }
+ }
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc-exporter
+ namespace: xj-aks-uav-260304
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ nodePort: 31935
+ - name: rtc
+ protocol: UDP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: rtc-tcp
+ protocol: TCP
+ port: 30090
+ targetPort: 30090
+ nodePort: 30090
+ - name: srt
+ protocol: UDP
+ port: 30556
+ targetPort: 30556
+ nodePort: 30556
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ nodePort: 30080
+ selector:
+ srs-role: rtc
+ type: NodePort
+ sessionAffinity: None
+ externalTrafficPolicy: Cluster
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srs-svc
+ namespace: xj-aks-uav-260304
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8080
+ targetPort: 8080
+ - name: api
+ protocol: TCP
+ port: 1985
+ targetPort: 1985
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-srsrtc-svc
+ namespace: xj-aks-uav-260304
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - name: rtmp
+ protocol: TCP
+ port: 31935
+ targetPort: 31935
+ selector:
+ srs-role: rtc
+ type: ClusterIP
+ sessionAffinity: None
+
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: helm-live-srs-rtc
+ namespace: xj-aks-uav-260304
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-srs
+ cmii.type: live
+ helm.sh/chart: cmlc-live-srs-rtc-2.0.0
+ srs-role: rtc
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ srs-role: rtc
+ template:
+ metadata:
+ labels:
+ srs-role: rtc
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-srs-cm
+ items:
+ - key: srs.rtc.conf
+ path: docker.conf
+ defaultMode: 420
+ - name: srs-vol
+ emptyDir:
+ sizeLimit: 8Gi
+ containers:
+ - name: srs-rtc
+ image: 192.168.5.41:8033/cmii/srs:v5.0.195
+ ports:
+ - name: srs-rtmp
+ containerPort: 31935
+ protocol: TCP
+ - name: srs-api
+ containerPort: 1985
+ protocol: TCP
+ - name: srs-flv
+ containerPort: 8080
+ protocol: TCP
+ - name: srs-webrtc
+ containerPort: 30090
+ protocol: UDP
+ - name: srs-webrtc-tcp
+ containerPort: 30090
+ protocol: TCP
+ - name: srs-srt
+ containerPort: 30556
+ protocol: UDP
+ env:
+ - name: CANDIDATE
+ value: 120.205.80.23
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /usr/local/srs/conf/docker.conf
+ subPath: docker.conf
+ - name: srs-vol
+ mountPath: /home/dvr
+ subPath: xj-aks-uav-260304/helm-live/dvr
+ - name: srs-vol
+ mountPath: /home/hls
+ subPath: xj-aks-uav-260304/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ - name: oss-adaptor
+ image: 192.168.5.41:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
+ env:
+ - name: OSS_ENDPOINT
+ value: 'http://helm-minio:9000'
+ - name: OSS_AK
+ value: cmii
+ - name: OSS_SK
+ value: 'B#923fC7mk'
+ - name: OSS_BUCKET
+ value: live-cluster-hls
+ - name: SRS_OP
+ value: 'http://helm-live-op-svc-v2:8080'
+ - name: MYSQL_ENDPOINT
+ value: 'helm-mysql:3306'
+ - name: MYSQL_USERNAME
+ value: k8s_admin
+ - name: MYSQL_PASSWORD
+ value: fP#UaH6qQ3)8
+ - name: MYSQL_DATABASE
+ value: cmii_live_srs_op
+ - name: MYSQL_TABLE
+ value: live_segment
+ - name: LOG_LEVEL
+ value: info
+ - name: OSS_META
+ value: 'yes'
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-vol
+ mountPath: /cmii/share/hls
+ subPath: xj-aks-uav-260304/helm-live/hls
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ serviceName: helm-live-srsrtc-svc
+ podManagementPolicy: OrderedReady
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ partition: 0
+ revisionHistoryLimit: 10
+---
+# live-srs section
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: helm-live-op-v2
+ namespace: xj-aks-uav-260304
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+ helm.sh/chart: cmlc-live-live-op-2.0.0
+ live-role: op-v2
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ live-role: op-v2
+ template:
+ metadata:
+ labels:
+ live-role: op-v2
+ spec:
+ volumes:
+ - name: srs-conf-file
+ configMap:
+ name: helm-live-op-cm-v2
+ items:
+ - key: live.op.conf
+ path: bootstrap.yaml
+ defaultMode: 420
+ containers:
+ - name: helm-live-op-v2
+ image: 192.168.5.41:8033/cmii/cmii-live-operator:5.2.0
+ ports:
+ - name: operator
+ containerPort: 8080
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 4800m
+ memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ volumeMounts:
+ - name: srs-conf-file
+ mountPath: /cmii/bootstrap.yaml
+ subPath: bootstrap.yaml
+ livenessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /cmii/health
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ periodSeconds: 20
+ successThreshold: 1
+ failureThreshold: 3
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ imagePullPolicy: Always
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ imagePullSecrets:
+ - name: harborsecret
+ affinity: {}
+ schedulerName: default-scheduler
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 25%
+ maxSurge: 25%
+ revisionHistoryLimit: 10
+ progressDeadlineSeconds: 600
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc-v2
+ namespace: xj-aks-uav-260304
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ nodePort: 30333
+ selector:
+ live-role: op-v2
+ type: NodePort
+ sessionAffinity: None
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: helm-live-op-svc
+ namespace: xj-aks-uav-260304
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ live-role: op
+ type: ClusterIP
+ sessionAffinity: None
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: helm-live-op-cm-v2
+ namespace: xj-aks-uav-260304
+ labels:
+ octopus.control: wdd
+ app.kubernetes.io/managed-by: octopus
+ cmii.app: live-engine
+ cmii.type: live
+data:
+ live.op.conf: |-
+ server:
+ port: 8080
+ spring:
+ main:
+ allow-bean-definition-overriding: true
+ allow-circular-references: true
+ application:
+ name: cmii-live-operator
+ platform:
+ info:
+ name: cmii-live-operator
+ description: cmii-live-operator
+ version: 2.0
+ scanPackage: com.cmii.live.op
+ cloud:
+ nacos:
+ config:
+ username: nacos
+ password: KingKong@95461234
+ server-addr: helm-nacos:8848
+ extension-configs:
+ - data-id: cmii-live-operator.yml
+ group: 2.0
+ refresh: true
+ shared-configs:
+ - data-id: cmii-backend-system.yml
+ group: 2.0
+ refresh: true
+ discovery:
+ enabled: false
+
+ live:
+ engine:
+ type: srs
+ endpoint: 'http://helm-live-srs-svc:1985'
+ proto:
+ rtmp: 'rtmp://120.205.80.23:31935'
+ rtsp: 'rtsp://120.205.80.23:30554'
+ srt: 'srt://120.205.80.23:30556'
+ flv: 'http://120.205.80.23:30500'
+ hls: 'http://120.205.80.23:30500'
+ rtc: 'webrtc://120.205.80.23:30080'
+ replay: 'https://120.205.80.23:30333'
+ minio:
+ endpoint: http://helm-minio:9000
+ access-key: cmii
+ secret-key: B#923fC7mk
+ bucket: live-cluster-hls
diff --git a/agent-deploy/a_dashboard/TemplateK8SDashboard.go b/agent-deploy/a_dashboard/TemplateK8SDashboard.go
index 4f60126..c0a449c 100644
--- a/agent-deploy/a_dashboard/TemplateK8SDashboard.go
+++ b/agent-deploy/a_dashboard/TemplateK8SDashboard.go
@@ -217,9 +217,9 @@ spec:
containers:
- name: dashboard-metrics-scraper
{{- if .HarborPort }}
- image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/metrics-scraper:v1.0.8
+ image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/metrics-scraper:v1.0.9
{{- else }}
- image: {{ .HarborIPOrCustomImagePrefix }}metrics-scraper:v1.0.8
+ image: {{ .HarborIPOrCustomImagePrefix }}metrics-scraper:v1.0.9
{{- end }}
ports:
- containerPort: 8000
diff --git a/agent-deploy/c_middle/CmiiEmqxTemplate.go b/agent-deploy/c_middle/CmiiEmqxTemplate.go
index 981583f..2f6a2b8 100644
--- a/agent-deploy/c_middle/CmiiEmqxTemplate.go
+++ b/agent-deploy/c_middle/CmiiEmqxTemplate.go
@@ -12,7 +12,7 @@ metadata:
cmii.type: middleware
cmii.app: helm-emqxs
app.kubernetes.io/managed-by: octopus-control
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
type: Opaque
stringData:
# Dashboard admin password
@@ -370,7 +370,7 @@ metadata:
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
replicas: 1
serviceName: helm-emqxs-headless
@@ -391,7 +391,7 @@ spec:
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
affinity:
nodeAffinity:
@@ -432,9 +432,9 @@ spec:
- name: prepare-bootstrap
# 动态选择 tools 镜像
{{- if .HarborPort }}
- image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/tools:1.0
+ image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/os-shell:12-debian-12-r51
{{- else }}
- image: {{ .HarborIPOrCustomImagePrefix }}cmii/tools:1.0
+ image: {{ .HarborIPOrCustomImagePrefix }}cmii/os-shell:12-debian-12-r51
{{- end }}
imagePullPolicy: IfNotPresent
# =========================================================
@@ -548,9 +548,9 @@ spec:
- name: init-dashboard
# 动态选择 tools 镜像
{{- if .HarborPort }}
- image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/tools:1.0
+ image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/os-shell:12-debian-12-r51
{{- else }}
- image: {{ .HarborIPOrCustomImagePrefix }}cmii/tools:1.0
+ image: {{ .HarborIPOrCustomImagePrefix }}cmii/os-shell:12-debian-12-r51
{{- end }}
imagePullPolicy: IfNotPresent
@@ -618,7 +618,7 @@ metadata:
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
type: ClusterIP
clusterIP: None
@@ -657,7 +657,7 @@ metadata:
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
type: NodePort
selector:
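
The recurring fix in this file, quoting {{ .TagVersion }}, keeps YAML from typing a tag such as 2.0 as a float; Kubernetes label values must be strings, so the unquoted form can fail manifest validation. A small sketch of the type difference, assuming gopkg.in/yaml.v3 is available:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	for _, doc := range []string{
		`app.kubernetes.io/version: 2.0`,   // before: decoded as float64
		`app.kubernetes.io/version: "2.0"`, // after: decoded as string
	} {
		m := map[string]interface{}{}
		if err := yaml.Unmarshal([]byte(doc), &m); err != nil {
			panic(err)
		}
		fmt.Printf("%-40s -> %T\n", doc, m["app.kubernetes.io/version"])
	}
}
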
diff --git a/agent-deploy/c_middle/CmiiMongoTemplate.go b/agent-deploy/c_middle/CmiiMongoTemplate.go
index 5790fc5..7fe51d7 100644
--- a/agent-deploy/c_middle/CmiiMongoTemplate.go
+++ b/agent-deploy/c_middle/CmiiMongoTemplate.go
@@ -11,7 +11,7 @@ metadata:
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
type: NodePort
selector:
@@ -33,7 +33,7 @@ metadata:
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
serviceName: helm-mongo
replicas: 1
@@ -48,7 +48,7 @@ spec:
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
diff --git a/agent-deploy/c_middle/CmiiMySQLTemplate.go b/agent-deploy/c_middle/CmiiMySQLTemplate.go
index 137e32c..559a1d5 100644
--- a/agent-deploy/c_middle/CmiiMySQLTemplate.go
+++ b/agent-deploy/c_middle/CmiiMySQLTemplate.go
@@ -311,9 +311,9 @@ spec:
initContainers:
- name: change-volume-permissions
{{- if .HarborPort }}
- image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/bitnami-shell:11-debian-11-r136
+ image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/os-shell:12-debian-12-r51
{{- else }}
- image: {{ .HarborIPOrCustomImagePrefix }}bitnami-shell:11-debian-11-r136
+ image: {{ .HarborIPOrCustomImagePrefix }}os-shell:12-debian-12-r51
{{- end }}
imagePullPolicy: "Always"
command:
diff --git a/agent-deploy/c_middle/CmiiNacosTemplate.go b/agent-deploy/c_middle/CmiiNacosTemplate.go
index 384ede6..f5d3f9f 100644
--- a/agent-deploy/c_middle/CmiiNacosTemplate.go
+++ b/agent-deploy/c_middle/CmiiNacosTemplate.go
@@ -11,7 +11,7 @@ metadata:
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
@@ -29,7 +29,7 @@ metadata:
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
type: NodePort
selector:
@@ -57,7 +57,7 @@ metadata:
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
serviceName: helm-nacos
replicas: 1
@@ -72,7 +72,7 @@ spec:
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
diff --git a/agent-deploy/c_middle/CmiiRabbitMQTemplate.go b/agent-deploy/c_middle/CmiiRabbitMQTemplate.go
index 932276d..0045eaf 100644
--- a/agent-deploy/c_middle/CmiiRabbitMQTemplate.go
+++ b/agent-deploy/c_middle/CmiiRabbitMQTemplate.go
@@ -194,9 +194,9 @@ spec:
initContainers:
- name: volume-permissions
{{- if .HarborPort }}
- image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/bitnami-shell:11-debian-11-r136
+ image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/os-shell:12-debian-12-r51
{{- else }}
- image: {{ .HarborIPOrCustomImagePrefix }}bitnami-shell:11-debian-11-r136
+ image: {{ .HarborIPOrCustomImagePrefix }}os-shell:12-debian-12-r51
{{- end }}
imagePullPolicy: "Always"
command:
@@ -217,9 +217,9 @@ spec:
containers:
- name: rabbitmq
{{- if .HarborPort }}
- image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/rabbitmq:3.9.12-debian-10-r3
+ image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/rabbitmq:3.13.7-debian-12-r5
{{- else }}
- image: {{ .HarborIPOrCustomImagePrefix }}rabbitmq:3.9.12-debian-10-r3
+ image: {{ .HarborIPOrCustomImagePrefix }}rabbitmq:3.13.7-debian-12-r5
{{- end }}
imagePullPolicy: "Always"
env:
diff --git a/agent-deploy/c_middle/CmiiRedisTemplate.go b/agent-deploy/c_middle/CmiiRedisTemplate.go
index 62a2f2d..5a495ce 100644
--- a/agent-deploy/c_middle/CmiiRedisTemplate.go
+++ b/agent-deploy/c_middle/CmiiRedisTemplate.go
@@ -462,7 +462,7 @@ metadata:
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
- replicas: 1
+ replicas: 0
selector:
matchLabels:
app.kubernetes.io/name: redis-db
@@ -495,9 +495,9 @@ spec:
containers:
- name: redis
{{- if .HarborPort }}
- image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/redis:6.2.14-debian-11-r1
+ image: {{ .HarborIPOrCustomImagePrefix }}:{{ .HarborPort }}/cmii/redis:6.2.14-debian-11-r19
{{- else }}
- image: {{ .HarborIPOrCustomImagePrefix }}redis:6.2.14-debian-11-r1
+ image: {{ .HarborIPOrCustomImagePrefix }}redis:6.2.14-debian-11-r19
{{- end }}
imagePullPolicy: "Always"
securityContext:
diff --git a/agent-deploy/c_middle/TemplateCmiiPVC.go b/agent-deploy/c_middle/TemplateCmiiPVC.go
index c825148..2cc0616 100644
--- a/agent-deploy/c_middle/TemplateCmiiPVC.go
+++ b/agent-deploy/c_middle/TemplateCmiiPVC.go
@@ -10,7 +10,7 @@ metadata:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
storageClassName: nfs-prod-distribute
accessModes:
@@ -29,7 +29,7 @@ metadata:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
storageClassName: nfs-prod-distribute
accessModes:
@@ -48,7 +48,7 @@ metadata:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
storageClassName: nfs-prod-distribute
accessModes:
@@ -67,7 +67,7 @@ metadata:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
storageClassName: nfs-prod-distribute
accessModes:
diff --git a/agent-deploy/d_app/CmiiImageConfig.go b/agent-deploy/d_app/CmiiImageConfig.go
index 85784f1..72b3874 100644
--- a/agent-deploy/d_app/CmiiImageConfig.go
+++ b/agent-deploy/d_app/CmiiImageConfig.go
@@ -156,26 +156,38 @@ var CmiiGISAppMap = map[string]string{
"cmii-uav-grid-manage": "5.4.0",
}
+var MiddlewareARM64 = []string{
+ "bitnamilegacy/redis:6.2.14-debian-11-r19",
+ "bitnamilegacy/mysql:8.1.0-debian-11-r42",
+ "bitnamilegacy/os-shell:12-debian-12-r51",
+ "bitnamilegacy/rabbitmq:3.13.7-debian-12-r5",
+ "ossrs/srs:v5.0.195",
+ "emqx/emqx:5.8.8",
+ "bitnamilegacy/influxdb:2.7.11-debian-12-r19",
+ "minio/minio:RELEASE.2023-06-02T23-17-26Z",
+ "kubernetesui/dashboard:v2.7.0",
+ "kubernetesui/metrics-scraper:v1.0.9",
+ "dyrnq/nfs-subdir-external-provisioner:v4.0.2",
+}
+
var MiddlewareAmd64 = []string{
"harbor.cdcyy.com.cn/cmii/redis:6.2.14-debian-11-r1",
- "harbor.cdcyy.com.cn/cmii/mysql:8.0.35-debian-11-r1",
"harbor.cdcyy.com.cn/cmii/mysql:8.1.0-debian-11-r42",
"harbor.cdcyy.com.cn/cmii/chronyd:0.4.3",
- "harbor.cdcyy.com.cn/cmii/bitnami-shell:10-debian-10-r140",
- "harbor.cdcyy.com.cn/cmii/bitnami-shell:11-debian-11-r136",
- "harbor.cdcyy.com.cn/cmii/rabbitmq:3.9.12-debian-10-r3",
- "harbor.cdcyy.com.cn/cmii/rabbitmq:3.11.26-debian-11-r2",
+ "bitnamilegacy/os-shell:12-debian-12-r51",
+ "harbor.cdcyy.com.cn/cmii/os-shell:12-debian-12-r51",
+ "bitnamilegacy/rabbitmq:3.13.7-debian-12-r5",
+ "harbor.cdcyy.com.cn/cmii/rabbitmq:3.13.7-debian-12-r5",
"docker.m.daocloud.io/ossrs/srs:v4.0.136",
"harbor.cdcyy.com.cn/cmii/srs:v5.0.195",
- "docker.m.daocloud.io/ossrs/srs:v4.0-r3",
- "docker.m.daocloud.io/emqx/emqx:4.4.19",
- "harbor.cdcyy.com.cn/cmii/emqx:5.5.8",
+ "docker.m.daocloud.io/ossrs/srs:v.0-r3",
+ "harbor.cdcyy.com.cn/cmii/emqx:5.8.8",
"docker.m.daocloud.io/nacos/nacos-server:v2.1.2",
"docker.m.daocloud.io/nacos/nacos-server:v2.1.2-slim",
"docker.m.daocloud.io/library/mongo:5.0",
- "docker.m.daocloud.io/library/rabbitmq:3.9-management",
"docker.m.daocloud.io/bitnami/minio:2022.5.4",
"harbor.cdcyy.com.cn/cmii/minio:2023.5.4",
+ "harbor.cdcyy.com.cn/cmii/influxdb:2.7.11-debian-12-r19",
"docker.m.daocloud.io/minio/minio:RELEASE.2023-06-02T23-17-26Z",
"docker.m.daocloud.io/kubernetesui/dashboard:v2.0.1",
"docker.m.daocloud.io/kubernetesui/dashboard:v2.7.0",
diff --git a/agent-deploy/d_app/FrontendConfigMap.go b/agent-deploy/d_app/FrontendConfigMap.go
index 06d51bd..69ed49f 100644
--- a/agent-deploy/d_app/FrontendConfigMap.go
+++ b/agent-deploy/d_app/FrontendConfigMap.go
@@ -49,6 +49,7 @@ var FrontendShortNameMaps = map[string]string{
"cmii-uav-platform-awareness": "awareness",
"cmii-uav-platform-flight-control": "flight-control",
"cmii-uav-platform-iot-manager": "iot",
+ "cmii-uav-platform-lite": "lite",
}
var FrontendClientIdMaps = map[string]string{
@@ -100,4 +101,5 @@ var FrontendClientIdMaps = map[string]string{
"cmii-uav-platform-iot-manager": "empty",
"cmii-uav-platform-uasmskny": "empty",
"cmii-uav-platform-uaskny": "empty",
+ "cmii-uav-platform-lite": "empty",
}
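
A new frontend such as cmii-uav-platform-lite has to be registered in both FrontendShortNameMaps and FrontendClientIdMaps; an entry in only one of them leaves the app half-configured. A hypothetical consistency check, not present in the repo, that would catch the mismatch:

package main

import "fmt"

// Single-entry stand-ins for the two maps in FrontendConfigMap.go.
var FrontendShortNameMaps = map[string]string{"cmii-uav-platform-lite": "lite"}
var FrontendClientIdMaps = map[string]string{"cmii-uav-platform-lite": "empty"}

// checkFrontendMaps fails when an app appears in only one of the two maps.
func checkFrontendMaps() error {
	for app := range FrontendShortNameMaps {
		if _, ok := FrontendClientIdMaps[app]; !ok {
			return fmt.Errorf("%s has a short name but no client id", app)
		}
	}
	for app := range FrontendClientIdMaps {
		if _, ok := FrontendShortNameMaps[app]; !ok {
			return fmt.Errorf("%s has a client id but no short name", app)
		}
	}
	return nil
}

func main() {
	if err := checkFrontendMaps(); err != nil {
		panic(err)
	}
	fmt.Println("frontend maps are consistent")
}
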
diff --git a/agent-deploy/d_app/TemplateCmiiBackend.go b/agent-deploy/d_app/TemplateCmiiBackend.go
index 2cb2885..fbc489d 100644
--- a/agent-deploy/d_app/TemplateCmiiBackend.go
+++ b/agent-deploy/d_app/TemplateCmiiBackend.go
@@ -11,7 +11,7 @@ metadata:
cmii.app: {{ .AppName }}
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
- app.kubernetes.io/app-version: {{ .TagVersion }}
+ app.kubernetes.io/app-version: "{{ .TagVersion }}"
spec:
replicas: {{ .Replicas }}
strategy:
@@ -189,7 +189,7 @@ metadata:
cmii.app: {{ .AppName }}
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
- app.kubernetes.io/app-version: {{ .TagVersion }}
+ app.kubernetes.io/app-version: "{{ .TagVersion }}"
spec:
{{- if .NodePort }}
type: NodePort
@@ -220,7 +220,7 @@ metadata:
cmii.app: {{ .AppName }}
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
- app.kubernetes.io/app-version: {{ .TagVersion }}
+ app.kubernetes.io/app-version: "{{ .TagVersion }}"
spec:
storageClassName: nfs-prod-distribute
accessModes:
diff --git a/agent-deploy/d_app/TemplateCmiiFrontend.go b/agent-deploy/d_app/TemplateCmiiFrontend.go
index 65091e0..1e014b5 100644
--- a/agent-deploy/d_app/TemplateCmiiFrontend.go
+++ b/agent-deploy/d_app/TemplateCmiiFrontend.go
@@ -10,7 +10,7 @@ metadata:
cmii.type: frontend
cmii.app: {{ .AppName }}
octopus.control: frontend-app-wdd
- app.kubernetes.io/app-version: {{ .TagVersion }}
+ app.kubernetes.io/app-version: "{{ .TagVersion }}"
spec:
replicas: {{ .Replicas }}
strategy:
@@ -84,7 +84,7 @@ metadata:
cmii.type: frontend
cmii.app: {{ .AppName }}
octopus.control: frontend-app-wdd
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
spec:
type: ClusterIP
selector:
diff --git a/agent-deploy/d_app/TemplateIngressConfigMap.go b/agent-deploy/d_app/TemplateIngressConfigMap.go
index 29622e6..90bbbee 100644
--- a/agent-deploy/d_app/TemplateIngressConfigMap.go
+++ b/agent-deploy/d_app/TemplateIngressConfigMap.go
@@ -61,7 +61,7 @@ metadata:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
@@ -115,7 +115,7 @@ metadata:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
@@ -158,7 +158,7 @@ metadata:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
- app.kubernetes.io/version: {{ .TagVersion }}
+ app.kubernetes.io/version: "{{ .TagVersion }}"
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
diff --git a/agent-operator/CmiiDeployOperator_test.go b/agent-operator/CmiiDeployOperator_test.go
index 0b59c54..795abfa 100644
--- a/agent-operator/CmiiDeployOperator_test.go
+++ b/agent-operator/CmiiDeployOperator_test.go
@@ -59,6 +59,69 @@ func TestCmiiEnvDeploy_WddSuperCluster(t *testing.T) {
}
+func TestCmiiEnvDeploy_ChengDuGongYeXueYuan(t *testing.T) {
+
+	// Chengdu Industrial Vocational and Technical College
+ commonEnv := &z_dep.CommonEnvironmentConfig{
+ WebIP: "172.22.106.77",
+ WebPort: "8088",
+ HarborIPOrCustomImagePrefix: "172.22.106.77",
+ HarborPort: "8033",
+ Namespace: "sc-cd-uav-260207",
+ TagVersion: "2.0",
+ TenantEnv: "",
+ MinioPublicIP: "",
+ MinioInnerIP: "helm-minio",
+ NFSServerIP: "192.168.5.141",
+ ApplyFilePrefix: "",
+ }
+
+ CmiiEnvDeployOffline(commonEnv, true, real_project.CmiiUavFlyControlImageList)
+
+}
+
+func TestCmiiEnvDeploy_XinJiangAKeSu(t *testing.T) {
+
+	// Xinjiang Aksu, March 3, 2026
+ commonEnv := &z_dep.CommonEnvironmentConfig{
+ WebIP: "120.205.80.23",
+ WebPort: "8088",
+ HarborIPOrCustomImagePrefix: "192.168.5.41",
+ HarborPort: "8033",
+ Namespace: "xj-aks-uav-260304",
+ TagVersion: "2.0",
+ TenantEnv: "",
+ MinioPublicIP: "",
+ MinioInnerIP: "helm-minio",
+ NFSServerIP: "192.168.5.141",
+ ApplyFilePrefix: "",
+ }
+
+ CmiiEnvDeployOffline(commonEnv, true, real_project.CmiiUavFlyControlARM64ImageList)
+
+}
+
+func TestCmiiEnvDeploy_MianYangDiKong(t *testing.T) {
+
+	// Mianyang low-altitude intelligent flight
+ commonEnv := &z_dep.CommonEnvironmentConfig{
+ WebIP: "36.133.66.183",
+ WebPort: "8088",
+ HarborIPOrCustomImagePrefix: "192.168.1.4",
+ HarborPort: "8033",
+ Namespace: "sc-my-uav-260202",
+ TagVersion: "2.0",
+ TenantEnv: "",
+ MinioPublicIP: "",
+ MinioInnerIP: "helm-minio",
+ NFSServerIP: "192.168.1.6",
+ ApplyFilePrefix: "",
+ }
+
+ CmiiEnvDeployOffline(commonEnv, true, real_project.CmiiUavFlyControlImageList)
+
+}
+
func TestCmiiEnvDeploy_XiongAnJianGuanPingTai(t *testing.T) {
// 雄安空能院 2025年8月7日
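
The three site tests added above differ only in their CommonEnvironmentConfig values and in which image list they pass to CmiiEnvDeployOffline. A hedged sketch of a table-driven variant that could sit alongside them in CmiiDeployOperator_test.go (types and calls are from this diff; the table layout is an assumption, with one row shown):

func TestCmiiEnvDeploy_Sites(t *testing.T) {
	sites := []struct {
		name   string
		env    *z_dep.CommonEnvironmentConfig
		images []string
	}{
		{"ChengDuGongYeXueYuan", &z_dep.CommonEnvironmentConfig{
			WebIP: "172.22.106.77", WebPort: "8088",
			HarborIPOrCustomImagePrefix: "172.22.106.77", HarborPort: "8033",
			Namespace: "sc-cd-uav-260207", TagVersion: "2.0",
			MinioInnerIP: "helm-minio", NFSServerIP: "192.168.5.141",
		}, real_project.CmiiUavFlyControlImageList},
		// ... XinJiangAKeSu and MianYangDiKong rows elided ...
	}
	for _, s := range sites {
		t.Run(s.name, func(t *testing.T) {
			CmiiEnvDeployOffline(s.env, true, s.images)
		})
	}
}
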
diff --git a/agent-wdd/a_run/one-build-and-upload.ps1 b/agent-wdd/a_run/one-build-and-upload.ps1
index 3e308fd..3001e46 100644
--- a/agent-wdd/a_run/one-build-and-upload.ps1
+++ b/agent-wdd/a_run/one-build-and-upload.ps1
@@ -14,17 +14,22 @@ set HTTPS_PROXY=http://127.0.0.1:7899
# 重新build项目
Set-Location "C:\\Users\\wddsh\\Documents\\IdeaProjects\\WddSuperAgent\\agent-wdd\"
& "C:\Users\wddsh\go\bin\gox.exe" -osarch="linux/amd64" -output "./build/agent-wdd_{{.OS}}_{{.Arch}}"
+
+& "C:\Users\wddsh\go\bin\gox.exe" -osarch="linux/arm64" -output "./build/agent-wdd_{{.OS}}_{{.Arch}}"
+
Set-Location "C:\Users\wddsh\Documents\IdeaProjects\WddSuperAgent\agent-wdd\a_run"
Write-Host "build项目成功"
Write-Host ""
# 删除上面存在的旧的内容
mc.exe rm oracle-seoul-2/seoul-2/agent-wdd_linux_amd64
+mc.exe rm oracle-seoul-2/seoul-2/agent-wdd_linux_arm64
Write-Host "删除旧的内容成功"
Write-Host ""
# 上传文件
mc.exe cp C:\Users\wddsh\Documents\IdeaProjects\WddSuperAgent\agent-wdd\build\agent-wdd_linux_amd64 oracle-seoul-2/seoul-2/
+mc.exe cp C:\Users\wddsh\Documents\IdeaProjects\WddSuperAgent\agent-wdd\build\agent-wdd_linux_arm64 oracle-seoul-2/seoul-2/
Write-Host "上传文件成功"
Write-Host ""
diff --git a/agent-wdd/a_run/run_test.sh b/agent-wdd/a_run/run_test.sh
index 468f888..6047291 100644
--- a/agent-wdd/a_run/run_test.sh
+++ b/agent-wdd/a_run/run_test.sh
@@ -3,7 +3,14 @@
rm -f /usr/local/bin/agent-wdd
rm -f /usr/local/bin/test-shell.sh
-wget https://pan.107421.xyz/d/oracle-seoul-2/agent-wdd_linux_amd64 -qO /usr/local/bin/agent-wdd
+
+arch=$(uname -m)
+case "$arch" in
+ x86_64) a=amd64 ;;
+ aarch64|arm64) a=arm64 ;;
+ *) echo "unsupported arch: $arch" >&2; exit 1 ;;
+esac
+wget "https://pan.107421.xyz/d/oracle-seoul-2/agent-wdd_linux_${a}" -O /usr/local/bin/agent-wdd
chmod +x /usr/local/bin/agent-wdd
diff --git a/agent-wdd/build/agent-wdd_linux_amd64 b/agent-wdd/build/agent-wdd_linux_amd64
index f92f1b1..959816a 100644
Binary files a/agent-wdd/build/agent-wdd_linux_amd64 and b/agent-wdd/build/agent-wdd_linux_amd64 differ