完成 72绵阳项目 71雄安集团监管平台 大量优化更新

This commit is contained in:
zeaslity
2026-02-03 17:07:28 +08:00
parent d962ace967
commit a8f6bda703
93 changed files with 21632 additions and 185 deletions

211
.idea/workspace.xml generated
View File

@@ -4,31 +4,100 @@
<option name="autoReloadType" value="SELECTIVE" />
</component>
<component name="ChangeListManager">
<list default="true" id="a078e6aa-c7c7-487c-ab23-90fee7ad88b2" name="Changes" comment="新增GPU部分">
<change afterPath="$PROJECT_DIR$/.idea/go.imports.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/kubernetes-dashboard-v2.7.0.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/kubernetes-dashboard-v7.10.2-无法启动.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/kubernetes-images-1.30.4.xlsx" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/harbor-secret.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/helm-minio.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/k8s-emqx.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/k8s-postgresql-timescaledb.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/k8s-timescaledb-16C32GB-prod.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/kubernetes-dashboard-v2.7.0.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/tpu_plugin_pcie.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/70-202511-XA低空平台/cmii-update.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/cmii-hosts.txt" afterDir="false" />
<list default="true" id="a078e6aa-c7c7-487c-ab23-90fee7ad88b2" name="Changes" comment="新增RMDC部分的代码">
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/minio整体复制.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/1-批量脚本.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/2-disk.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/ImageSyncDLTU.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/cmii-update.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris-deploy/doris-be-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris-deploy/doris-be-internal-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris-deploy/doris-be-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris-deploy/doris-be-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris-deploy/doris-fe-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris-deploy/doris-fe-internal-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris-deploy/doris-fe-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris-deploy/doris-fe-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris-deploy/doris-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris-deploy/修改pvc-然后statefulset中的image.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris数据同步/doris-data-import.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/doris数据同步/同步脚本.md" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/hosts.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-admin-token.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/helm-minio.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-backend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-dashboard.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-emqx-5.8.8-OK.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-emqx.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-frontend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-ingress-13014.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-ingress.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-mongo.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-mysql.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-nacos.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-nfs-test.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-nfs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-pyfusion-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-rabbitmq.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-redis.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/k8s-srs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/k8s-yaml/x_minio初始化.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/71-202601-XA监管平台/rke-13014-cluster.yml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/1-批量脚本.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/ImageSyncDLTU.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/cmii-update.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-admin-token.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-app/fly-center.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-app/lite.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-app/sky-coverage.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/helm-minio.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-backend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-dashboard.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-emqx.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-frontend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-ingress-13014.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-ingress.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-mongo.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-mysql.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-nacos.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-nfs-test.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-nfs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-rabbitmq.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-redis.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-deploy/k8s-srs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/k8s-middle/helm-influxdb.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/72-202602-绵阳飞服/rke-13014-cluster.yml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/b-高级磁盘-disk.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/c-联网-docker安装.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/APT代理方式.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/apt切换源-支持20-24版本.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/install-nginx-cn.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/3-扩展磁盘.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/prompt.md" afterDir="false" />
<change afterPath="$PROJECT_DIR$/999-部署模板/doris-deploy/doris-be-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/999-部署模板/doris-deploy/doris-be-internal-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/999-部署模板/doris-deploy/doris-be-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/999-部署模板/doris-deploy/doris-be-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/999-部署模板/doris-deploy/doris-fe-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/999-部署模板/doris-deploy/doris-fe-internal-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/999-部署模板/doris-deploy/doris-fe-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/999-部署模板/doris-deploy/doris-fe-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/999-部署模板/doris-deploy/doris-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/999-部署模板/doris-deploy/修改pvc-然后statefulset中的image.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/999-部署模板/工具模板/超级瑞士军刀-dockerfile" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/58-202503-新DEMO环境/批量指令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/58-202503-新DEMO环境/批量指令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/69-202511-AI-GPU测试/rke-13014-cluster-security.yml" beforeDir="false" afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/rke-13014-cluster-security.yml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/清理rke集群的安装.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/清理rke集群的安装.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/编辑calico状态.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/编辑calico状态.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/备份脚本/关停全部的服务.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/备份脚本/关停全部的服务.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/备份脚本/备份命名空间.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/备份脚本/备份命名空间.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-pyfusion-configmap.yaml" beforeDir="false" afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-pyfusion-configmap.yaml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/b-联网-docker安装.sh" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/a-批量命令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/在线安装nginx.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/在线安装nginx-国外.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/z_执行apply命令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/z_执行apply命令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/手动创建harbor仓库.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/手动创建harbor仓库.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/故障恢复脚本/删除状态不为Running的Pod.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/故障恢复脚本/删除状态不为Running的Pod.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/更新脚本/副本数调整.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/更新脚本/副本数调整.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/研发环境相关DEMO/Core集群磁盘整理.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/研发环境相关DEMO/Core集群磁盘整理.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/0-挂载磁盘.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/0-挂载磁盘.sh" afterDir="false" />
</list>
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
@@ -60,39 +129,40 @@
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent">{
&quot;keyToString&quot;: {
&quot;KUBERNETES_SUPPRESS_CONFIG_CLUSTER_SUGGESTION&quot;: &quot;true&quot;,
&quot;RunOnceActivity.ShowReadmeOnStart&quot;: &quot;true&quot;,
&quot;RunOnceActivity.git.unshallow&quot;: &quot;true&quot;,
&quot;RunOnceActivity.go.formatter.settings.were.checked&quot;: &quot;true&quot;,
&quot;RunOnceActivity.go.migrated.go.modules.settings&quot;: &quot;true&quot;,
&quot;RunOnceActivity.typescript.service.memoryLimit.init&quot;: &quot;true&quot;,
&quot;SHARE_PROJECT_CONFIGURATION_FILES&quot;: &quot;true&quot;,
&quot;git-widget-placeholder&quot;: &quot;main&quot;,
&quot;go.import.settings.migrated&quot;: &quot;true&quot;,
&quot;last_opened_file_path&quot;: &quot;C:/Users/wddsh/Documents/IdeaProjects/CmiiDeploy/69-202511-AI-GPU测试&quot;,
&quot;node.js.detected.package.eslint&quot;: &quot;true&quot;,
&quot;node.js.detected.package.tslint&quot;: &quot;true&quot;,
&quot;node.js.selected.package.eslint&quot;: &quot;(autodetect)&quot;,
&quot;node.js.selected.package.tslint&quot;: &quot;(autodetect)&quot;,
&quot;nodejs_package_manager_path&quot;: &quot;npm&quot;,
&quot;settings.editor.selected.configurable&quot;: &quot;editor.preferences.tabs&quot;,
&quot;vue.rearranger.settings.migration&quot;: &quot;true&quot;
<component name="PropertiesComponent"><![CDATA[{
"keyToString": {
"KUBERNETES_SUPPRESS_CONFIG_CLUSTER_SUGGESTION": "true",
"RunOnceActivity.ShowReadmeOnStart": "true",
"RunOnceActivity.TerminalTabsStorage.copyFrom.TerminalArrangementManager.252": "true",
"RunOnceActivity.git.unshallow": "true",
"RunOnceActivity.go.formatter.settings.were.checked": "true",
"RunOnceActivity.go.migrated.go.modules.settings": "true",
"RunOnceActivity.typescript.service.memoryLimit.init": "true",
"SHARE_PROJECT_CONFIGURATION_FILES": "true",
"git-widget-placeholder": "main",
"go.import.settings.migrated": "true",
"last_opened_file_path": "C:/Users/wddsh/Documents/IdeaProjects/CmiiDeploy/72-202602-绵阳飞服/k8s-deploy",
"node.js.detected.package.eslint": "true",
"node.js.detected.package.tslint": "true",
"node.js.selected.package.eslint": "(autodetect)",
"node.js.selected.package.tslint": "(autodetect)",
"nodejs_package_manager_path": "npm",
"settings.editor.selected.configurable": "editor.preferences.tabs",
"vue.rearranger.settings.migration": "true"
},
&quot;keyToStringList&quot;: {
&quot;DatabaseDriversLRU&quot;: [
&quot;mysql&quot;
"keyToStringList": {
"DatabaseDriversLRU": [
"mysql"
]
}
}</component>
}]]></component>
<component name="RecentsManager">
<key name="CopyFile.RECENT_KEYS">
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\69-202511-AI-GPU测试" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\70-202511-XA低空平台" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\68-202511-k8s升级1-30-14版本" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院\deploy" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\72-202602-绵阳飞服\k8s-deploy" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\72-202602-绵阳飞服\k8s-app" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\72-202602-绵阳飞服" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\72-202602-绵阳飞服\k8s-middle" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\71-202601-XA监管平台\k8s-yaml" />
</key>
<key name="MoveFile.RECENT_KEYS">
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院\关停计划\备份" />
@@ -104,8 +174,8 @@
<component name="SharedIndexes">
<attachedChunks>
<set>
<option value="bundled-jdk-30f59d01ecdd-cffe25b9f5b3-intellij.indexing.shared.core-IU-253.28294.334" />
<option value="bundled-js-predefined-d6986cc7102b-c7e53b3be11b-JavaScript-IU-253.28294.334" />
<option value="bundled-jdk-30f59d01ecdd-2fc7cc6b9a17-intellij.indexing.shared.core-IU-253.30387.90" />
<option value="bundled-js-predefined-d6986cc7102b-9b0f141eb926-JavaScript-IU-253.30387.90" />
</set>
</attachedChunks>
</component>
@@ -201,7 +271,26 @@
<workItem from="1765435760830" duration="16000" />
<workItem from="1765453325001" duration="1343000" />
<workItem from="1765519520794" duration="3192000" />
<workItem from="1765532805423" duration="595000" />
<workItem from="1765532805423" duration="626000" />
<workItem from="1765602984389" duration="12000" />
<workItem from="1765969886034" duration="1896000" />
<workItem from="1766020800999" duration="11000" />
<workItem from="1767756685243" duration="323000" />
<workItem from="1767777206727" duration="148000" />
<workItem from="1767777382028" duration="201000" />
<workItem from="1767969716597" duration="9000" />
<workItem from="1768186836316" duration="55000" />
<workItem from="1768534830097" duration="10971000" />
<workItem from="1768785679919" duration="4392000" />
<workItem from="1768790119545" duration="15248000" />
<workItem from="1768870413078" duration="3938000" />
<workItem from="1768874455602" duration="9804000" />
<workItem from="1768901601131" duration="11000" />
<workItem from="1768956848179" duration="3924000" />
<workItem from="1769045469349" duration="3662000" />
<workItem from="1769068094686" duration="1441000" />
<workItem from="1769129189671" duration="15000" />
<workItem from="1770084087770" duration="16325000" />
</task>
<task id="LOCAL-00001" summary="common update">
<option name="closed" value="true" />
@@ -235,13 +324,22 @@
<option name="project" value="LOCAL" />
<updated>1762942452911</updated>
</task>
<option name="localTasksCounter" value="5" />
<task id="LOCAL-00005" summary="新增RMDC部分的代码">
<option name="closed" value="true" />
<created>1765534772530</created>
<option name="number" value="00005" />
<option name="presentableId" value="LOCAL-00005" />
<option name="project" value="LOCAL" />
<updated>1765534772530</updated>
</task>
<option name="localTasksCounter" value="6" />
<servers />
</component>
<component name="TypeScriptGeneratedFilesManager">
<option name="version" value="3" />
</component>
<component name="UnknownFeatures">
<option featureType="com.intellij.fileTypeFactory" implementationName="*.sh" />
<option featureType="com.intellij.fileTypeFactory" implementationName="*.conf" />
</component>
<component name="Vcs.Log.Tabs.Properties">
@@ -260,6 +358,7 @@
<MESSAGE value="修改CICD的jenkins构建脚本" />
<MESSAGE value="新增雄安空能院项目" />
<MESSAGE value="新增GPU部分" />
<option name="LAST_COMMIT_MESSAGE" value="新增GPU部分" />
<MESSAGE value="新增RMDC部分的代码" />
<option name="LAST_COMMIT_MESSAGE" value="新增RMDC部分的代码" />
</component>
</project>

View File

@@ -8,7 +8,7 @@ data:
mqtt:
broker: "helm-emqxs"
port: 1883
username: "cmii"
username: "cmlc"
password: "odD8#Ve7.B"
topics:

View File

@@ -0,0 +1,11 @@
# One-off MinIO migration: register both endpoints as mc aliases, sanity-check
# access, then continuously mirror everything from the old server to the new one.
mc alias set minio-old http://10.22.48.5:39110 cmii B#923fC7mk
mc alias set minio-new http://10.22.48.7:39010 cmii B#923fC7mk
# List buckets on each side to confirm the aliases/credentials work.
mc ls minio-old
mc ls minio-new
# --overwrite replaces differing objects; --watch keeps syncing new changes.
mc mirror --overwrite --watch minio-old minio-new

View File

@@ -0,0 +1,40 @@
# Install the agent-wdd binary on this node, then run commands on a host list.
mv agent-wdd_linux_amd64 /usr/local/bin/agent-wdd
chmod +x /usr/local/bin/agent-wdd
# Install the ssh key on the primary node
/usr/local/bin/agent-wdd base ssh config
/usr/local/bin/agent-wdd base ssh key
# Run commands in batch over every host
host_list=(
10.22.57.5
10.22.57.6
10.22.57.7
10.22.57.3
10.22.57.4
)
for server in "${host_list[@]}";do
echo "current ip is $server"
ssh root@${server} "systemctl restart docker"
ssh root@${server} "docker info"
echo ""
done
# NOTE(review): everything below is OUTSIDE the loop, so $server still holds
# the LAST host from the list — these commands run against that single host
# only. If they are meant for every host, move them inside the loop above.
scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config && /usr/local/bin/agent-wdd base ssh key"
ssh root@${server} "echo yes"
# Reset then configure an APT proxy (apt-cacher-ng at 10.22.57.8:3142).
ssh root@${server} "echo \"\"> /etc/apt/apt.conf.d/01proxy"
ssh root@${server} "printf '%s\n' \
'Acquire::http::Proxy \"http://10.22.57.8:3142\";' \
'Acquire::https::Proxy \"http://10.22.57.8:3142\";' \
| tee /etc/apt/apt.conf.d/01proxy >/dev/null"
ssh root@${server} "apt-get update"
ssh root@${server} "apt-get install -y gparted"

View File

@@ -0,0 +1,84 @@
#!/bin/bash
# Provision a whole physical disk as a single LVM logical volume and mount it.
# Flow: GPT label -> one full-size LVM partition -> VG "datavg" -> LV "lvdata"
# -> mkfs (ext4/xfs) -> /etc/fstab entry (by UUID) -> mount -> verify.
set -e

#----------------------------------------------------------
# User configuration
#----------------------------------------------------------
DISK="/dev/vdb"              # Physical disk to operate on (adjust as needed)
MOUNT_PATH="/var/lib/docker" # Mount point (directory is created automatically)
FS_TYPE="ext4"               # Filesystem type: ext4 or xfs (default ext4)

#----------------------------------------------------------
# Core logic (avoid modifying unless necessary)
#----------------------------------------------------------

# Abort unless running as root, the disk exists, and FS_TYPE is supported.
function check_prerequisites() {
    # Must run as root
    [[ $EUID -ne 0 ]] && echo -e "\033[31m错误必须使用root权限运行此脚本\033[0m" && exit 1
    # Disk must exist as a block device
    [[ ! -b "$DISK" ]] && echo -e "\033[31m错误磁盘 $DISK 不存在\033[0m" && exit 1
    # Filesystem type validation
    if [[ "$FS_TYPE" != "ext4" && "$FS_TYPE" != "xfs" ]]; then
        echo -e "\033[31m错误不支持的磁盘格式 $FS_TYPE,仅支持 ext4/xfs\033[0m"
        exit 1
    fi
}

# Partition the disk (GPT, one LVM partition) and build the VG/LV on it.
function prepare_disk() {
    local partition="${DISK}1"
    # NVMe and mmcblk devices insert a "p" before the partition number.
    [[ "$DISK" == *nvme* || "$DISK" == *mmcblk* ]] && partition="${DISK}p1"
    echo -e "\033[34m正在初始化磁盘分区...\033[0m"
    parted "$DISK" --script mklabel gpt
    parted "$DISK" --script mkpart primary 0% 100%
    parted "$DISK" --script set 1 lvm on
    partprobe "$DISK" # make sure the kernel sees the new partition table
    echo -e "\033[34m正在创建LVM结构...\033[0m"
    pvcreate "$partition"
    vgcreate datavg "$partition"
    lvcreate -y -l 100%FREE -n lvdata datavg
}

# Format the LV, record it in /etc/fstab by UUID, and mount it.
function format_and_mount() {
    echo -e "\033[34m格式化逻辑卷...\033[0m"
    if [[ "$FS_TYPE" == "ext4" ]]; then
        mkfs.ext4 -F "/dev/datavg/lvdata"
    else
        mkfs.xfs -f "/dev/datavg/lvdata"
    fi
    echo -e "\033[34m设置挂载配置...\033[0m"
    mkdir -p "$MOUNT_PATH"
    UUID=$(blkid -s UUID -o value "/dev/datavg/lvdata")
    # Append to /etc/fstab only if absent, so repeated runs do not duplicate
    # the entry.
    if ! grep -q "UUID=$UUID" /etc/fstab; then
        echo "UUID=$UUID $MOUNT_PATH $FS_TYPE defaults 0 0" | tee -a /etc/fstab >/dev/null
    fi
    mount -a
}

# Print the final block-device layout and disk usage for manual review.
function verify_result() {
    echo -e "\n\033[1;36m最终验证结果\033[0m"
    lsblk -f "$DISK"
    echo -e "\n磁盘空间使用情况"
    df -hT "$MOUNT_PATH"
}

# Main flow
check_prerequisites
prepare_disk
format_and_mount
verify_result
echo -e "\n\033[32m操作执行完毕请仔细核查上述输出信息\033[0m"

# This script was generated from the following requirements:
# 1. relabel the physical disk with a GPT partition table
# 2. create one partition spanning the whole disk, typed for LVM
# 3. assign the partition to volume group "datavg"
# 4. allocate all free space in datavg to logical volume "lvdata"
# 5. format the LV with the configured filesystem (xfs or ext4, default ext4)
# 6. create the configured mount point
# 7. add an /etc/fstab entry and perform the mount
# 8. run lsblk and df to verify the mount

View File

@@ -0,0 +1,168 @@
#!/bin/bash
# Download image archives from OSS (or load them from local disk), load them
# into docker, retag them for the local Harbor registry and push them.
all_image_list_txt="all-cmii-image-list.txt" # adjust per release version
gzip_image_list_txt="all-gzip-image-list.txt" # usually does not need changing
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus-image"
DockerRegisterDomain="10.22.57.8:8033" # adjust for the target environment
HarborAdminPass=V2ryStr@ngPss # must match the password from the first script

# Print a message in green followed by a blank line.
print_green() {
echo -e "\033[32m${1}\033[0m"
echo ""
}

# Print a message in red followed by a blank line.
print_red() {
echo -e "\033[31m${1}\033[0m"
echo ""
}
# Dispatch on each positional argument (rke | middle | cmii): select the
# matching OSS sub-path and local directory, then run dltu for it.
# NOTE(review): local_gzip_path and oss_prefix_url are GLOBALS that each case
# appends to — passing more than one argument compounds the paths
# (e.g. ".../rke13014/middle"). Call with a single argument at a time.
Download_Load_Tag_Upload() {
print_green "[DLTU] - start !"
while [[ $# -gt 0 ]]; do
case "$1" in
rke)
# print_green "download rke "
local_gzip_path="$local_gzip_path/rke13014"
mkdir -p ${local_gzip_path}
oss_prefix_url="$oss_prefix_url/rke13014/"
dltu
shift # past argument
;;
middle)
local_gzip_path="$local_gzip_path/middle"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/middle/"
dltu
shift # past argument
;;
cmii)
# note: the "cmii" bundle lives under the xauas22/ prefix on OSS
local_gzip_path="$local_gzip_path/xauas22"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/xauas22/"
dltu
shift # past argument
;;
*)
# unknown option
print_red "bad arguments"
;;
esac
done
}
# Download the image/gzip list files, then for every gzip archive: download,
# docker load, retag under the local Harbor (rancher/ or cmii/ project) and
# push. Reads the globals set by Download_Load_Tag_Upload.
dltu() {
    print_green "download all image name list and gzip file list!"
    cd "$local_gzip_path" || exit
    # -f: removing stale lists must not error on a fresh directory; stale
    # files would otherwise make wget save numbered duplicates (file.1, ...).
    rm -f "$all_image_list_txt" "$gzip_image_list_txt"
    wget "$oss_prefix_url$all_image_list_txt"
    wget "$oss_prefix_url$gzip_image_list_txt"
    # Log in once before the push loop.
    docker login -u admin -p "${HarborAdminPass}" "${DockerRegisterDomain}"
    echo ""
    while IFS= read -r i; do
        [ -z "${i}" ] && continue
        echo "download gzip file =>: $oss_prefix_url${i}"
        if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then
            echo "Gzip file download success : ${i}"
            # `docker load` prints "Loaded image: <name>"; keep the name part.
            image_full_name=$(docker load -i "${i}" | head -n1 | awk -F': ' '{print $2}')
            # Strip the registry/project prefix, keep "name:tag".
            app_name=$(echo "$image_full_name" | sed 's|.*/||g')
            echo "extract short name is $app_name"
            # Rancher system images go to the rancher/ project, all others to cmii/.
            if echo "$image_full_name" | grep -q "rancher"; then
                print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
                docker tag "${image_full_name}" "$DockerRegisterDomain/rancher/$app_name"
                docker push "$DockerRegisterDomain/rancher/$app_name"
            else
                print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
                docker tag "${image_full_name}" "$DockerRegisterDomain/cmii/$app_name"
                docker push "$DockerRegisterDomain/cmii/$app_name"
            fi
        else
            print_red "Gzip file download FAILED : ${i}"
        fi
        echo "-------------------------------------------------"
    done <"${gzip_image_list_txt}"
    # (removed a stray trailing `shift`: the function has no positional args
    # here, so it always failed and made dltu's exit status 1)
}
# Offline variant of Download_Load_Tag_Upload: dispatch on each argument and
# run ltu over archives already present on local disk (no OSS download).
# NOTE(review): same global-mutation caveat as Download_Load_Tag_Upload —
# local_gzip_path/oss_prefix_url compound across multiple arguments. Also
# note the "cmii" case uses the cmii/ sub-path here but xauas22/ in the
# download variant; confirm which is intended.
Load_Tag_Upload(){
print_green "[LTU] - start to load image from offline !"
while [[ $# -gt 0 ]]; do
case "$1" in
rke)
# print_green "download rke "
local_gzip_path="$local_gzip_path/rke13014"
mkdir -p ${local_gzip_path}
oss_prefix_url="$oss_prefix_url/rke13014/"
ltu
shift # past argument
;;
middle)
local_gzip_path="$local_gzip_path/middle"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/middle/"
ltu
shift # past argument
;;
cmii)
local_gzip_path="$local_gzip_path/cmii"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/cmii/"
ltu
shift # past argument
;;
*)
# unknown option
print_red "bad arguments"
;;
esac
done
}
# Load every *.tar.gz under $local_gzip_path into docker, retag under the
# local Harbor (rancher/ or cmii/ project) and push.
ltu(){
    # Authenticate once up front instead of re-logging-in for every archive.
    docker login -u admin -p "${HarborAdminPass}" "${DockerRegisterDomain}"
    all_file_list=$(find "$local_gzip_path" -type f -name "*.tar.gz")
    for file in $all_file_list; do
        echo "offline gzip file is => : $file"
        # `docker load` prints "Loaded image: <name>"; keep the name part.
        image_full_name=$(docker load -i "${file}" | head -n1 | awk -F': ' '{print $2}')
        # Strip the registry/project prefix, keep "name:tag".
        app_name=$(echo "$image_full_name" | sed 's|.*/||g')
        echo "extract short name is $app_name"
        # Rancher system images go to the rancher/ project, all others to cmii/.
        if echo "$image_full_name" | grep -q "rancher"; then
            print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
            docker tag "${image_full_name}" "$DockerRegisterDomain/rancher/$app_name"
            docker push "$DockerRegisterDomain/rancher/$app_name"
        else
            print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
            docker tag "${image_full_name}" "$DockerRegisterDomain/cmii/$app_name"
            docker push "$DockerRegisterDomain/cmii/$app_name"
        fi
    done
}
# Quick sanity check of the short-name extraction.
# NOTE(review): this function shadows the shell builtin `test` for the rest
# of the script; rename (e.g. self_test) if `test` is ever needed later.
test(){
app_name=$(echo "nginx:latest" | sed 's|.*/||g')
echo "extract short name is $app_name"
}
# test
Download_Load_Tag_Upload "rke"
# Load_Tag_Upload "cmii"

View File

@@ -0,0 +1,82 @@
#!/bin/bash
# Download one image archive from OSS, push it into the local Harbor, and
# roll the matching Deployment to the new tag.
harbor_host=10.22.57.8:8033
namespace=xa-dcity-uas-260116
app_name="" # parsed from the archive name by parse_args
new_tag=""  # parsed from the archive name by parse_args
# Download the given archive from the OSS tmp/ area.
# $1: archive file name; exits 233 when missing or when the download fails.
download_from_oss() {
    if [ "$1" == "" ]; then
        echo "no zip file in error!"
        exit 233
    fi
    echo "start to download => $1"
    # Fail fast on a bad download; the later docker load would otherwise
    # fail with a confusing error about a missing/partial file.
    if ! wget "https://oss.demo.uavcmlc.com/cmlc-installation/tmp/$1"; then
        echo "download failed => $1"
        exit 233
    fi
    echo ""
    echo ""
}
# Load the downloaded archive into docker, retag it from the cdcyy registry
# prefix to the local Harbor, and push it.
# $1: archive file name. Requires app_name/new_tag set by parse_args.
upload_image_to_harbor(){
    if [ "$app_name" == "" ]; then
        echo "app name null exit!"
        exit 233
    fi
    # Abort on load failure instead of tagging/pushing a nonexistent image
    # (the original fell through and ran docker tag anyway).
    if ! docker load < "$1"; then
        echo "docker load error !"
        exit 233
    fi
    # Archives are built as harbor.cdcyy.com.cn/cmii/<app>:<tag>; retag for
    # the local registry.
    docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
    echo ""
    echo ""
    echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
    docker login -u admin -p V2ryStr@ngPss $harbor_host
    docker push "$harbor_host/cmii/$app_name:$new_tag"
    echo ""
    echo ""
}
# Split the archive file name into the global app_name and new_tag.
# Names look like: cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
# -> app_name is the first '='-field, new_tag the second.
parse_args(){
    if [ "$1" == "" ]; then
        echo "no zip file in error!"
        exit 233
    fi
    local archive="$1"
    # Pure parameter expansion — no subshells needed.
    app_name="${archive%%=*}"
    local remainder="${archive#*=}"
    new_tag="${remainder%%=*}"
}
# Patch the Deployment's container image to $new_tag in $namespace, then
# print the image before and after for manual verification.
update_image_tag(){
if [ "$new_tag" == "" ]; then
echo "new tag error!"
exit 233
fi
# Current image with the tag stripped — printed for reference only.
local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)
echo "image grep is => ${image_prefix}"
echo "start to update ${namespace} ${app_name} to ${new_tag} !"
echo ""
# Strategic-merge patch; the container name must equal $app_name for the
# patch to target the right container.
kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
echo ""
echo "start to wait for 3 seconds!"
# Give the controller a moment before reading the image field back.
sleep 3
local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
echo ""
echo "new image are => $image_new"
echo ""
}
# Entry point. $1 is the archive file name, e.g.
# cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
main(){
parse_args "$1"
download_from_oss "$1"
upload_image_to_harbor "$1"
update_image_tag
}
main "$@"

View File

@@ -0,0 +1,82 @@
---
# Doris BE configuration mounted into the BE StatefulSet pods.
kind: ConfigMap
apiVersion: v1
metadata:
  name: doris-cluster-be-conf
  namespace: xa-dcity-uas-260116
  labels:
    app.kubernetes.io/component: be
data:
  # Literal block scalar (|) is required here: the folded form (>) joins
  # adjacent lines with spaces, which would merge config entries and comments
  # into single lines and corrupt the generated be.conf.
  be.conf: |
    CUR_DATE=`date +%Y%m%d-%H%M%S`
    # Log dir
    LOG_DIR="${DORIS_HOME}/log/"
    # For jdk 8
    JAVA_OPTS="-Dfile.encoding=UTF-8 -Xmx2048m -DlogPath=$LOG_DIR/jni.log -Xloggc:$LOG_DIR/be.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives"
    # Set your own JAVA_HOME
    # JAVA_HOME=/path/to/jdk/
    # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
    # https://jemalloc.net/jemalloc.3.html jemalloc allocator tuning parameters
    JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
    JEMALLOC_PROF_PRFIX=""
    # ports for admin, web, heartbeat service
    be_port = 9060
    webserver_port = 8040
    heartbeat_service_port = 9050
    brpc_port = 8060
    arrow_flight_sql_port = -1
    # HTTPS configures
    enable_https = false
    # path of certificate in PEM format.
    #ssl_certificate_path = "$DORIS_HOME/conf/cert.pem"
    # path of private key in PEM format.
    #ssl_private_key_path = "$DORIS_HOME/conf/key.pem"
    # Choose one if there are more than one ip except loopback address.
    # Note that there should at most one ip match this list.
    # If no ip match this rule, will choose one randomly.
    # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
    # Default value is empty.
    # priority_networks = 10.10.10.0/24;192.168.0.0/16
    # data root path, separate by ';'
    # You can specify the storage type for each root path, HDD (cold data) or SSD (hot data)
    # eg:
    # storage_root_path = /home/disk1/doris;/home/disk2/doris;/home/disk2/doris
    # storage_root_path = /home/disk1/doris,medium:SSD;/home/disk2/doris,medium:SSD;/home/disk2/doris,medium:HDD
    # /home/disk2/doris,medium:HDD(default)
    #
    # you also can specify the properties by setting '<property>:<value>', separate by ','
    # property 'medium' has a higher priority than the extension of path
    #
    # Default value is ${DORIS_HOME}/storage, you should create it by hand.
    # storage_root_path = ${DORIS_HOME}/storage
    # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
    # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
    # Advanced configurations
    # INFO, WARNING, ERROR, FATAL
    sys_log_level = INFO
    # sys_log_roll_mode = SIZE-MB-1024
    # sys_log_roll_num = 10
    # sys_log_verbose_modules = *
    # log_buffer_level = -1
    # aws sdk log level
    # Off = 0,
    # Fatal = 1,
    # Error = 2,
    # Warn = 3,
    # Info = 4,
    # Debug = 5,
    # Trace = 6
    # Default to turn off aws sdk log, because aws sdk errors that need to be cared will be output through Doris logs
    #aws_log_level=0
    ## If you are not running in aws cloud, you can disable EC2 metadata
    #AWS_EC2_METADATA_DISABLED=false

View File

@@ -0,0 +1,17 @@
# Headless Service (clusterIP: None) that gives BE pods stable per-pod DNS
# names; referenced by the BE StatefulSet's spec.serviceName.
kind: Service
apiVersion: v1
metadata:
  namespace: xa-dcity-uas-260116
  name: doris-cluster-be-internal
  labels:
    app.kubernetes.io/component: doris-cluster-be-internal
spec:
  ports:
    # FE <-> BE heartbeat channel
    - name: heartbeat-port
      protocol: TCP
      port: 9050
      targetPort: 9050
  selector:
    app.kubernetes.io/component: doris-cluster-be
  clusterIP: None
  type: ClusterIP

View File

@@ -0,0 +1,32 @@
# NodePort Service exposing the Doris BE ports outside the cluster.
kind: Service
apiVersion: v1
metadata:
  name: doris-cluster-be-service
  namespace: xa-dcity-uas-260116
  labels:
    app.kubernetes.io/component: doris-cluster-be
spec:
  ports:
    # BE thrift port
    - name: be-port
      protocol: TCP
      port: 9060
      targetPort: 9060
      nodePort: 32189
    # BE HTTP server (also used for Stream Load and /api/health)
    - name: webserver-port
      protocol: TCP
      port: 8040
      targetPort: 8040
      nodePort: 31624
    # FE <-> BE heartbeat
    - name: heartbeat-port
      protocol: TCP
      port: 9050
      targetPort: 9050
      nodePort: 31625
    # BE bRPC port
    - name: brpc-port
      protocol: TCP
      port: 8060
      targetPort: 8060
      nodePort: 31627
  selector:
    app.kubernetes.io/component: doris-cluster-be
  type: NodePort

View File

@@ -0,0 +1,223 @@
# Doris BE StatefulSet (single replica), scheduled only on nodes labelled
# doris-deploy=true; registers with the FE via ENV_FE_ADDR.
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: doris-cluster-be
  namespace: xa-dcity-uas-260116
  labels:
    app.kubernetes.io/component: doris-cluster-be
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: doris-cluster-be
  template:
    metadata:
      name: doris-cluster-be
      labels:
        app.kubernetes.io/component: doris-cluster-be
    spec:
      # FIX: removed `hostname: $(POD_NAME)` / `subdomain: doris-cluster-be-internal`.
      # $(VAR) substitution only works inside container command/args/env, so the
      # literal value "$(POD_NAME)" is not a valid DNS-1123 label and the API
      # server rejects the pod template. A StatefulSet already assigns each pod
      # hostname=<pod-name> and DNS <pod-name>.<serviceName>.<ns>.svc via
      # spec.serviceName (doris-cluster-be-internal), so both fields are unneeded.
      imagePullSecrets:
        - name: harborsecret
      volumes:
        - name: podinfo
          downwardAPI:
            items:
              - path: labels
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.labels
              - path: annotations
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.annotations
            defaultMode: 420
        - name: doris-cluster-be-conf
          configMap:
            name: doris-cluster-be-conf
            defaultMode: 420
        - name: be-storage
          persistentVolumeClaim:
            claimName: doris-be-storage-pvc
        - name: be-log
          persistentVolumeClaim:
            # FIX: was `doris-fe-log-pvc` (the FE log claim) — BE logs would have
            # shared the FE's log volume; use the dedicated BE log PVC defined
            # alongside the other claims.
            claimName: doris-be-log-pvc
      initContainers:
        # Raises kernel settings required by Doris BE; needs privileged mode.
        # NOTE(review): `ulimit -n` here only affects this init container's
        # shell; the BE container's fd limit comes from the container runtime
        # (see the containerd LimitNOFILE note in the install docs).
        - name: default-init
          image: '10.22.57.8:8033/cmii/alpine:3.23.0'
          command:
            - /bin/sh
          args:
            - '-c'
            - sysctl -w vm.max_map_count=2000000 ; swapoff -a ; ulimit -n 655350
          resources:
            limits:
              cpu: '1'
              memory: 1Gi
            requests:
              cpu: '0.5'
              memory: 500Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true
      containers:
        - name: be
          # Elevated capabilities so the BE process can raise its own limits.
          securityContext:
            capabilities:
              add:
                - SYS_RESOURCE
                - IPC_LOCK
            runAsUser: 0
          image: '10.22.57.8:8033/cmii/doris.be-ubuntu:2.1.6'
          command:
            - /opt/apache-doris/be_entrypoint.sh
          args:
            - $(ENV_FE_ADDR)
          ports:
            - name: be-port
              containerPort: 9060
              protocol: TCP
            - name: webserver-port
              containerPort: 8040
              protocol: TCP
            - name: heartbeat-port
              containerPort: 9050
              protocol: TCP
            - name: brpc-port
              containerPort: 8060
              protocol: TCP
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: CONFIGMAP_MOUNT_PATH
              value: /etc/doris
            - name: USER
              value: root
            - name: DORIS_ROOT
              value: /opt/apache-doris
            - name: ENV_FE_ADDR
              value: doris-cluster-fe-service
            - name: FE_QUERY_PORT
              value: '9030'
          resources:
            limits:
              cpu: '4'
              memory: 4Gi
            requests:
              cpu: '1'
              memory: 2Gi
          volumeMounts:
            - name: podinfo
              mountPath: /etc/podinfo
            - name: be-storage
              mountPath: /opt/apache-doris/be/storage
            - name: be-log
              mountPath: /opt/apache-doris/be/log
            - name: doris-cluster-be-conf
              mountPath: /etc/doris
          # NOTE(review): timeoutSeconds (180) > periodSeconds (5) is accepted
          # by the API server but unusual — confirm intended.
          livenessProbe:
            tcpSocket:
              port: 9050
            initialDelaySeconds: 80
            timeoutSeconds: 180
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /api/health
              port: 8040
              scheme: HTTP
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          # Allows up to 60 * 5s = 300s for the BE to come up before liveness kicks in.
          startupProbe:
            tcpSocket:
              port: 9050
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 60
          lifecycle:
            preStop:
              exec:
                command:
                  - /opt/apache-doris/be_prestop.sh
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: doris-deploy
                    operator: In
                    values:
                      - "true"
        # Prefer spreading BE pods across nodes.
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app.kubernetes.io/component
                      operator: In
                      values:
                        - doris-cluster-be
                topologyKey: kubernetes.io/hostname
      schedulerName: default-scheduler
  # volumeClaimTemplates kept disabled: this deployment binds pre-created PVCs
  # (see the pvc manifest) instead of per-pod templated claims.
  # volumeClaimTemplates:
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: be-storage
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: '10'
  #       storageClassName: nfs-prod-distribute
  #       volumeMode: Filesystem
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: be-log
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: '10'
  #       storageClassName: nfs-prod-distribute
  #       volumeMode: Filesystem
  serviceName: doris-cluster-be-internal
  podManagementPolicy: Parallel

View File

@@ -0,0 +1,67 @@
# Doris FE configuration, mounted at /etc/doris (CONFIGMAP_MOUNT_PATH) by the
# FE StatefulSet. Everything under `fe.conf: |` is opaque file content handed
# to the FE process verbatim — it is NOT YAML and must not be re-indented.
apiVersion: v1
kind: ConfigMap
metadata:
  name: doris-cluster-fe-conf
  namespace: xa-dcity-uas-260116
  labels:
    app.kubernetes.io/component: fe
data:
  fe.conf: |
    #####################################################################
    ## The uppercase properties are read and exported by bin/start_fe.sh.
    ## To see all Frontend configurations,
    ## see fe/src/org/apache/doris/common/Config.java
    #####################################################################
    CUR_DATE=`date +%Y%m%d-%H%M%S`
    # Log dir
    LOG_DIR = ${DORIS_HOME}/log
    # For jdk 8
    JAVA_OPTS="-Dfile.encoding=UTF-8 -Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:$LOG_DIR/log/fe.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Dlog4j2.formatMsgNoLookups=true"
    # Set your own JAVA_HOME
    # JAVA_HOME=/path/to/jdk/
    ##
    ## the lowercase properties are read by main program.
    ##
    # store metadata, must be created before start FE.
    # Default value is ${DORIS_HOME}/doris-meta
    # meta_dir = ${DORIS_HOME}/doris-meta
    # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
    # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
    http_port = 8030
    rpc_port = 9020
    query_port = 9030
    edit_log_port = 9010
    arrow_flight_sql_port = -1
    # Choose one if there are more than one ip except loopback address.
    # Note that there should at most one ip match this list.
    # If no ip match this rule, will choose one randomly.
    # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
    # Default value is empty.
    # priority_networks = 10.10.10.0/24;192.168.0.0/16
    # Advanced configurations
    # log_roll_size_mb = 1024
    # INFO, WARN, ERROR, FATAL
    sys_log_level = INFO
    # NORMAL, BRIEF, ASYNC,FE 日志的输出模式,其中 NORMAL 为默认的输出模式日志同步输出且包含位置信息。ASYNC 默认是日志异步输出且包含位置信息。 BRIEF 模式是日志异步输出但不包含位置信息。三种日志输出模式的性能依次递增
    sys_log_mode = ASYNC
    # sys_log_roll_num = 10
    # sys_log_verbose_modules = org.apache.doris
    # audit_log_dir = $LOG_DIR
    # audit_log_modules = slow_query, query
    # audit_log_roll_num = 10
    # meta_delay_toleration_second = 10
    # qe_max_connection = 1024
    # qe_query_timeout_second = 300
    # qe_slow_log_ms = 5000
    #Fully Qualified Domain Name完全限定域名,开启后各节点之间通信基于FQDN
    enable_fqdn_mode = true

View File

@@ -0,0 +1,17 @@
# Headless Service (clusterIP: None) giving FE pods stable per-pod DNS names;
# referenced by the FE StatefulSet's spec.serviceName.
kind: Service
apiVersion: v1
metadata:
  name: doris-cluster-fe-internal
  namespace: xa-dcity-uas-260116
  labels:
    app.kubernetes.io/component: doris-cluster-fe
spec:
  ports:
    # MySQL-protocol query port
    - name: query-port
      protocol: TCP
      port: 9030
      targetPort: 9030
  selector:
    app.kubernetes.io/component: doris-cluster-fe
  clusterIP: None
  type: ClusterIP

View File

@@ -0,0 +1,32 @@
# NodePort Service exposing the Doris FE ports outside the cluster.
# Also the address BEs register against (ENV_FE_ADDR in the BE StatefulSet).
kind: Service
apiVersion: v1
metadata:
  name: doris-cluster-fe-service
  namespace: xa-dcity-uas-260116
  labels:
    app.kubernetes.io/component: doris-cluster-fe
spec:
  ports:
    # FE web UI / HTTP API
    - name: http-port
      protocol: TCP
      port: 8030
      targetPort: 8030
      nodePort: 31620
    # FE thrift RPC
    - name: rpc-port
      protocol: TCP
      port: 9020
      targetPort: 9020
      nodePort: 31621
    # MySQL-protocol query port
    - name: query-port
      protocol: TCP
      port: 9030
      targetPort: 9030
      nodePort: 31622
    # bdbje edit-log / election port
    - name: edit-log-port
      protocol: TCP
      port: 9010
      targetPort: 9010
      nodePort: 31623
  selector:
    app.kubernetes.io/component: doris-cluster-fe
  type: NodePort

View File

@@ -0,0 +1,198 @@
# Doris FE StatefulSet (single replica), scheduled only on nodes labelled
# doris-deploy=true. Mounts pre-created PVCs for metadata and logs.
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: doris-cluster-fe
  namespace: xa-dcity-uas-260116
  labels:
    app.kubernetes.io/component: doris-cluster-fe
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: doris-cluster-fe
  template:
    metadata:
      name: doris-cluster-fe
      labels:
        app.kubernetes.io/component: doris-cluster-fe
    spec:
      imagePullSecrets:
        - name: harborsecret
      volumes:
        # FE metadata (bdbje) volume — must survive restarts.
        - name: meta
          persistentVolumeClaim:
            # claimName: meta
            claimName: doris-fe-meta-pvc
        - name: log
          persistentVolumeClaim:
            # claimName: meta
            claimName: doris-fe-log-pvc
        - name: podinfo
          downwardAPI:
            items:
              - path: labels
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.labels
              - path: annotations
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.annotations
            defaultMode: 420
        - name: doris-cluster-fe-conf
          configMap:
            name: doris-cluster-fe-conf
            defaultMode: 420
      containers:
        - name: doris-cluster-fe
          image: '10.22.57.8:8033/cmii/doris.fe-ubuntu:2.1.6'
          command:
            - /opt/apache-doris/fe_entrypoint.sh
          args:
            - $(ENV_FE_ADDR)
          ports:
            - name: http-port
              containerPort: 8030
              protocol: TCP
            - name: rpc-port
              containerPort: 9020
              protocol: TCP
            - name: query-port
              containerPort: 9030
              protocol: TCP
            - name: edit-log-port
              containerPort: 9010
              protocol: TCP
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: CONFIGMAP_MOUNT_PATH
              value: /etc/doris
            - name: USER
              value: root
            - name: DORIS_ROOT
              value: /opt/apache-doris
            - name: ENV_FE_ADDR
              value: doris-cluster-fe-service
            - name: FE_QUERY_PORT
              value: '9030'
            # NOTE(review): ELECT_NUMBER=3 while replicas: 1 — looks like a
            # leftover from a 3-FE HA layout; confirm the entrypoint tolerates
            # a single replica with this value.
            - name: ELECT_NUMBER
              value: '3'
          resources:
            limits:
              cpu: '4'
              memory: 4Gi
            requests:
              cpu: '1'
              memory: 2Gi
          volumeMounts:
            - name: podinfo
              mountPath: /etc/podinfo
            - name: log
              mountPath: /opt/apache-doris/fe/log
            - name: meta
              mountPath: /opt/apache-doris/fe/doris-meta
            - name: doris-cluster-fe-conf
              mountPath: /etc/doris
          # NOTE(review): timeoutSeconds (180) > periodSeconds (5) is accepted
          # by the API server but unusual — confirm intended.
          livenessProbe:
            tcpSocket:
              port: 9030
            initialDelaySeconds: 80
            timeoutSeconds: 180
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /api/health
              port: 8030
              scheme: HTTP
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          # Allows up to 60 * 5s = 300s for the FE to come up before liveness kicks in.
          startupProbe:
            tcpSocket:
              port: 9030
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 60
          lifecycle:
            preStop:
              exec:
                command:
                  - /opt/apache-doris/fe_prestop.sh
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: doris-deploy
                    operator: In
                    values:
                      - "true"
        # Prefer spreading FE pods across nodes.
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app.kubernetes.io/component
                      operator: In
                      values:
                        - doris-cluster-fe
                topologyKey: kubernetes.io/hostname
      schedulerName: default-scheduler
  # volumeClaimTemplates kept disabled: this deployment binds pre-created PVCs
  # (see the pvc manifest) instead of per-pod templated claims.
  # volumeClaimTemplates:
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: meta
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: 10G
  #       storageClassName: hcms-efs-class
  #       volumeMode: Filesystem
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: log
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: '10'
  #       storageClassName: hcms-efs-class
  #       volumeMode: Filesystem
  serviceName: doris-cluster-fe-internal
  podManagementPolicy: Parallel

View File

@@ -0,0 +1,60 @@
---
# Pre-created PVCs for the Doris FE/BE StatefulSets (which reference them by
# name instead of using volumeClaimTemplates).
# FE metadata (bdbje) — losing this loses cluster metadata.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-fe-meta-pvc
  namespace: xa-dcity-uas-260116
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
---
# FE log volume.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-fe-log-pvc
  namespace: xa-dcity-uas-260116
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
---
# BE data volume.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-be-storage-pvc
  namespace: xa-dcity-uas-260116
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Gi # adjust to actual storage requirements
---
# BE log volume — intended for the BE StatefulSet's be-log mount; verify the
# BE StatefulSet references this claim (not the FE log claim).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-be-log-pvc
  namespace: xa-dcity-uas-260116
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi

View File

@@ -0,0 +1,31 @@
修改PVC文件
修改全部的NAMESPACE
修改statefulset里面的IMAGE
# BE出现下面的错误
like
* soft nofile 655350
* hard nofile 655350
and then run 'ulimit -n 655350' to take effect on current session.
需要修改 ContainerD 的 systemd Service 文件 /lib/systemd/system/containerd.service,
在 [Service] 段中设置以下限制:
[Service]
LimitNOFILE=655350
LimitNPROC=infinity
LimitMEMLOCK=infinity
# 2. 重新加载并重启 containerd
sudo systemctl daemon-reload
sudo systemctl restart containerd
# 3. 验证 containerd 的限制
ps aux | grep containerd | grep -v grep
cat /proc/$(pgrep containerd | head -1)/limits | grep "open files"
# 应该显示: Max open files 655350 655350 files

View File

@@ -0,0 +1,634 @@
#!/usr/bin/env bash
#===============================================================================
# Author: Smith Wang
# Version: 1.0.0
# License: MIT
# Filename: doris_csv_stream_load.sh
#
# Description:
# 1) Use wget to download one or multiple CSV files (resume + size verification)
# 2) Import into Apache Doris via Stream Load API using curl
#
# Module dependencies:
# - bash (>= 5.0)
# - wget
# - curl
# - awk, sed, grep, stat, date, mktemp
#
# ShellCheck:
# shellcheck -x doris_csv_stream_load.sh
#===============================================================================
# Strict mode: abort on errors, unset variables and pipeline failures.
set -euo pipefail
# Word-split only on newline/tab (safer handling of paths with spaces).
IFS=$'\n\t'
#===============================================================================
# Global Constants
#===============================================================================
readonly SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_VERSION="1.0.0"
readonly DEFAULT_WORKDIR="./doris_csv_downloads"
readonly DEFAULT_NATIONAL_DIR_URL="https://oss.demo.uavcmlc.com/cmlc-installation/doris/all"
readonly DEFAULT_NATIONAL_COUNT="6"  # suffix 0..5
readonly DEFAULT_REGION_URL="https://oss.demo.uavcmlc.com/cmlc-installation/doris/all/xiongan.csv"
# Doris defaults (override by args)
readonly DEFAULT_DORIS_USER="root"
readonly DEFAULT_DORIS_PASS=""        # empty by default (root:)
readonly DEFAULT_DORIS_BE_PORT="8040" # Stream Load port (BE webserver)
# wget/curl behavior
readonly WGET_RETRIES="10"
readonly WGET_TIMEOUT_SEC="30"
readonly CURL_TIMEOUT_SEC="600" # per file; adjust if needed
readonly LOCKFILE="/tmp/${SCRIPT_NAME}.lock"
#===============================================================================
# Runtime Config (set by args; see parse_args)
#===============================================================================
ACTION="all" # download|load|all
WORKDIR="${DEFAULT_WORKDIR}"
NATIONAL_DIR_URL="${DEFAULT_NATIONAL_DIR_URL}"
NATIONAL_PREFIX="" # REQUIRED for national mode
NATIONAL_COUNT="${DEFAULT_NATIONAL_COUNT}"
REGION_URL="${DEFAULT_REGION_URL}"
DORIS_BE_IP="" # REQUIRED
DORIS_BE_PORT="${DEFAULT_DORIS_BE_PORT}"
DORIS_USER="${DEFAULT_DORIS_USER}"
DORIS_PASS="${DEFAULT_DORIS_PASS}"
DORIS_DB="cmii"
DORIS_TABLE="dwd_reg_grid_city_detail_dd"
COLUMN_SEPARATOR=","
# Derived (set in prepare_workdir)
DOWNLOAD_LIST_FILE=""
STREAMLOAD_LOG_DIR=""
#===============================================================================
# ASCII Call Graph
#===============================================================================
# main
# ├─ acquire_lock
# ├─ parse_args
# ├─ validate_env
# ├─ prepare_workdir
# ├─ build_download_list
# ├─ run_downloads
# │ ├─ get_remote_size_bytes
# │ ├─ download_one
# │ └─ verify_file_size
# ├─ run_stream_load
# │ ├─ stream_load_one
# │ └─ parse_stream_load_response
# └─ release_lock (trap)
#===============================================================================
#===============================================================================
# Logging
#===============================================================================
LOG_LEVEL="INFO" # DEBUG|INFO|WARN|ERROR

### Emit a single timestamped log record on stderr.
# @param level string Severity label (DEBUG|INFO|WARN|ERROR)
# @param msg string Text to record
# @return 0 Success
log() {
  local severity="$1" text="$2"
  local stamp
  stamp="$(date '+%Y-%m-%d %H:%M:%S')"
  printf '%s [%s] %s: %s\n' "${stamp}" "${severity}" "${SCRIPT_NAME}" "${text}" >&2
}
### Debug log — emitted only when LOG_LEVEL=DEBUG.
# The trailing `|| true` keeps the suppressed branch from tripping `set -e`.
# @param msg string Message
# @return 0 Success
log_debug() { [[ "$LOG_LEVEL" == "DEBUG" ]] && log "DEBUG" "$1" || true; }
### Info log
# @param msg string Message
# @return 0 Success
log_info() { log "INFO" "$1"; }
### Warn log
# @param msg string Message
# @return 0 Success
log_warn() { log "WARN" "$1"; }
### Error log
# @param msg string Message
# @return 0 Success
log_error() { log "ERROR" "$1"; }
#===============================================================================
# Error / Cleanup
#===============================================================================
# Script-wide temp dir; created in prepare_workdir, removed by cleanup().
TMPDIR=""
# Reserved for ad-hoc temp files (appears unused in this file).
CLEANUP_FILES=()
### Cleanup handler, installed on EXIT/INT/TERM by the trap below.
# Removes the temp dir, releases the run lock, and re-exits with the
# original exit code so the script's status is preserved.
# @return never Exits with the captured exit code
cleanup() {
  local exit_code=$?
  # > cleanup temp resources
  if [[ -n "${TMPDIR}" && -d "${TMPDIR}" ]]; then
    rm -rf "${TMPDIR}" || true
  fi
  # > release lock
  release_lock || true
  if [[ $exit_code -ne 0 ]]; then
    log_error "Exiting with code ${exit_code}"
  fi
  exit "$exit_code"
}
trap cleanup EXIT INT TERM
### Log a fatal error and terminate the script.
# FIX: previously `return 1`, which only stopped the script because `set -e`
# happened to be active; in any context where errexit is suppressed (e.g. a
# condition, or the left side of && / ||) execution would have continued past
# a fatal error. `exit 1` terminates unconditionally; the EXIT trap still
# runs cleanup().
# @param msg string Error message
# @return never Exits the process with status 1
die() {
  log_error "$1"
  exit 1
}
#===============================================================================
# Lock
#===============================================================================
### Acquire a simple lock to avoid concurrent runs.
# Uses atomic `mkdir` (fails if the directory already exists), so there is
# no dependency on flock.
# @require mkdir
# @return 0 Success (dies when another instance holds the lock)
acquire_lock() {
  # > Prefer mkdir lock for POSIX-ish behavior (no flock dependency)
  if mkdir "${LOCKFILE}.d" 2>/dev/null; then
    log_debug "Lock acquired: ${LOCKFILE}.d"
  else
    die "Another instance is running (lock exists: ${LOCKFILE}.d). Remove it if you're sure."
  fi
}
### Release lock (best-effort; errors are swallowed so cleanup never fails).
# @return 0 Success
release_lock() {
  if [[ -d "${LOCKFILE}.d" ]]; then
    rmdir "${LOCKFILE}.d" 2>/dev/null || true
    log_debug "Lock released: ${LOCKFILE}.d"
  fi
}
#===============================================================================
# Usage
#===============================================================================
### Print CLI usage/help text to stdout.
# The heredoc delimiter is quoted ('EOF'), so no expansion occurs inside it.
# @return 0 Success
usage() {
  cat <<'EOF'
Usage:
  doris_csv_stream_load.sh [download|load|all] [options]
Actions:
  download Only download CSVs (wget)
  load Only stream-load existing CSVs in workdir
  all Download then load (default)
Options:
  --workdir <path> Download directory (default: ./doris_csv_downloads)
  --log-level <DEBUG|INFO|WARN|ERROR>
  # National files (suffix 0..5 by default)
  --national-dir-url <url> Base directory URL for national files
  default: https://oss.demo.uavcmlc.com/cmlc-installation/doris/all
  --national-prefix <name_prefix> REQUIRED for national mode
  e.g. result_2aee9754dd304ca1-a0651901906f9bb4
  --national-count <n> How many files, suffix 0..n-1 (default: 6)
  # Optional single region file
  --region-url <url> default: https://oss.demo.uavcmlc.com/cmlc-installation/doris/all/xiongan.csv
  --no-region Skip region file
  # Doris stream load config
  --doris-be-ip <ip> REQUIRED
  --doris-be-port <port> default: 8040
  --doris-user <user> default: root
  --doris-pass <pass> default: empty
  --db <db_name> default: cmii
  --table <table_name> default: dwd_reg_grid_city_detail_dd
  --column-separator <sep> default: ,
Examples:
  # 1) All national(0..5) + region file, download then load:
  ./doris_csv_stream_load.sh all \
  --national-prefix result_2aee9754dd304ca1-a0651901906f9bb4 \
  --doris-be-ip 10.10.10.10
  # 2) Download only:
  ./doris_csv_stream_load.sh download \
  --national-prefix result_xxx \
  --doris-be-ip 10.10.10.10
  # 3) Load only (assumes files already in workdir):
  ./doris_csv_stream_load.sh load \
  --national-prefix result_xxx \
  --doris-be-ip 10.10.10.10
EOF
}
#===============================================================================
# Args Parsing
#===============================================================================
# When true, the single-region CSV is excluded from the download/load list.
SKIP_REGION="false"
### Parse CLI arguments into the global runtime-config variables.
# The first positional argument (download|load|all) selects ACTION; all
# remaining arguments are --flag value pairs.
# @param args string[] Command line args
# @return 0 Success (dies on an unknown argument)
parse_args() {
  # > Optional leading action word
  if [[ $# -ge 1 ]]; then
    case "$1" in
      download|load|all) ACTION="$1"; shift ;;
      -h|--help) usage; exit 0 ;;
      *) : ;;
    esac
  fi
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --workdir) WORKDIR="$2"; shift 2 ;;
      --log-level) LOG_LEVEL="$2"; shift 2 ;;
      --national-dir-url) NATIONAL_DIR_URL="$2"; shift 2 ;;
      --national-prefix) NATIONAL_PREFIX="$2"; shift 2 ;;
      --national-count) NATIONAL_COUNT="$2"; shift 2 ;;
      --region-url) REGION_URL="$2"; shift 2 ;;
      --no-region) SKIP_REGION="true"; shift 1 ;;
      --doris-be-ip) DORIS_BE_IP="$2"; shift 2 ;;
      --doris-be-port) DORIS_BE_PORT="$2"; shift 2 ;;
      --doris-user) DORIS_USER="$2"; shift 2 ;;
      --doris-pass) DORIS_PASS="$2"; shift 2 ;;
      --db) DORIS_DB="$2"; shift 2 ;;
      --table) DORIS_TABLE="$2"; shift 2 ;;
      --column-separator) COLUMN_SEPARATOR="$2"; shift 2 ;;
      -h|--help) usage; exit 0 ;;
      *)
        die "Unknown argument: $1"
        ;;
    esac
  done
}
#===============================================================================
# Validation / Environment
#===============================================================================
### Validate required tools and mandatory configuration.
# @require wget
# @require curl
# @require awk sed grep stat
# @return 0 Success (dies on any missing tool or required option)
validate_env() {
  command -v wget >/dev/null 2>&1 || die "wget not found"
  command -v curl >/dev/null 2>&1 || die "curl not found"
  command -v awk >/dev/null 2>&1 || die "awk not found"
  command -v sed >/dev/null 2>&1 || die "sed not found"
  command -v grep >/dev/null 2>&1 || die "grep not found"
  command -v stat >/dev/null 2>&1 || die "stat not found"
  [[ -n "${DORIS_BE_IP}" ]] || die "--doris-be-ip is required"
  [[ -n "${NATIONAL_PREFIX}" ]] || die "--national-prefix is required (filename changes, must be provided)"
  [[ "${NATIONAL_COUNT}" =~ ^[0-9]+$ ]] || die "--national-count must be an integer"
}
### Prepare working directory, temp dir, and the stream-load log directory.
# Sets the derived globals TMPDIR, DOWNLOAD_LIST_FILE and STREAMLOAD_LOG_DIR.
# @return 0 Success
prepare_workdir() {
  mkdir -p "${WORKDIR}"
  TMPDIR="$(mktemp -d)"
  DOWNLOAD_LIST_FILE="${TMPDIR}/download_list.txt"
  # > stream load logs dir (one JSON response file per load request)
  STREAMLOAD_LOG_DIR="${WORKDIR}/_streamload_logs"
  mkdir -p "${STREAMLOAD_LOG_DIR}"
  log_debug "Workdir: ${WORKDIR}"
  log_debug "StreamLoad log dir: ${STREAMLOAD_LOG_DIR}"
}
### Generate a local request id for tracing a single stream-load call.
# Format: <timestamp>__<pid>__<sanitized-basename>.
# @param csv_path string Local CSV file path
# @return 0 Success (prints request id)
gen_request_id() {
  local csv_path="$1"
  local stamp base
  stamp="$(date '+%Y%m%d_%H%M%S')"
  base="$(basename "${csv_path}")"
  # > replace every character outside [a-zA-Z0-9._-] with '_'
  base="$(printf '%s' "${base}" | sed 's/[^a-zA-Z0-9._-]/_/g')"
  printf '%s__%s__%s' "${stamp}" "$$" "${base}"
}
### Best-effort extraction of a JSON string field value without jq.
# Scans for the first `"field":"value"` occurrence and prints the value
# (empty output when the field is absent).
# @param json_file string JSON file path
# @param field_name string Field name, e.g. Status
# @return 0 Success (prints value or empty)
json_get_string() {
  local file="$1" key="$2"
  local hit
  # > locate the first "key":"value" fragment; grep may find nothing
  hit="$(grep -Eo "\"${key}\"[[:space:]]*:[[:space:]]*\"[^\"]*\"" "${file}" | head -n1 || true)"
  [[ -n "${hit}" ]] || return 0
  # > the fragment ends with the quoted value; strip everything before it
  printf '%s\n' "${hit}" | sed -E 's/.*"([^"]*)"[[:space:]]*$/\1/'
}
### Best-effort extraction of a JSON numeric field value without jq.
# Scans for the first `"field":12345` occurrence and prints the number
# (empty output when the field is absent).
# @param json_file string JSON file path
# @param field_name string Field name, e.g. TxnId
# @return 0 Success (prints value or empty)
json_get_number() {
  local file="$1" key="$2"
  local hit
  # > locate the first "key":<digits> fragment; grep may find nothing
  hit="$(grep -Eo "\"${key}\"[[:space:]]*:[[:space:]]*[0-9]+" "${file}" | head -n1 || true)"
  [[ -n "${hit}" ]] || return 0
  # > the digits are the trailing run; drop the longest prefix ending in a non-digit
  printf '%s\n' "${hit##*[!0-9]}"
}
#===============================================================================
# Download List Builder
#===============================================================================
### Build the download URL list file (one URL per line).
# FIX: this function was defined twice — the first copy contained a typo
# ("NATIONA L_COUNT", a space inside the variable name, which expands to an
# empty arithmetic operand) and was only saved by the immediate corrected
# redefinition below it. Collapsed into a single correct definition.
# @require globals DOWNLOAD_LIST_FILE NATIONAL_DIR_URL NATIONAL_PREFIX
#                  NATIONAL_COUNT SKIP_REGION REGION_URL
# @return 0 Success
build_download_list() {
  : > "${DOWNLOAD_LIST_FILE}"
  # > National files: <prefix>_0 .. <prefix>_(count-1)
  local idx
  for ((idx = 0; idx < NATIONAL_COUNT; idx++)); do
    printf '%s/%s_%s.csv\n' "${NATIONAL_DIR_URL}" "${NATIONAL_PREFIX}" "${idx}" >> "${DOWNLOAD_LIST_FILE}"
  done
  # > Optional single-region file
  if [[ "${SKIP_REGION}" != "true" ]]; then
    printf '%s\n' "${REGION_URL}" >> "${DOWNLOAD_LIST_FILE}"
  fi
  log_info "Download list prepared: $(wc -l < "${DOWNLOAD_LIST_FILE}") file(s)"
}
#===============================================================================
# Download / Verify
#===============================================================================
### Get remote content length (bytes) via wget --spider.
# Some servers omit Content-Length; callers must handle an empty result.
# @param url string Remote URL
# @return 0 Success (prints size in bytes, or empty when unknown)
get_remote_size_bytes() {
  local url="$1"
  # > wget --server-response prints response headers (indented) on stderr
  local headers
  headers="$(wget --spider --server-response --timeout="${WGET_TIMEOUT_SEC}" --tries=2 "${url}" 2>&1 || true)"
  # > FIX: the previous awk compared $1 against " content-length" (exactly one
  # > leading space); wget indents header lines with more whitespace, so the
  # > comparison never matched and size verification silently degraded to a
  # > non-empty check. Trim leading whitespace before comparing.
  local size
  size="$(printf '%s\n' "${headers}" \
    | awk -F': ' '{ key = tolower($1); sub(/^[[:space:]]+/, "", key);
                    if (key == "content-length") { gsub("\r", "", $2); print $2 } }' \
    | tail -n 1)"
  if [[ "${size}" =~ ^[0-9]+$ ]]; then
    printf '%s' "${size}"
  else
    printf '%s' ""
  fi
}
### Get local file size in bytes (GNU stat %s).
# @param file_path string Local file path
# @return 0 Success (prints size followed by a newline)
get_local_size_bytes() {
  local target="$1"
  local size_bytes
  size_bytes="$(stat -c '%s' "${target}")"
  printf '%s\n' "${size_bytes}"
}
### Verify a downloaded file: exact size match when the remote size is known,
### otherwise fall back to a non-empty check (with a warning).
# @param file_path string Local file path
# @param remote_size string Remote size bytes (may be empty)
# @return 0 Success (dies on missing file, empty file, or size mismatch)
verify_file_size() {
  local file_path="$1"
  local remote_size="$2"
  [[ -f "${file_path}" ]] || die "File not found: ${file_path}"
  if [[ -z "${remote_size}" ]]; then
    # > Cannot verify by size; at least ensure file is non-empty
    local local_size
    local_size="$(get_local_size_bytes "${file_path}")"
    [[ "${local_size}" -gt 0 ]] || die "Downloaded file is empty: ${file_path}"
    log_warn "Remote Content-Length missing; only checked non-empty: ${file_path} (${local_size} bytes)"
    return 0
  fi
  local local_size
  local_size="$(get_local_size_bytes "${file_path}")"
  # > String comparison is fine here: both sides are plain decimal byte counts
  if [[ "${local_size}" != "${remote_size}" ]]; then
    die "Size mismatch for ${file_path}: local=${local_size}, remote=${remote_size}"
  fi
  log_info "Verified: ${file_path} (size=${local_size} bytes)"
}
### Download a single URL into WORKDIR with resume + retries, then verify size.
# FIX: the output path was "${WORKDIR}/$(unknown)" — a literal command
# substitution that would execute a nonexistent `unknown` command and abort
# under `set -e`; it must be the filename derived from the URL. The debug log
# had the same corruption.
# @param url string Remote URL
# @require wget
# @return 0 Success (dies on download or verification failure)
download_one() {
  local url="$1"
  local filename
  filename="$(basename "${url}")"
  local out_path="${WORKDIR}/${filename}"
  log_info "Downloading: ${url}"
  local remote_size
  remote_size="$(get_remote_size_bytes "${url}")"
  if [[ -n "${remote_size}" ]]; then
    log_debug "Remote size: ${remote_size} bytes for ${filename}"
  else
    log_warn "Remote size unknown (no Content-Length): ${url}"
  fi
  # > Use --continue for resume, --tries for retries, --timeout to avoid hanging
  # > Use --output-document to ensure a deterministic output path
  wget --continue \
    --tries="${WGET_RETRIES}" \
    --timeout="${WGET_TIMEOUT_SEC}" \
    --output-document="${out_path}" \
    "${url}"
  # > Ensure fully downloaded before it is ever stream-loaded
  verify_file_size "${out_path}" "${remote_size}"
}
### Download every URL from the list file; all must succeed (die aborts on
### the first failure because download_one dies on error).
# @param list_file string File containing one URL per line
# @return 0 Success
run_downloads() {
  local list_file="$1"
  [[ -f "${list_file}" ]] || die "Download list file not found: ${list_file}"
  # > Read line by line (URL per line); blank lines are skipped
  while IFS= read -r url; do
    [[ -n "${url}" ]] || continue
    download_one "${url}"
  done < "${list_file}"
  log_info "All downloads completed successfully."
}
### Parse a Doris Stream Load JSON response and decide success; logs a
### structured one-line summary (txn id, label, row counts) for grepping.
# @param resp_file string Response file path
# @return 0 Success (Status == "Success"), 1 Failure
parse_stream_load_response() {
  local resp_file="$1"
  [[ -f "${resp_file}" ]] || die "Response file not found: ${resp_file}"
  local status message txn_id label load_rows filtered_rows load_bytes
  # > best-effort field extraction (no jq dependency)
  status="$(json_get_string "${resp_file}" "Status")"
  message="$(json_get_string "${resp_file}" "Message")"
  txn_id="$(json_get_number "${resp_file}" "TxnId")"
  label="$(json_get_string "${resp_file}" "Label")"
  load_rows="$(json_get_number "${resp_file}" "NumberLoadedRows")"
  filtered_rows="$(json_get_number "${resp_file}" "NumberFilteredRows")"
  load_bytes="$(json_get_number "${resp_file}" "LoadBytes")"
  # > structured summary (easy to grep)
  log_info "StreamLoadResp status=${status:-N/A} txn_id=${txn_id:-N/A} label=${label:-N/A} loaded=${load_rows:-N/A} filtered=${filtered_rows:-N/A} bytes=${load_bytes:-N/A}"
  if [[ "${status}" == "Success" ]]; then
    log_info "Stream Load Success. Message=${message:-N/A}"
    return 0
  fi
  log_error "Stream Load Failed. Status=${status:-Unknown} Message=${message:-N/A}"
  log_error "Full response saved at: ${resp_file}"
  return 1
}
### Stream-load a single CSV file into Doris via the BE HTTP API.
# No `label` header is sent (deliberate — see note below); a locally generated
# request id correlates logs with the persisted response file instead.
# @param csv_path string Local CSV file path
# @require curl (>= 7.76 for --fail-with-body)
# @return 0 Success (non-zero when curl fails or the response is not Success)
stream_load_one() {
  local csv_path="$1"
  [[ -f "${csv_path}" ]] || die "CSV not found: ${csv_path}"
  local url="http://${DORIS_BE_IP}:${DORIS_BE_PORT}/api/${DORIS_DB}/${DORIS_TABLE}/_stream_load"
  # > local trace id to correlate logs/response/files
  local request_id
  request_id="$(gen_request_id "${csv_path}")"
  # > persist full response for tracing
  local resp_file="${STREAMLOAD_LOG_DIR}/${request_id}.json"
  log_info "Stream loading: ${csv_path} -> ${url} (request_id=${request_id})"
  log_info "Response will be saved: ${resp_file}"
  # > NOTE: do NOT set label header (per requirement)
  # > --location-trusted lets the BE redirect keep the auth credentials
  curl --location-trusted \
    --silent --show-error --fail-with-body \
    --max-time "${CURL_TIMEOUT_SEC}" \
    -u "${DORIS_USER}:${DORIS_PASS}" \
    -H "Expect:100-continue" \
    -H "column_separator:${COLUMN_SEPARATOR}" \
    -T "${csv_path}" \
    -X PUT \
    "${url}" > "${resp_file}"
  parse_stream_load_response "${resp_file}"
}
### Stream-load every CSV named in the download list, in list order.
# First verifies that every expected file exists so a partial download set is
# rejected before any data is loaded.
# FIX: both CSV paths were "${WORKDIR}/$(unknown)" — a literal command
# substitution that would execute a nonexistent `unknown` command; they must
# use the filename derived from each URL.
# @param list_file string Download list file (to know exact filenames)
# @return 0 Success
run_stream_load() {
  local list_file="$1"
  [[ -f "${list_file}" ]] || die "Download list file not found: ${list_file}"
  # > Ensure all expected files exist before loading anything
  while IFS= read -r url; do
    [[ -n "${url}" ]] || continue
    local filename
    filename="$(basename "${url}")"
    local csv_path="${WORKDIR}/${filename}"
    [[ -f "${csv_path}" ]] || die "Expected CSV missing (download not complete?): ${csv_path}"
  done < "${list_file}"
  log_info "All expected CSV files exist. Starting Stream Load..."
  # > Load in the same order as the list
  while IFS= read -r url; do
    [[ -n "${url}" ]] || continue
    local filename
    filename="$(basename "${url}")"
    stream_load_one "${WORKDIR}/${filename}"
  done < "${list_file}"
  log_info "All Stream Load operations finished."
}
#===============================================================================
# Main
#===============================================================================
### Entry point: lock, parse args, validate, then dispatch on ACTION.
# @param args string[] Command line args (forwarded to parse_args)
# @return 0 Success
main() {
  acquire_lock
  parse_args "$@"
  validate_env
  prepare_workdir
  build_download_list
  case "${ACTION}" in
    download)
      # > Download only
      run_downloads "${DOWNLOAD_LIST_FILE}"
      ;;
    load)
      # > Load only (expects files already present in WORKDIR)
      run_stream_load "${DOWNLOAD_LIST_FILE}"
      ;;
    all)
      # > Download then load
      run_downloads "${DOWNLOAD_LIST_FILE}"
      run_stream_load "${DOWNLOAD_LIST_FILE}"
      ;;
    *)
      die "Invalid action: ${ACTION}"
      ;;
  esac
  log_info "Done. (version=${SCRIPT_VERSION})"
}
main "$@"

View File

@@ -0,0 +1,37 @@
请以Bash Shell脚本高级开发工程师的身份严格遵循以下编程规范实现指定功能
1. 代码结构规范
- 符合POSIX标准与Bash最佳实践v5.0+
- 实现清晰的模块划分和函数封装
- 采用防御性编程策略处理异常情况
- 包含完善的错误处理机制trap、set -euo pipefail
2. 函数设计标准
- 函数声明需包含: 功能描述段(使用###注释块 参数说明:@param <变量名> <数据类型> <用途说明> 返回值说明:@return <退出码> <状态描述> 环境依赖:@require <依赖项>
- 函数参数命名采用snake_case格式体现语义化特征
3. 文档规范
- 主脚本头部包含: 元数据声明(作者、版本、许可证) 全局常量定义区 模块依赖说明
- 关键算法步骤添加行内注释(# > 开头)
- 维护完整的函数调用关系图使用ASCII流程图
4. 质量保障
- 通过ShellCheck进行静态检测
- 统一的日志函数实现详细的日志分级输出DEBUG/INFO/WARN/ERROR
1、在一台能访问doris的服务器下载csv文件
2、修改以下指令的变量文件名、doris ip和port执行就导入完成了
全国数据共6个文件5.6G后缀从0到5
csv文件地址https://oss.demo.uavcmlc.com/cmlc-installation/doris/all/result_2aee9754dd304ca1-a0651901906f9bb4_0.csv
单独地域的文件 https://oss.demo.uavcmlc.com/cmlc-installation/doris/all/xiongan.csv
导入指令curl --location-trusted -u root: -H "Expect:100-continue" -H "column_separator:," -T ${table_name}.csv -XPUT http://${doris_be_ip}:${doris_be_8040_port}/api/cmii/dwd_reg_grid_city_detail_dd/_stream_load
使用WGET下载需要确保所有文件全部下载完成才能进行导入
result_2aee9754dd304ca1-a0651901906f9bb4_0.csv 此名称可能变化,需要作为变量提出
DORIS的地址、端口等需要作为公共变量提出

View File

@@ -0,0 +1,8 @@
10.22.57.8 master-10-22-57-8
10.22.57.5 worker-1-10-22-57-5
10.22.57.6 worker-2-10-22-57-6
10.22.57.7 worker-3-10-22-57-7
10.22.57.3 worker-4-10-22-57-3
10.22.57.4 worker-5-10-22-57-4
hostnamectl set-hostname

View File

@@ -0,0 +1 @@
eyJhbGciOiJSUzI1NiIsImtpZCI6IlBtUkhncHJ6T0ZUYnItM0VnMXpVUlZCWllOSVZWZ0Y0WEN3Sk5KUkoxY2MifQ.eyJhdWQiOlsidW5rbm93biJdLCJleHAiOjE4NjMzOTczMDYsImlhdCI6MTc2ODc4OTMwNiwiaXNzIjoicmtlIiwianRpIjoiOWQxNjM2MTktZDhjNi00MjQzLTgxN2MtMGYzOWYxYzFlNGExIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiNmUyOTQyM2QtZTYxNC00ZDdhLWI0MjAtNTVkZGQwMjZhYWQ4In19LCJuYmYiOjE3Njg3ODkzMDYsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.FSTYBTJhdrPz_dfcok_QTJCCX_u9k2iCmzkMwrE2NaxyEGFzbSoa8rpqN1yX6fO1GPfJX1UwAqsXqVXSEFOyCbdKt702zWonkUhIqJg7E48E8807Ta6Dc1QGNH4UHxuheFCPlqf9ZoTVF9lV0oWFHdxT-b5Z_ZuTshJ7pHE7AuZgOtLvzdpp4qU7CbMPWfkPUMh85fE9kzOCpbJsrN1ccnTMfjAJ8u1w_rq9Lzk2IqiuaiwSD95dNJAjKWdt8qgtDUbAbrRja_o5BdPzPuaEHRSI-aNNMgguxovvmGs7GX0mYqLlsU-SPRlkDU02Td_7Av48Ckh1IBvkHIKoAbhZgw

View File

@@ -0,0 +1,79 @@
# Single-node MinIO (StatefulSet + NodePort Service) for namespace xa-dcity-uas-260116.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: xa-dcity-uas-260116
  name: helm-minio
spec:
  serviceName: helm-minio
  replicas: 1
  selector:
    matchLabels:
      app: helm-minio
  template:
    metadata:
      labels:
        app: helm-minio
    spec:
      imagePullSecrets:
        - name: harborsecret
      affinity:
        # Pin the pod to nodes labelled minio-deploy=true — required because the
        # data volume below is a hostPath, so the data lives on one specific node.
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: minio-deploy
                    operator: In
                    values:
                      - "true"
      containers:
        - name: minio
          image: 10.22.57.8:8033/cmii/minio:RELEASE.2023-06-02T23-17-26Z
          command: ["/bin/sh", "-c"]
          args:
            - minio server /data --console-address ":9001"
          ports:
            - containerPort: 9000
              name: api
            - containerPort: 9001
              name: console
          env:
            # NOTE(review): MINIO_ACCESS_KEY / MINIO_SECRET_KEY are the legacy
            # variable names; current MinIO documents MINIO_ROOT_USER /
            # MINIO_ROOT_PASSWORD — confirm this release still honors the legacy pair.
            # NOTE(review): credentials are committed in plain text; consider
            # moving them into a Kubernetes Secret.
            - name: MINIO_ACCESS_KEY
              value: "cmii"
            - name: MINIO_SECRET_KEY
              value: "B#923fC7mk"
          volumeMounts:
            - name: data
              mountPath: /data
          resources:
            limits:
              memory: 2Gi
              cpu: "2"
            requests:
              memory: 200Mi
              cpu: 200m
      volumes:
        - name: data
          # persistentVolumeClaim:
          #   claimName: helm-minio
          # hostPath keeps data on the scheduled node's filesystem
          # (paired with the minio-deploy nodeAffinity above).
          hostPath:
            path: /var/lib/docker/minio-pv/
---
apiVersion: v1
kind: Service
metadata:
  name: helm-minio
  namespace: xa-dcity-uas-260116
spec:
  selector:
    app: helm-minio
  ports:
    # NOTE(review): nodePort 39000/39001 are outside the default NodePort range
    # (30000-32767) — assumes the apiserver runs with an extended
    # --service-node-port-range; verify on the target cluster.
    - name: api
      port: 9000
      targetPort: 9000
      nodePort: 39000
    - name: console
      port: 9001
      targetPort: 9001
      nodePort: 39001
  type: NodePort

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,672 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-iot
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "iot",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-flight-control
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "flight-control",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
# NOTE(review): ApplicationShortName is empty here, while every sibling
# tenant ConfigMap sets it to its own suffix — presumably this should be
# "pangu"; TODO confirm with the frontend before changing.
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-pangu
  namespace: xa-dcity-uas-260116
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "144.7.97.167:8088",
      ApplicationShortName: "",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-awareness
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "awareness",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
# NOTE(review): duplicate document — tenant-prefix-uas is already defined
# earlier in this file with identical content. kubectl apply will simply
# re-apply it (last one wins); consider removing one copy to avoid confusion.
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-uas
  namespace: xa-dcity-uas-260116
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "144.7.97.167:8088",
      ApplicationShortName: "uas",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
# NOTE(review): duplicate document — tenant-prefix-uasms is already defined
# immediately above with identical content. kubectl apply will simply
# re-apply it (last one wins); consider removing one copy to avoid confusion.
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-uasms
  namespace: xa-dcity-uas-260116
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "144.7.97.167:8088",
      ApplicationShortName: "uasms",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: xa-dcity-uas-260116
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "144.7.97.167:8088",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}

View File

@@ -0,0 +1,315 @@
---
# ------------------- Dashboard Namespace ------------------- #
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
# ------------------- Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# ------------------- Dashboard Service (NodePort 39999) ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
---
# ------------------- Dashboard Secrets ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
# ------------------- Dashboard Role (FIXED) ------------------- #
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-minimal
  namespace: kubernetes-dashboard
rules:
  # [Fix] Allow creating Secrets (resolves the dashboard panic on first start
  # when the key-holder Secret does not exist yet).
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create"]
  # Allow operations only on the dashboard's own well-known Secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Settings ConfigMap permissions.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Metrics permissions (these rules match the upstream Dashboard v2.7.0
  # recommended manifest).
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]
---
# ------------------- Dashboard RoleBinding ------------------- #
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-minimal
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: 10.22.57.8:8033/cmii/dashboard:v2.7.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
---
# ------------------- Metrics Scraper Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
# ------------------- Metrics Scraper Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: 10.22.57.8:8033/cmii/metrics-scraper:v1.0.8
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
volumes:
- name: tmp-volume
emptyDir: {}
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
---
# ==================================================================
# 自定义用户配置部分 (ADMIN & READ-ONLY)
# ==================================================================
# ------------------- 1. Admin User (全部权限) ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
---
# ------------------- 2. Read-Only User (只读+看日志) ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
name: read-only-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: dashboard-view-with-logs
rules:
- apiGroups: [""]
resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumeclaims", "persistentvolumes", "namespaces"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["cronjobs", "jobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses", "networkpolicies"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: read-only-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: dashboard-view-with-logs
subjects:
- kind: ServiceAccount
name: read-only-user
namespace: kubernetes-dashboard

View File

@@ -0,0 +1,639 @@
---
# ============== Secret - 密码管理 ==============
apiVersion: v1
kind: Secret
metadata:
name: emqx-credentials
namespace: xa-dcity-uas-260116
type: Opaque
stringData:
# Dashboard管理员密码
dashboard-admin-password: "odD8#Ve7.B"
# MQTT用户密码
mqtt-admin-password: "odD8#Ve7.B"
---
# ============== ServiceAccount ==============
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: xa-dcity-uas-260116
---
# ============== Role - RBAC ==============
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: helm-emqxs
namespace: xa-dcity-uas-260116
rules:
- apiGroups: [""]
resources:
- endpoints
- pods
verbs:
- get
- watch
- list
---
# ============== RoleBinding ==============
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: helm-emqxs
namespace: xa-dcity-uas-260116
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: xa-dcity-uas-260116
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
# ============== ConfigMap - Bootstrap配置文件 ==============
apiVersion: v1
kind: ConfigMap
metadata:
name: emqx-bootstrap-config
namespace: xa-dcity-uas-260116
data:
# 主配置文件 - 覆盖默认配置
emqx.conf: |
# 节点配置
node {
name = "emqx@${POD_NAME}.helm-emqxs-headless.xa-dcity-uas-260116.svc.cluster.local"
cookie = "emqx-cluster-cookie-secret"
data_dir = "/opt/emqx/data"
}
# 集群配置
cluster {
name = emqxcl
# 单节点 建议为 manual 多节点为k8s
discovery_strategy = manual
k8s {
apiserver = "https://kubernetes.default.svc.cluster.local:443"
service_name = "helm-emqxs-headless"
# 这里可以改为 hostname
address_type = dns
namespace = "xa-dcity-uas-260116"
suffix = "svc.cluster.local"
}
}
# 日志配置
log {
console {
enable = true
level = info
}
file {
enable = true
level = warning
path = "/opt/emqx/log"
}
}
# Dashboard配置
dashboard {
listeners.http {
bind = "0.0.0.0:18083"
}
default_username = "admin"
default_password = "public"
}
# 监听器配置
listeners.tcp.default {
bind = "0.0.0.0:1883"
max_connections = 1024000
}
listeners.ws.default {
bind = "0.0.0.0:8083"
max_connections = 1024000
websocket.mqtt_path = "/mqtt"
}
listeners.ssl.default {
bind = "0.0.0.0:8883"
max_connections = 512000
}
# 认证配置 - 使用内置数据库
authentication = [
{
mechanism = password_based
backend = built_in_database
user_id_type = username
password_hash_algorithm {
name = sha256
salt_position = suffix
}
# Bootstrap文件路径 - 用于初始化用户
bootstrap_file = "/opt/emqx/data/bootstrap_users.json"
bootstrap_type = plain
}
]
# 授权配置
authorization {
no_match = deny
deny_action = disconnect
sources = [
{
type = built_in_database
enable = true
}
]
}
# MQTT协议配置
mqtt {
max_packet_size = "1MB"
max_clientid_len = 65535
max_topic_levels = 128
max_qos_allowed = 2
max_topic_alias = 65535
retain_available = true
wildcard_subscription = true
shared_subscription = true
}
---
# ============== ConfigMap - Users & ACL (严格 JSON 格式) ==============
apiVersion: v1
kind: ConfigMap
metadata:
name: emqx-bootstrap-users
namespace: xa-dcity-uas-260116
data:
bootstrap_users.json: |
[
{ "user_id": "admin", "password": "odD8#Ve7.B", "is_superuser": true },
{ "user_id": "cmlc", "password": "odD8#Ve7.B", "is_superuser": false }
]
# 【修改点】既然有jq这里使用标准的 JSON 数组格式,最不容易出错
bootstrap_acl.json: |
[
{
"username": "admin",
"rules": [
{"action": "all", "permission": "allow", "topic": "#"}
]
},
{
"username": "cmlc",
"rules": [
{"action": "publish", "permission": "allow", "topic": "#"},
{"action": "subscribe", "permission": "allow", "topic": "#"}
]
}
]
---
# ============== ConfigMap - 初始化脚本 (修正版) ==============
apiVersion: v1
kind: ConfigMap
metadata:
  name: emqx-init-dashboard
  namespace: xa-dcity-uas-260116
data:
  # Post-start init script: waits for the EMQX HTTP API, rotates the Dashboard
  # password away from the default, then imports ACL rules via the v5 API.
  # Fix: progress labels were inconsistent ([1/4], [2/4], then [3/3] with no
  # step 4 anywhere); renumbered to a consistent [n/3].
  init-dashboard.sh: |
    #!/bin/bash
    set -e
    DASHBOARD_USER="admin"
    DASHBOARD_PASS="${DASHBOARD_ADMIN_PASSWORD}"
    EMQX_API="http://localhost:18083/api/v5"
    ACL_FILE="/bootstrap/bootstrap_acl.json"
    # Helper: print a timestamped log line
    log() {
      echo "[$(date +'%H:%M:%S')] $1"
    }
    log "======================================"
    log "初始化 Dashboard 与 ACL (Debug Version)"
    log "======================================"
    # ----------------------------------------------------------------
    # 1. Wait until the EMQX HTTP API is ready (up to 60 x 5s)
    # ----------------------------------------------------------------
    log "[1/3] 等待 EMQX API 就绪..."
    for i in $(seq 1 60); do
      if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
        log "✓ EMQX API 已就绪"
        break
      fi
      if [ $i -eq 60 ]; then
        log "✗ EMQX API 启动超时"
        exit 1
      fi
      sleep 5
    done
    # ----------------------------------------------------------------
    # 2. Rotate the Dashboard admin password away from the default
    # ----------------------------------------------------------------
    log "[2/3] 检查/更新 Dashboard 密码..."
    # Try to obtain a token with the default password first
    LOGIN_RESP=$(curl -s -X POST "${EMQX_API}/login" \
      -H 'Content-Type: application/json' \
      -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"public\"}")
    TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
    if [ -n "$TOKEN" ]; then
      log " 检测到默认密码,正在更新..."
      curl -s -f -X POST "${EMQX_API}/users/${DASHBOARD_USER}/change_pwd" \
        -H "Authorization: Bearer ${TOKEN}" \
        -H 'Content-Type: application/json' \
        -d "{\"old_pwd\":\"public\",\"new_pwd\":\"${DASHBOARD_PASS}\"}"
      log " ✓ Dashboard 密码已更新"
    else
      log " 无法使用默认密码登录,跳过更新(可能已修改)"
    fi
    # ----------------------------------------------------------------
    # 3. Import ACL rules (PUT to update, POST on 404, then verify)
    # ----------------------------------------------------------------
    echo "[3/3] 导入ACL规则..."
    # Re-login to obtain a token with the (possibly rotated) password
    LOGIN_RESP=$(curl -sS -X POST "${EMQX_API}/login" \
      -H 'Content-Type: application/json' \
      -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}")
    TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
    if [ -z "$TOKEN" ]; then
      echo " ✗ 无法获取Token请检查密码设置"
      exit 0
    fi
    if [ -f "$ACL_FILE" ]; then
      echo " 正在解析 ACL 文件: $ACL_FILE"
      if ! jq -e . "$ACL_FILE" >/dev/null 2>&1; then
        echo " ✗ ACL 文件 JSON 格式错误,跳过处理"
        exit 0
      fi
      jq -c '.[]' "$ACL_FILE" | while read -r user_config; do
        USERNAME=$(echo "$user_config" | jq -r '.username // empty')
        # Both PUT and POST bodies require username + rules (username is required)
        REQ_BODY=$(echo "$user_config" | jq -c '{username: .username, rules: .rules}')
        if [ -z "$USERNAME" ]; then
          echo " ✗ ACL 条目缺少 username跳过"
          continue
        fi
        echo " 配置用户 ${USERNAME} 的ACL规则..."
        # 1) Prefer PUT (overwrite/update an existing user's rules)
        http_code=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
          -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}" \
          -H "Authorization: Bearer ${TOKEN}" \
          -H 'Content-Type: application/json' \
          -d "$REQ_BODY")
        if [ "$http_code" = "204" ]; then
          echo " ✓ PUT 更新成功"
        elif [ "$http_code" = "404" ]; then
          # 2) Not found — create via POST
          http_code2=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
            -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
            -H "Authorization: Bearer ${TOKEN}" \
            -H 'Content-Type: application/json' \
            -d "$REQ_BODY")
          if [ "$http_code2" = "204" ]; then
            echo " ✓ POST 创建成功"
          else
            echo " ✗ POST 失败 (HTTP ${http_code2})$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
            exit 1
          fi
        else
          echo " ✗ PUT 失败 (HTTP ${http_code})$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
          exit 1
        fi
        # 3) Verify after import (optional but strongly recommended)
        verify_code=$(curl -sS -o /tmp/emqx_acl_verify.json -w '%{http_code}' \
          -H "Authorization: Bearer ${TOKEN}" \
          "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}")
        if [ "$verify_code" = "200" ]; then
          echo " ✓ 验证成功:$(cat /tmp/emqx_acl_verify.json | jq -c '.')"
        else
          echo " ✗ 验证失败 (HTTP ${verify_code})$(cat /tmp/emqx_acl_verify.json 2>/dev/null || true)"
          exit 1
        fi
      done
      echo " ✓ ACL 规则导入完成"
    else
      echo " 未找到 ACL 文件"
    fi
---
# ============== StatefulSet ==============
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: xa-dcity-uas-260116
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
spec:
replicas: 1
serviceName: helm-emqxs-headless
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- demo
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: cmii.app
operator: In
values:
- helm-emqxs
topologyKey: kubernetes.io/hostname
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
securityContext:
fsGroup: 1000
runAsUser: 1000
# InitContainer - 准备bootstrap文件
initContainers:
- name: prepare-bootstrap
image: 10.22.57.8:8033/cmii/tools:1.0
imagePullPolicy: IfNotPresent
# =========================================================
# 权限: 必须以 root 身份运行才能 chown
# =========================================================
securityContext:
runAsUser: 0
command:
- /bin/sh
- -c
- |
echo "准备bootstrap文件..."
# 创建数据目录
mkdir -p /opt/emqx/data
# 复制bootstrap文件到数据目录
# 只在文件不存在时复制,避免覆盖已有数据
if [ ! -f /opt/emqx/data/bootstrap_users.json ]; then
cp /bootstrap-src/bootstrap_users.json /opt/emqx/data/
echo "✓ 已复制用户bootstrap文件"
else
echo " 用户bootstrap文件已存在跳过"
fi
# 设置权限 (现在有root权限可以成功)
chown -R 1000:1000 /opt/emqx/data
echo "✓ Bootstrap准备完成"
volumeMounts:
- name: emqx-data
mountPath: /opt/emqx/data
- name: bootstrap-users
mountPath: /bootstrap-src
containers:
# 主容器 - EMQX
- name: emqx
image: 10.22.57.8:8033/cmii/emqx:5.8.8
imagePullPolicy: IfNotPresent
env:
# Pod信息
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: EMQX_DATA_DIR
value: "/opt/emqx/data"
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: ws
containerPort: 8083
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "2000m"
memory: "2Gi"
livenessProbe:
httpGet:
path: /status
port: 18083
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /status
port: 18083
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
startupProbe:
httpGet:
path: /status
port: 18083
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 30
volumeMounts:
- name: emqx-data
mountPath: /opt/emqx/data
# 使用 subPath 挂载单个配置文件,避免覆盖目录
- name: bootstrap-config
mountPath: /opt/emqx/etc/emqx.conf
subPath: emqx.conf
# Sidecar - 初始化Dashboard密码和ACL
- name: init-dashboard
image: 10.22.57.8:8033/cmii/tools:1.0
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- |
# 等待主容器启动
echo "等待EMQX启动..."
sleep 20
# 执行初始化
/bin/sh /scripts/init-dashboard.sh
# 保持运行
echo "初始化完成,进入守护模式..."
while true; do sleep 3600; done
env:
- name: DASHBOARD_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: emqx-credentials
key: dashboard-admin-password
resources:
requests:
cpu: "100m"
memory: "64Mi"
limits:
cpu: "200m"
memory: "128Mi"
volumeMounts:
- name: init-script
mountPath: /scripts
- name: bootstrap-users
mountPath: /bootstrap
volumes:
- name: bootstrap-config
configMap:
name: emqx-bootstrap-config
- name: bootstrap-users
configMap:
name: emqx-bootstrap-users
- name: init-script
configMap:
name: emqx-init-dashboard
defaultMode: 0755
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
---
# ============== Service - Headless ==============
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: xa-dcity-uas-260116
labels:
cmii.type: middleware
cmii.app: helm-emqxs
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
targetPort: 1883
- name: mqttssl
port: 8883
targetPort: 8883
- name: ws
port: 8083
targetPort: 8083
- name: dashboard
port: 18083
targetPort: 18083
- name: ekka
port: 4370
targetPort: 4370
---
# ============== Service - NodePort ==============
# Exposes MQTT/Dashboard/WS/MQTTS on fixed node ports.
# NOTE(review): a second Service with the same name "helm-emqxs" and the same
# nodePorts (31883/38085/38083) is defined again later in this file in the same
# namespace — only one of the two manifests can be applied; confirm which one
# is authoritative.
apiVersion: v1
kind: Service
metadata:
  name: helm-emqxs
  namespace: xa-dcity-uas-260116
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
spec:
  type: NodePort
  selector:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
  ports:
    # MQTT over TCP (fixed nodePort 31883).
    - name: mqtt
      port: 1883
      targetPort: 1883
      nodePort: 31883
    # EMQX Dashboard / HTTP API.
    - name: dashboard
      port: 18083
      targetPort: 18083
      nodePort: 38085
    # MQTT over WebSocket.
    - name: ws
      port: 8083
      targetPort: 8083
      nodePort: 38083
    # MQTT over TLS.
    - name: mqttssl
      port: 8883
      targetPort: 8883
      nodePort: 38883

View File

@@ -0,0 +1,376 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: xa-dcity-uas-260116
---
# Environment ConfigMap consumed via envFrom by the helm-emqxs StatefulSet.
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-emqxs-env
  namespace: xa-dcity-uas-260116
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    helm.sh/chart: emqx-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: uas-2.2
data:
  # Cluster discovery via the Kubernetes API.
  # NOTE(review): the image used is emqx:5.8.8; EMQX 5.x renamed the config key
  # to cluster.discovery_strategy (EMQX_CLUSTER__DISCOVERY_STRATEGY) — confirm
  # these 4.x-style variable names are still honored by 5.8.
  EMQX_CLUSTER__DISCOVERY: "k8s"
  EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
  EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
  EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
  EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
  EMQX_CLUSTER__K8S__NAMESPACE: "xa-dcity-uas-260116"
  EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
  # Disable anonymous clients; default authorization result when no ACL rule
  # matches is deny.
  # NOTE(review): in EMQX 5.x the key is authorization.no_match
  # (EMQX_AUTHORIZATION__NO_MATCH); verify EMQX_AUTHZ__NO_MATCH is recognized.
  EMQX_AUTH__ALLOW_ANONYMOUS: "false"
  EMQX_AUTHZ__NO_MATCH: "deny"
  # Initial Dashboard admin password (only takes effect on first boot).
  # NOTE(review): plaintext credential stored in a ConfigMap — consider moving
  # it to a Secret, as the sibling manifest's emqx-credentials does.
  EMQX_DASHBOARD__DEFAULT_PASSWORD: "odD8#Ve7.B"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-init-script
namespace: xa-dcity-uas-260116
labels:
cmii.type: middleware
cmii.app: helm-emqxs
data:
init-mqtt-user.sh: |
#!/bin/sh
set -e
DASHBOARD_USER="admin"
DASHBOARD_PASS="odD8#Ve7.B"
MQTT_USER="admin"
MQTT_PASS="odD8#Ve7.B"
# 等待 EMQX 本地 API 就绪
EMQX_API="http://localhost:18083/api/v5"
echo "等待 EMQX API 就绪..."
for i in $(seq 1 120); do
if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
echo "EMQX API 已就绪"
break
fi
echo "等待中... ($i/120)"
sleep 5
done
# 修改 Dashboard 管理员密码
echo "修改 Dashboard 管理员密码..."
/opt/emqx/bin/emqx ctl admins passwd "${DASHBOARD_USER}" "${DASHBOARD_PASS}" || echo "密码可能已设置"
echo "Dashboard 密码设置完成"
# 获取 Dashboard Token
echo "获取 Dashboard Token..."
TOKEN=$(curl -s -X POST "${EMQX_API}/login" \
-H 'Content-Type: application/json' \
-d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}" \
| grep -o '"token":"[^"]*' | cut -d'"' -f4)
if [ -z "$TOKEN" ]; then
echo "ERROR: 无法获取 Token"
exit 1
fi
echo "Token 获取成功"
# 创建内置数据库认证器(使用 listeners 作用域)
echo "检查并创建内置数据库认证器..."
# 为 tcp:default listener 添加认证器
echo "为 listener tcp:default 配置认证器..."
curl -s -X POST "${EMQX_API}/authentication/tcp:default" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d '{
"mechanism": "password_based",
"backend": "built_in_database",
"user_id_type": "username",
"password_hash_algorithm": {
"name": "sha256",
"salt_position": "suffix"
}
}' 2>/dev/null || echo "tcp:default 认证器可能已存在"
# 为 ws:default listener 添加认证器
echo "为 listener ws:default 配置认证器..."
curl -s -X POST "${EMQX_API}/authentication/ws:default" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d '{
"mechanism": "password_based",
"backend": "built_in_database",
"user_id_type": "username",
"password_hash_algorithm": {
"name": "sha256",
"salt_position": "suffix"
}
}' 2>/dev/null || echo "ws:default 认证器可能已存在"
# 等待认证器创建完成
sleep 2
# 创建 MQTT 用户
echo "创建 MQTT 用户: ${MQTT_USER}..."
curl -s -X POST "${EMQX_API}/authentication/password_based:built_in_database/users?listener_id=tcp:default" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d "{\"user_id\":\"${MQTT_USER}\",\"password\":\"${MQTT_PASS}\",\"is_superuser\":true}" \
2>/dev/null || echo "用户可能已存在,尝试更新..."
# 尝试更新密码
curl -s -X PUT "${EMQX_API}/authentication/password_based:built_in_database/users/${MQTT_USER}?listener_id=tcp:default" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d "{\"password\":\"${MQTT_PASS}\",\"is_superuser\":true}" \
2>/dev/null || true
echo "MQTT 用户创建/更新完成"
# 创建授权规则
echo "配置授权规则..."
# 创建内置数据库授权源
curl -s -X POST "${EMQX_API}/authorization/sources" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d '{
"type": "built_in_database",
"enable": true
}' 2>/dev/null || echo "授权源可能已存在"
sleep 2
# 为 admin 用户添加授权规则(使用数组格式)
echo "为 ${MQTT_USER} 用户添加 ACL 规则..."
curl -s -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d "[{\"username\":\"${MQTT_USER}\",\"rules\":[{\"action\":\"all\",\"permission\":\"allow\",\"topic\":\"#\"}]}]" \
2>/dev/null && echo "ACL 规则创建成功" || echo "规则可能已存在,尝试更新..."
# 尝试更新规则PUT 请求需要单个对象,不是数组)
curl -s -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${MQTT_USER}" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d "{\"rules\":[{\"action\":\"all\",\"permission\":\"allow\",\"topic\":\"#\"}]}" \
2>/dev/null && echo "ACL 规则更新成功" || true
echo "ACL 规则配置完成"
echo "初始化完成MQTT 用户: ${MQTT_USER}"
echo "可通过以下方式连接:"
echo " - MQTT: localhost:1883"
echo " - WebSocket: localhost:8083"
echo " - Dashboard: http://localhost:18083"
echo " - 用户名: ${MQTT_USER}"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: xa-dcity-uas-260116
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.2
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.2
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- demo
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: 10.22.57.8:8033/cmii/emqx:5.8.8
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
# 添加生命周期钩子
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
- |
# 后台执行初始化脚本,避免阻塞容器启动
nohup /bin/sh /scripts/init-mqtt-user.sh > /tmp/init.log 2>&1 &
# 添加健康检查,确保 initContainer 执行时 API 已就绪
livenessProbe:
httpGet:
path: /status
port: 18083
initialDelaySeconds: 60
periodSeconds: 30
readinessProbe:
httpGet:
path: /status
port: 18083
initialDelaySeconds: 10
periodSeconds: 5
resources: {}
volumeMounts:
# 5.x 默认 data 目录,包含所有持久化数据
- name: emqx-data
mountPath: "/opt/emqx/data"
readOnly: false
- name: init-script
mountPath: /scripts
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: init-script
configMap:
name: helm-emqxs-init-script
defaultMode: 0755
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: xa-dcity-uas-260116
rules:
- apiGroups: [""]
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: xa-dcity-uas-260116
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: xa-dcity-uas-260116
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: xa-dcity-uas-260116
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.2
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
# Headless Service used by EMQX Kubernetes/DNS cluster discovery
# (EMQX_CLUSTER__K8S__SERVICE_NAME in helm-emqxs-env points here).
apiVersion: v1
kind: Service
metadata:
  name: helm-emqxs-headless
  namespace: xa-dcity-uas-260116
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    helm.sh/chart: emqx-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: uas-2.2
spec:
  type: ClusterIP
  clusterIP: None
  # Fix: publish pod DNS records before the pods become Ready. With dns-based
  # discovery, EMQX nodes must be able to resolve their peers during startup;
  # without this flag cluster bootstrap can deadlock (each pod waits for peers
  # that are not yet published). The sibling helm-emqxs-headless manifest
  # earlier in this file already sets it.
  publishNotReadyAddresses: true
  selector:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
  ports:
    - name: mqtt
      port: 1883
      protocol: TCP
      targetPort: 1883
    - name: mqttssl
      port: 8883
      protocol: TCP
      targetPort: 8883
    - name: mgmt
      port: 8081
      protocol: TCP
      targetPort: 8081
    - name: websocket
      port: 8083
      protocol: TCP
      targetPort: 8083
    - name: wss
      port: 8084
      protocol: TCP
      targetPort: 8084
    - name: dashboard
      port: 18083
      protocol: TCP
      targetPort: 18083
    # Erlang/ekka clustering port.
    - name: ekka
      port: 4370
      protocol: TCP
      targetPort: 4370

View File

@@ -0,0 +1,203 @@
---
# Shared nginx vhost config, mounted via subPath into every frontend
# Deployment in this namespace (serves the SPA bundle from
# /home/cmii-platform/dist on port 9528).
kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-cm
  namespace: xa-dcity-uas-260116
  labels:
    cmii.type: frontend
data:
  nginx.conf: |
    server {
        listen 9528;
        server_name localhost;
        gzip on;
        location / {
            root  /home/cmii-platform/dist;
            index index.html index.htm;
        }
        error_page  500 502 503 504  /50x.html;
        location = /50x.html {
            root  html;
        }
    }
---
# Frontend SPA deployment for the "uas" tenant (nginx serving a static bundle).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-platform-uas
  namespace: xa-dcity-uas-260116
  labels:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-uas
    octopus.control: frontend-app-wdd
    app.kubernetes.io/app-version: uas-2.2
spec:
  # NOTE(review): replicas is 0, so this workload is deployed scaled-down —
  # presumably an external controller (octopus.control) scales it up; confirm.
  replicas: 0
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: frontend
      cmii.app: cmii-uav-platform-uas
  template:
    metadata:
      labels:
        cmii.type: frontend
        cmii.app: cmii-uav-platform-uas
    spec:
      imagePullSecrets:
        - name: harborsecret
      containers:
        - name: cmii-uav-platform-uas
          image: 10.22.57.8:8033/cmii/cmii-uav-platform-uas:2.2.0-pro-20251223
          # NOTE(review): Always-pull on a date-stamped tag forces a registry
          # round-trip on every pod start; IfNotPresent may be intended.
          imagePullPolicy: Always
          env:
            - name: K8S_NAMESPACE
              value: xa-dcity-uas-260116
            - name: APPLICATION_NAME
              value: cmii-uav-platform-uas
          ports:
            - name: platform-9528
              containerPort: 9528
              protocol: TCP
          resources:
            limits:
              cpu: "1"
              memory: 1Gi
            requests:
              cpu: 50m
              memory: 50Mi
          volumeMounts:
            # subPath mounts overlay single files without hiding the rest of
            # the target directory.
            - name: nginx-conf
              mountPath: /etc/nginx/conf.d/nginx.conf
              subPath: nginx.conf
            - name: tenant-prefix
              subPath: ingress-config.js
              mountPath: /home/cmii-platform/dist/ingress-config.js
      volumes:
        - name: nginx-conf
          configMap:
            name: nginx-cm
            items:
              - key: nginx.conf
                path: nginx.conf
        - name: tenant-prefix
          configMap:
            name: tenant-prefix-uas
            items:
              - key: ingress-config.js
                path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-uas
namespace: xa-dcity-uas-260116
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
octopus.control: frontend-app-wdd
app.kubernetes.io/version: uas-2.2
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-uasms
namespace: xa-dcity-uas-260116
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: uas-2.2
spec:
replicas: 0
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-uasms
image: 10.22.57.8:8033/cmii/cmii-uav-platform-uasms:2.2.0-pro-20251223
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: xa-dcity-uas-260116
- name: APPLICATION_NAME
value: cmii-uav-platform-uasms
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-uasms
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-uasms
namespace: xa-dcity-uas-260116
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
octopus.control: frontend-app-wdd
app.kubernetes.io/version: uas-2.2
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528

View File

@@ -0,0 +1,995 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: xa-dcity-uas-260116
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.2
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/enable-cors: 'true'
nginx.ingress.kubernetes.io/rewrite-target: /$1
spec:
rules:
- host: fake-domain.xa-dcity-uas-260116.io
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform
port:
number: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-suav-platform-supervision
port:
number: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-suav-platform-supervisionh5
port:
number: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform
port:
number: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-ai-brain
port:
number: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-armypeople
port:
number: 9528
- path: /awareness/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-awareness
port:
number: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-base
port:
number: 9528
- path: /blockchain/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-blockchain
port:
number: 9528
- path: /classification/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-classification
port:
number: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-cms-portal
port:
number: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-detection
port:
number: 9528
- path: /dikongzhixingh5/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-dikongzhixingh5
port:
number: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-dispatchh5
port:
number: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-emergency-rescue
port:
number: 9528
- path: /eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-eventsh5
port:
number: 9528
- path: /flight-control/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-flight-control
port:
number: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-hljtt
port:
number: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-hyperspectral
port:
number: 9528
- path: /iot/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-iot-manager
port:
number: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-jiangsuwenlv
port:
number: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-logistics
port:
number: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-media
port:
number: 9528
- path: /mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-mianyangbackend
port:
number: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-multiterminal
port:
number: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-mws
port:
number: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-oms
port:
number: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-open
port:
number: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-pilot2-to-cloud
port:
number: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-qingdao
port:
number: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-qinghaitourism
port:
number: 9528
- path: /renyike/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-renyike
port:
number: 9528
- path: /scanner/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-scanner
port:
number: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-security
port:
number: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-securityh5
port:
number: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-seniclive
port:
number: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-share
port:
number: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-smauth
port:
number: 9528
- path: /smsecret/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-smsecret
port:
number: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-splice
port:
number: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-threedsimulation
port:
number: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-traffic
port:
number: 9528
          - path: /uas/?(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: cmii-uav-platform-uas
                port:
                  number: 9528
          # NOTE(review): duplicate of the /uas/?(.*) rule above with a
          # different backend (uaskny). ingress-nginx serves the first match,
          # so this entry is dead — keep exactly one of the two.
          - path: /uas/?(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: cmii-uav-platform-uaskny
                port:
                  number: 9528
          - path: /uasms/?(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: cmii-uav-platform-uasms
                port:
                  number: 9528
          # NOTE(review): duplicate of the /uasms/?(.*) rule above
          # (uasmskny backend) — also dead; keep exactly one.
          - path: /uasms/?(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: cmii-uav-platform-uasmskny
                port:
                  number: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-visualization
port:
number: 9528
- path: /uavmsmanager/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uavms-platform-manager
port:
number: 9528
- path: /secenter/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uavms-platform-security-center
port:
number: 9528
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: xa-dcity-uas-260116
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.2
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/enable-cors: 'true'
spec:
rules:
- host: cmii-admin-data.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-admin-data
port:
number: 8080
- host: cmii-admin-gateway.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-admin-gateway
port:
number: 8080
- host: cmii-admin-user.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-admin-user
port:
number: 8080
- host: cmii-app-release.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-app-release
port:
number: 8080
- host: cmii-open-gateway.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-open-gateway
port:
number: 8080
- host: cmii-sky-converge.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-sky-converge
port:
number: 8080
- host: cmii-suav-supervision.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-suav-supervision
port:
number: 8080
- host: cmii-uas-datahub.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uas-datahub
port:
number: 8080
- host: cmii-uas-gateway.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uas-gateway
port:
number: 8080
- host: cmii-uas-lifecycle.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uas-lifecycle
port:
number: 8080
- host: cmii-uav-advanced5g.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-advanced5g
port:
number: 8080
- host: cmii-uav-airspace.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-airspace
port:
number: 8080
- host: cmii-uav-alarm.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-alarm
port:
number: 8080
- host: cmii-uav-autowaypoint.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-autowaypoint
port:
number: 8080
- host: cmii-uav-brain.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-brain
port:
number: 8080
- host: cmii-uav-bridge.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-bridge
port:
number: 8080
- host: cmii-uav-cloud-live.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-cloud-live
port:
number: 8080
- host: cmii-uav-clusters.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-clusters
port:
number: 8080
- host: cmii-uav-cms.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-cms
port:
number: 8080
- host: cmii-uav-data-post-process.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-data-post-process
port:
number: 8080
- host: cmii-uav-depotautoreturn.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-depotautoreturn
port:
number: 8080
- host: cmii-uav-developer.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-developer
port:
number: 8080
- host: cmii-uav-device.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-device
port:
number: 8080
- host: cmii-uav-emergency.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-emergency
port:
number: 8080
- host: cmii-uav-fwdd.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-fwdd
port:
number: 8080
- host: cmii-uav-gateway.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-gateway
port:
number: 8080
- host: cmii-uav-gis-server.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-gis-server
port:
number: 8080
- host: cmii-uav-grid-datasource.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-grid-datasource
port:
number: 8080
- host: cmii-uav-grid-engine.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-grid-engine
port:
number: 8080
- host: cmii-uav-grid-manage.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-grid-manage
port:
number: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-industrial-portfolio
port:
number: 8080
- host: cmii-uav-integration.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-integration
port:
number: 8080
- host: cmii-uav-iot-dispatcher.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-iot-dispatcher
port:
number: 8080
- host: cmii-uav-iot-manager.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-iot-manager
port:
number: 8080
- host: cmii-uav-kpi-monitor.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-kpi-monitor
port:
number: 8080
- host: cmii-uav-logger.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-logger
port:
number: 8080
- host: cmii-uav-material-warehouse.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-material-warehouse
port:
number: 8080
- host: cmii-uav-mission.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-mission
port:
number: 8080
- host: cmii-uav-mqtthandler.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-mqtthandler
port:
number: 8080
- host: cmii-uav-multilink.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-multilink
port:
number: 8080
- host: cmii-uav-notice.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-notice
port:
number: 8080
- host: cmii-uav-oauth.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-oauth
port:
number: 8080
- host: cmii-uav-process.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-process
port:
number: 8080
- host: cmii-uav-sec-awareness.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-sec-awareness
port:
number: 8080
- host: cmii-uav-security-trace.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-security-trace
port:
number: 8080
- host: cmii-uav-sense-adapter.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-sense-adapter
port:
number: 8080
- host: cmii-uav-surveillance.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-surveillance
port:
number: 8080
- host: cmii-uav-sync.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-sync
port:
number: 8080
- host: cmii-uav-tcp-server.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-tcp-server
port:
number: 8080
- host: cmii-uav-threedsimulation.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-threedsimulation
port:
number: 8080
- host: cmii-uav-tower.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-tower
port:
number: 8080
- host: cmii-uav-user.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-user
port:
number: 8080
- host: cmii-uav-watchdog.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-watchdog
port:
number: 8080
- host: cmii-uav-waypoint.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-waypoint
port:
number: 8080
- host: cmii-uavms-pyfusion.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uavms-pyfusion
port:
number: 8080
- host: cmii-uavms-security-center.uavcloud-xadcity-uas.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uavms-security-center
port:
number: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: xa-dcity-uas-260116
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.2
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/enable-cors: 'true'
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/proxy-read-timeout: '3600'
nginx.ingress.kubernetes.io/proxy-send-timeout: '3600'
spec:
rules:
- host: fake-domain.xa-dcity-uas-260116.io
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-admin-gateway
port:
number: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-open-gateway
port:
number: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-gateway
port:
number: 8080
- path: /uas/api/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uas-gateway
port:
number: 8080
- path: /converge/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-sky-converge
port:
number: 8080

View File

@@ -0,0 +1,826 @@
---
# Frontend SPA ingress: each rewrite below redirects a bare app prefix
# (e.g. /supervision) to its trailing-slash form so relative assets resolve.
# NOTE(review): networking.k8s.io/v1beta1 Ingress was removed in Kubernetes
# 1.22 — this manifest only applies on clusters <= 1.21; confirm the target
# cluster version (the v1 schema uses backend.service.{name,port.number}).
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: frontend-applications-ingress
  namespace: xa-dcity-uas-260116
  labels:
    type: frontend
    octopus.control: all-ingress-config-wdd
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: uas-2.2
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    # De-duplicated: the /uas and /uasms rewrites each appeared twice in the
    # original snippet; duplicates removed (identical rules, no behavior
    # change).
    nginx.ingress.kubernetes.io/configuration-snippet: |
      rewrite ^(/supervision)$ $1/ redirect;
      rewrite ^(/supervisionh5)$ $1/ redirect;
      rewrite ^(/pangu)$ $1/ redirect;
      rewrite ^(/ai-brain)$ $1/ redirect;
      rewrite ^(/armypeople)$ $1/ redirect;
      rewrite ^(/awareness)$ $1/ redirect;
      rewrite ^(/base)$ $1/ redirect;
      rewrite ^(/blockchain)$ $1/ redirect;
      rewrite ^(/classification)$ $1/ redirect;
      rewrite ^(/cmsportal)$ $1/ redirect;
      rewrite ^(/detection)$ $1/ redirect;
      rewrite ^(/dikongzhixingh5)$ $1/ redirect;
      rewrite ^(/dispatchh5)$ $1/ redirect;
      rewrite ^(/emergency)$ $1/ redirect;
      rewrite ^(/eventsh5)$ $1/ redirect;
      rewrite ^(/flight-control)$ $1/ redirect;
      rewrite ^(/hljtt)$ $1/ redirect;
      rewrite ^(/hyper)$ $1/ redirect;
      rewrite ^(/iot)$ $1/ redirect;
      rewrite ^(/jiangsuwenlv)$ $1/ redirect;
      rewrite ^(/logistics)$ $1/ redirect;
      rewrite ^(/media)$ $1/ redirect;
      rewrite ^(/mianyangbackend)$ $1/ redirect;
      rewrite ^(/multiterminal)$ $1/ redirect;
      rewrite ^(/mws)$ $1/ redirect;
      rewrite ^(/oms)$ $1/ redirect;
      rewrite ^(/open)$ $1/ redirect;
      rewrite ^(/pilot2cloud)$ $1/ redirect;
      rewrite ^(/qingdao)$ $1/ redirect;
      rewrite ^(/qinghaitourism)$ $1/ redirect;
      rewrite ^(/renyike)$ $1/ redirect;
      rewrite ^(/scanner)$ $1/ redirect;
      rewrite ^(/security)$ $1/ redirect;
      rewrite ^(/securityh5)$ $1/ redirect;
      rewrite ^(/seniclive)$ $1/ redirect;
      rewrite ^(/share)$ $1/ redirect;
      rewrite ^(/smauth)$ $1/ redirect;
      rewrite ^(/smsecret)$ $1/ redirect;
      rewrite ^(/splice)$ $1/ redirect;
      rewrite ^(/threedsimulation)$ $1/ redirect;
      rewrite ^(/traffic)$ $1/ redirect;
      rewrite ^(/uas)$ $1/ redirect;
      rewrite ^(/uasms)$ $1/ redirect;
      rewrite ^(/visualization)$ $1/ redirect;
      rewrite ^(/uavmsmanager)$ $1/ redirect;
      rewrite ^(/secenter)$ $1/ redirect;
spec:
rules:
- host: fake-domain.xa-dcity-uas-260116.io
http:
paths:
- path: /260116/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /260116/supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /260116/supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /260116/pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /260116/ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /260116/armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /260116/awareness/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-awareness
servicePort: 9528
- path: /260116/base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /260116/blockchain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-blockchain
servicePort: 9528
- path: /260116/classification/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-classification
servicePort: 9528
- path: /260116/cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /260116/detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /260116/dikongzhixingh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dikongzhixingh5
servicePort: 9528
- path: /260116/dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /260116/emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /260116/eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-eventsh5
servicePort: 9528
- path: /260116/flight-control/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-flight-control
servicePort: 9528
- path: /260116/hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /260116/hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /260116/iot/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-iot-manager
servicePort: 9528
- path: /260116/jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /260116/logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /260116/media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /260116/mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mianyangbackend
servicePort: 9528
- path: /260116/multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /260116/mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /260116/oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /260116/open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /260116/pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-pilot2-to-cloud
servicePort: 9528
- path: /260116/qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /260116/qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /260116/renyike/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-renyike
servicePort: 9528
- path: /260116/scanner/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-scanner
servicePort: 9528
- path: /260116/security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /260116/securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /260116/seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /260116/share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /260116/smauth/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smauth
servicePort: 9528
- path: /260116/smsecret/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smsecret
servicePort: 9528
- path: /260116/splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /260116/threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /260116/traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /260116/uas/?(.*)
  pathType: ImplementationSpecific
  backend:
    serviceName: cmii-uav-platform-uas
    servicePort: 9528
# NOTE(review): the original declared /260116/uas/?(.*) a second time,
# pointing at cmii-uav-platform-uaskny. Identical regex paths make the
# second entry unreachable dead config, so it is dropped here. If
# cmii-uav-platform-uaskny needs exposure, give it a distinct prefix
# (e.g. /260116/uaskny/) — confirm the intended route with the author.
- path: /260116/uasms/?(.*)
  pathType: ImplementationSpecific
  backend:
    serviceName: cmii-uav-platform-uasms
    servicePort: 9528
# NOTE(review): the duplicate /260116/uasms/?(.*) -> cmii-uav-platform-uasmskny
# entry was likewise unreachable and has been dropped; confirm intended route.
- path: /260116/visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /260116/uavmsmanager/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-manager
servicePort: 9528
- path: /260116/secenter/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-security-center
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: xa-dcity-uas-260116
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.2
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-sky-converge.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-sky-converge
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-datahub.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-datahub
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-advanced5g.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-advanced5g
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-fwdd.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-fwdd
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-iot-dispatcher.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-dispatcher
servicePort: 8080
- host: cmii-uav-iot-manager.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-manager
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sec-awareness.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sec-awareness
servicePort: 8080
- host: cmii-uav-security-trace.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-security-trace
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-sync.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sync
servicePort: 8080
- host: cmii-uav-tcp-server.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tcp-server
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-watchdog.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-watchdog
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
- host: cmii-uavms-pyfusion.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-pyfusion
servicePort: 8080
- host: cmii-uavms-security-center.uavcloud-260116.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-security-center
servicePort: 8080
---
# API-gateway fan-out for the /260116/-prefixed routes.
# NOTE(review): networking.k8s.io/v1beta1 Ingress was removed in Kubernetes
# 1.22 — this manifest only applies on clusters <= 1.21; confirm the target
# cluster version.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: all-gateways-ingress
  namespace: xa-dcity-uas-260116
  labels:
    type: api-gateway
    octopus.control: all-ingress-config-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: uas-2.2
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    # Canonical WebSocket pass-through. The original snippet set a request
    # header literally named "upgradePrefix" (an unrendered template
    # placeholder) and "Connection: upgradePrefix", which is not a valid
    # Connection option — fixed to the standard Upgrade/Connection pair.
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection "upgrade";
spec:
  rules:
    - host: fake-domain.xa-dcity-uas-260116.io
      http:
        paths:
          - path: /260116/oms/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-admin-gateway
              servicePort: 8080
          - path: /260116/open/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-open-gateway
              servicePort: 8080
          - path: /260116/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-gateway
              servicePort: 8080
          - path: /260116/uas/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uas-gateway
              servicePort: 8080
          - path: /260116/converge/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-sky-converge
              servicePort: 8080

View File

@@ -0,0 +1,78 @@
---
# NodePort Service exposing the single-node MongoDB on every cluster node.
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: xa-dcity-uas-260116
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.2
spec:
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
# NOTE(review): 37017 is outside the default NodePort range (30000-32767);
# this assumes the apiserver runs with an extended
# --service-node-port-range — confirm, otherwise the Service is rejected.
nodePort: 37017
---
# Single-replica MongoDB 5.0 backed by a PVC. No replica set / clustering —
# suitable only as this bundle's standalone middleware instance.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-mongo
  namespace: xa-dcity-uas-260116
  labels:
    cmii.app: helm-mongo
    cmii.type: middleware
    helm.sh/chart: mongo-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: uas-2.2
spec:
  serviceName: helm-mongo
  replicas: 1
  selector:
    matchLabels:
      cmii.app: helm-mongo
      cmii.type: middleware
  template:
    metadata:
      labels:
        cmii.app: helm-mongo
        cmii.type: middleware
        helm.sh/chart: mongo-1.1.0
        app.kubernetes.io/managed-by: octopus-control
        app.kubernetes.io/version: uas-2.2
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      imagePullSecrets:
        - name: harborsecret
      affinity: {}
      containers:
        - name: helm-mongo
          image: 10.22.57.8:8033/cmii/mongo:5.0
          resources: {}
          ports:
            - containerPort: 27017
              name: mongo27017
              protocol: TCP
          env:
            - name: MONGO_INITDB_ROOT_USERNAME
              value: cmlc
            # Quoted because the value contains '#': unquoted it parses
            # correctly today (no space before '#'), but quoting removes the
            # comment-truncation hazard when the value is later edited.
            # NOTE(review): plaintext credentials in a manifest — prefer a
            # Secret + secretKeyRef, as the MySQL chart in this bundle does.
            - name: MONGO_INITDB_ROOT_PASSWORD
              value: 'REdPza8#oVlt'
          volumeMounts:
            - name: mongo-data
              mountPath: /data/db
              readOnly: false
              subPath: default/helm-mongo/data/db
      volumes:
        - name: mongo-data
          persistentVolumeClaim:
            claimName: helm-mongo
---

View File

@@ -0,0 +1,410 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
annotations: {}
secrets:
- name: helm-mysql
---
# Opaque Secret holding the MySQL root and application passwords
# (base64-encoded, consumed by the helm-mysql StatefulSet via secretKeyRef).
# NOTE(review): base64 is encoding, not encryption — anyone with access to
# this repository can read these credentials; consider sealed-secrets or an
# external secret store.
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
# NOTE(review): a second max_allowed_packet=32M stood here; because the last
# occurrence wins in MySQL option files, it silently overrode the intended
# max_allowed_packet=128M configured earlier in this [mysqld] section.
# Removed so the 128M value takes effect.
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xa-dcity-uas-260116
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xa-dcity-uas-260116
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xa-dcity-uas-260116
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xa-dcity-uas-260116
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xa-dcity-uas-260116
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 10.22.57.8:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 10.22.57.8:8033/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/xa-dcity-uas-260116/

View File

@@ -0,0 +1,130 @@
---
# Nacos bootstrap configuration: connection settings for its MySQL-backed
# config store (consumed by the helm-nacos StatefulSet via configMapKeyRef).
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: xa-dcity-uas-260116
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: uas-2.2
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
# NOTE(review): a database password in a ConfigMap is readable by anyone
# with namespace view access — move to a Secret and use secretKeyRef.
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: xa-dcity-uas-260116
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: uas-2.2
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: xa-dcity-uas-260116
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: uas-2.2
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: uas-2.2
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: 10.22.57.8:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---

View File

@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" #与nfs-StorageClass.yaml metadata.name保持一致
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: 10.22.57.8:8033/cmii/busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim #与PVC名称保持一致

View File

@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system #根据实际环境设定namespace,下面类同
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system #与RBAC文件中的namespace保持一致
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 10.22.57.8:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 10.22.57.4
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 10.22.57.4
path: /var/lib/docker/nfs_data

View File

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: xa-dcity-uas-260116
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.2
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: xa-dcity-uas-260116
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.2
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: xa-dcity-uas-260116
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.2
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: xa-dcity-uas-260116
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.2
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi

View File

@@ -0,0 +1,16 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: pyfusion-configmap
namespace: xa-dcity-uas-260116
data:
config.yaml: |-
mqtt:
broker: "helm-emqxs"
port: 1883
username: "cmlc"
password: "odD8#Ve7.B"
topics:
mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"

View File

@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
      # fixed: the rabbitmq container names this port "dashboard"; no container port named "stats" exists
      targetPort: dashboard
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: xa-dcity-uas-260116
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: xa-dcity-uas-260116
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: xa-dcity-uas-260116
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 10.22.57.8:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 10.22.57.8:8033/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq

View File

@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: xa-dcity-uas-260116
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: xa-dcity-uas-260116
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: xa-dcity-uas-260116
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: {}
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 10.22.57.8:8033/cmii/redis:6.2.14-debian-11-r1
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
          # NOTE(review): removed empty "subPath:" (null value) — Helm rendering artifact; the mount uses the volume root
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: xa-dcity-uas-260116
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 0
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xa-dcity-uas-260116
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 10.22.57.8:8033/cmii/redis:6.2.14-debian-11-r1
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.xa-dcity-uas-260116.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
          # NOTE(review): removed empty "subPath:" (null value) — Helm rendering artifact; the mount uses the volume root
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}

View File

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: xa-dcity-uas-260116
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://144.7.97.167:8088;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: xa-dcity-uas-260116
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30080
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: xa-dcity-uas-260116
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: xa-dcity-uas-260116
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: helm-live-srs-rtc
  namespace: xa-dcity-uas-260116
  labels:
    octopus.control: wdd
    app.kubernetes.io/managed-by: octopus
    cmii.app: live-srs
    cmii.type: live
    helm.sh/chart: cmlc-live-srs-rtc-2.0.0
    srs-role: rtc
spec:
  # Single SRS instance; recordings land in the shared emptyDir and are
  # shipped to MinIO/MySQL by the oss-adaptor sidecar.
  replicas: 1
  serviceName: helm-live-srsrtc-svc
  podManagementPolicy: OrderedReady
  revisionHistoryLimit: 10
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      partition: 0
  selector:
    matchLabels:
      srs-role: rtc
  template:
    metadata:
      labels:
        srs-role: rtc
    spec:
      volumes:
        # SRS runtime config rendered from the helm-live-srs-cm ConfigMap.
        - name: srs-conf-file
          configMap:
            name: helm-live-srs-cm
            items:
              - key: srs.rtc.conf
                path: docker.conf
            defaultMode: 420
        # Scratch space shared between SRS (writer) and the sidecar (reader).
        - name: srs-vol
          emptyDir:
            sizeLimit: 8Gi
      containers:
        - name: srs-rtc
          image: 10.22.57.8:8033/cmii/srs:v5.0.195
          ports:
            - name: srs-rtmp
              containerPort: 31935
              protocol: TCP
            - name: srs-api
              containerPort: 1985
              protocol: TCP
            - name: srs-flv
              containerPort: 8080
              protocol: TCP
            - name: srs-webrtc
              containerPort: 30090
              protocol: UDP
            - name: srs-webrtc-tcp
              containerPort: 30090
              protocol: TCP
            - name: srs-srt
              containerPort: 30556
              protocol: UDP
          env:
            # Public address advertised in WebRTC ICE candidates.
            - name: CANDIDATE
              value: 144.7.97.167
          resources:
            limits:
              cpu: 2000m
              memory: 4Gi
            requests:
              cpu: 100m
              memory: 256Mi
          volumeMounts:
            - name: srs-conf-file
              mountPath: /usr/local/srs/conf/docker.conf
              subPath: docker.conf
            - name: srs-vol
              mountPath: /home/dvr
              subPath: xa-dcity-uas-260116/helm-live/dvr
            - name: srs-vol
              mountPath: /home/hls
              subPath: xa-dcity-uas-260116/helm-live/hls
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
        # Sidecar: uploads HLS segments to MinIO and records them in MySQL.
        - name: oss-adaptor
          image: 10.22.57.8:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
          env:
            - name: OSS_ENDPOINT
              value: 'http://helm-minio:9000'
            - name: OSS_AK
              value: cmii
            - name: OSS_SK
              value: 'B#923fC7mk'
            - name: OSS_BUCKET
              value: live-cluster-hls
            - name: SRS_OP
              value: 'http://helm-live-op-svc-v2:8080'
            - name: MYSQL_ENDPOINT
              value: 'helm-mysql:3306'
            - name: MYSQL_USERNAME
              value: k8s_admin
            - name: MYSQL_PASSWORD
              value: 'fP#UaH6qQ3)8'
            - name: MYSQL_DATABASE
              value: cmii_live_srs_op
            - name: MYSQL_TABLE
              value: live_segment
            - name: LOG_LEVEL
              value: info
            # Quoted: bare `yes` is a YAML 1.1 boolean on some parsers.
            - name: OSS_META
              value: 'yes'
          resources:
            limits:
              cpu: 2000m
              memory: 4Gi
            requests:
              cpu: 100m
              memory: 256Mi
          volumeMounts:
            - name: srs-vol
              mountPath: /cmii/share/hls
              subPath: xa-dcity-uas-260116/helm-live/hls
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      imagePullSecrets:
        - name: harborsecret
      affinity: {}
      schedulerName: default-scheduler
---
# live-srs部分
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: helm-live-op-v2
  namespace: xa-dcity-uas-260116
  labels:
    octopus.control: wdd
    app.kubernetes.io/managed-by: octopus
    cmii.app: live-engine
    cmii.type: live
    helm.sh/chart: cmlc-live-live-op-2.0.0
    live-role: op-v2
spec:
  replicas: 1
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  selector:
    matchLabels:
      live-role: op-v2
  template:
    metadata:
      labels:
        live-role: op-v2
    spec:
      volumes:
        # Spring bootstrap config rendered from helm-live-op-cm-v2.
        - name: srs-conf-file
          configMap:
            name: helm-live-op-cm-v2
            items:
              - key: live.op.conf
                path: bootstrap.yaml
            defaultMode: 420
      containers:
        - name: helm-live-op-v2
          image: 10.22.57.8:8033/cmii/cmii-live-operator:5.2.0
          ports:
            - name: operator
              containerPort: 8080
              protocol: TCP
          resources:
            limits:
              cpu: 4800m
              memory: 4Gi
            requests:
              cpu: 100m
              memory: 256Mi
          volumeMounts:
            - name: srs-conf-file
              mountPath: /cmii/bootstrap.yaml
              subPath: bootstrap.yaml
          livenessProbe:
            httpGet:
              path: /cmii/health
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            periodSeconds: 20
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /cmii/health
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            periodSeconds: 20
            successThreshold: 1
            failureThreshold: 3
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      imagePullSecrets:
        - name: harborsecret
      affinity: {}
      schedulerName: default-scheduler
---
kind: Service
apiVersion: v1
metadata:
  # External (NodePort 30333) access to the live operator API; this is also
  # the `replay` endpoint advertised in the operator config.
  name: helm-live-op-svc-v2
  namespace: xa-dcity-uas-260116
  labels:
    octopus.control: wdd
    app.kubernetes.io/managed-by: octopus
spec:
  type: NodePort
  sessionAffinity: None
  selector:
    live-role: op-v2
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080
      nodePort: 30333
---
kind: Service
apiVersion: v1
metadata:
  # NOTE(review): this selector targets `live-role: op`, but the only
  # operator Deployment in this chunk is labelled `op-v2` — confirm a
  # `live-role: op` workload exists elsewhere, otherwise this Service has
  # no endpoints.
  name: helm-live-op-svc
  namespace: xa-dcity-uas-260116
  labels:
    octopus.control: wdd
    app.kubernetes.io/managed-by: octopus
spec:
  type: ClusterIP
  sessionAffinity: None
  selector:
    live-role: op
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080
---
# Spring bootstrap.yaml for the live operator (mounted by helm-live-op-v2
# as /cmii/bootstrap.yaml). Everything under `data:` is verbatim file
# content consumed at runtime — it carries nacos credentials and the
# public media endpoints; do not insert comments or reflow it.
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: xa-dcity-uas-260116
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: uas-2.2
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: uas-2.2
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: uas-2.2
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://144.7.97.167:31935'
rtsp: 'rtsp://144.7.97.167:30554'
srt: 'srt://144.7.97.167:30556'
flv: 'http://144.7.97.167:30500'
hls: 'http://144.7.97.167:30500'
rtc: 'webrtc://144.7.97.167:30080'
replay: 'https://144.7.97.167:30333'
minio:
endpoint: http://helm-minio:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls

View File

@@ -0,0 +1,52 @@
#!/bin/bash
# One-shot MinIO initialisation for a tenant:
#   1. register an `mc` alias for the tenant endpoint,
#   2. create all application buckets,
#   3. wire bucket notifications to RabbitMQ (AMQP target "1"),
#   4. add per-bucket event rules and a 1-day expiry on the upload bucket.
export tenant_name=outside
export inner_master_ip=helm-rabbitmq
export minio_host_ip=10.22.57.8

mc alias set "${tenant_name}" "http://${minio_host_ip}:39000" cmii 'B#923fC7mk'

# Application buckets (creation order preserved from the original one-liner).
buckets=(jadenq tus thumbnail pub-cms live-srs-hls mission surveillance
         playback tower modelprocess srs-hls live-cluster-hls geodata
         ilm-detect ilm-geodata)
for b in "${buckets[@]}"; do
  mc mb "${tenant_name}/${b}"
done

echo ""
echo "set rabbit mq"
mc admin config set "${tenant_name}" notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@${inner_master_ip}:5672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
echo ""
echo "sleep 5 s!"
sleep 5
# Restart so the new AMQP target is active before event rules are added.
mc admin service restart "${tenant_name}"
echo "sleep 5 s!"
sleep 5
echo ""
echo "start to add event notification !"
# Buckets whose object-PUT events feed the material-warehouse queue.
put_event_buckets=(mission modelprocess live-srs-hls playback
                   live-cluster-hls geodata surveillance ilm-detect ilm-geodata)
for b in "${put_event_buckets[@]}"; do
  mc event add "${tenant_name}/${b}" arn:minio:sqs::1:amqp --event put
done
# tus is the resumable-upload staging bucket: notify on delete, expire after 1 day.
mc event add "${tenant_name}/tus" arn:minio:sqs::1:amqp --event delete
# NOTE(review): `mc ilm add --expiry-days` is deprecated in newer mc releases
# (replaced by `mc ilm rule add --expire-days`); keep in sync with the mc version shipped.
mc ilm add --expiry-days "1" "${tenant_name}/tus"
echo ""
echo "done of init !"

View File

@@ -0,0 +1,258 @@
# RKE cluster definition: 1 combined controlplane/etcd/worker node plus 5 workers.
nodes:
  # FIX: node label values are quoted. Kubernetes label values are strings and
  # RKE unmarshals labels into map[string]string — a bare YAML `true` is a
  # boolean, not the string "true".
  - address: 10.22.57.8
    user: root
    role:
      - controlplane
      - etcd
      - worker
    internal_address: 10.22.57.8
    hostname_override: "master-10-22-57-8"
    labels:
      ingress-deploy: "true"
      uavcloud.env: demo
  - address: 10.22.57.5
    user: root
    role:
      - worker
    internal_address: 10.22.57.5
    hostname_override: "worker-1-10-22-57-5"
    labels:
      uavcloud.env: demo
  - address: 10.22.57.6
    user: root
    role:
      - worker
    internal_address: 10.22.57.6
    hostname_override: "worker-2-10-22-57-6"
    labels:
      uavcloud.env: demo
  - address: 10.22.57.7
    user: root
    role:
      - worker
    internal_address: 10.22.57.7
    hostname_override: "worker-3-10-22-57-7"
    labels:
      uavcloud.env: demo
  - address: 10.22.57.3
    user: root
    role:
      - worker
    internal_address: 10.22.57.3
    hostname_override: "worker-4-10-22-57-3"
    labels:
      mysql-deploy: "true"
  - address: 10.22.57.4
    user: root
    role:
      - worker
    internal_address: 10.22.57.4
    hostname_override: "worker-5-10-22-57-4"
    labels:
      minio-deploy: "true"
      doris-deploy: "true"
authentication:
  strategy: x509
  sans:
    - "10.22.57.8"
private_registries:
  - url: 10.22.57.8:8033  # private image registry
    user: admin
    password: "V2ryStr@ngPss"
    is_default: true
##############################################################################
# Defaults to false; when true RKE does not error on unsupported Docker versions.
ignore_docker_version: true
# Name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.30.14-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
# cri-dockerd replaces the kubelet dockershim (removed after k8s 1.23) and is
# required to keep Docker as the container runtime on 1.21+.
enable_cri_dockerd: true
services:
  etcd:
    backup_config:
      enabled: false
      interval_hours: 72
      retention: 3
      safe_timestamp: false
      timeout: 300
    creation: 12h
    extra_args:
      election-timeout: 5000
      heartbeat-interval: 500
      cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
    gid: 0
    retention: 72h
    snapshot: false
    uid: 0
  kube-api:
    # Must match service_cluster_ip_range in kube-controller.
    service_cluster_ip_range: 10.74.0.0/16
    # NodePort range; the 30000+ NodePorts used by the app manifests depend on it.
    service_node_port_range: 30000-40000
    always_pull_images: false
    pod_security_policy: false
    # This WILL OVERRIDE any existing defaults.
    extra_args:
      audit-log-path: "-"  # audit log to stdout
      delete-collection-workers: 3
      v: 1  # warning-level log verbosity
  kube-controller:
    # CIDR pool used to assign IP addresses to pods in the cluster.
    cluster_cidr: 10.100.0.0/16
    # Must match service_cluster_ip_range in kube-api.
    service_cluster_ip_range: 10.74.0.0/16
    extra_args:
      v: 1
      # Enable kubelet server-cert rotation; see
      # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
      feature-gates: RotateKubeletServerCertificate=true
      cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
      cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
  kubelet:
    cluster_domain: cluster.local
    cluster_dns_server: 10.74.0.10
    fail_swap_on: false
    extra_binds:
      - "/data/minio-pv:/hostStorage"  # do not change: host path backing the MinIO PV
    extra_args:
      max-pods: 122
  scheduler:
    extra_args:
      v: 0
      tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
  kubeproxy:
    extra_args:
      v: 1
authorization:
  mode: rbac
addon_job_timeout: 30
# Network plug-in (canal, calico, flannel, weave, or none)
network:
  mtu: 1440
  options:
    flannel_backend_type: vxlan
  plugin: calico
  # NOTE(review): the Kubernetes field name is `tolerationSeconds`; confirm
  # RKE accepts the all-lowercase `tolerationseconds` spelling used here.
  tolerations:
    - key: "node.kubernetes.io/unreachable"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300
    - key: "node.kubernetes.io/not-ready"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300
# DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  nodelocal: {}
  # Available as of v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 20%
      maxSurge: 15%
  linear_autoscaler_params:
    cores_per_replica: 0.34
    nodes_per_replica: 4
    prevent_single_point_failure: true
    min: 2
    max: 3
  tolerations:
    - key: "node.kubernetes.io/unreachable"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300
    - key: "node.kubernetes.io/not-ready"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300
# Monitoring provider (metrics-server)
monitoring:
  provider: metrics-server
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 8
ingress:
  provider: nginx
  default_backend: true
  http_port: 30500
  https_port: 31500
  extra_envs:
    - name: TZ
      value: Asia/Shanghai
  node_selector:
    ingress-deploy: "true"  # matches the (now quoted) node label above
  options:
    use-forwarded-headers: "true"
    access-log-path: /var/log/nginx/access.log
    client-body-timeout: '6000'
    compute-full-forwarded-for: 'true'
    enable-underscores-in-headers: 'true'
    log-format-escape-json: 'true'
    log-format-upstream: >-
      { "msec": "$msec", "connection": "$connection", "connection_requests":
      "$connection_requests", "pid": "$pid", "request_id": "$request_id",
      "request_length": "$request_length", "remote_addr": "$remote_addr",
      "remote_user": "$remote_user", "remote_port": "$remote_port",
      "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
      "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
      "request_uri": "$request_uri", "args": "$args", "status": "$status",
      "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
      "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
      "http_host": "$http_host", "server_name": "$server_name", "request_time":
      "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
      "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
      "upstream_response_time": "$upstream_response_time",
      "upstream_response_length": "$upstream_response_length",
      "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
      "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
      "request_method": "$request_method", "server_protocol": "$server_protocol",
      "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
      "geoip_country_code": "$geoip_country_code" }
    proxy-body-size: 5120m
    proxy-read-timeout: '6000'
    proxy-send-timeout: '6000'

View File

@@ -0,0 +1,60 @@
192.168.1.4
移动云:
MYDKFX2026
wrbvGO57
您的客户经理沈玉雪联系方式18380551056
2、公网IP地址36.133.66.183
3、服务器密码都是v6w6apLf7=
## 192.168.1.6 2TB
mv agent-wdd_linux_amd64 /usr/local/bin/agent-wdd
chmod +x /usr/local/bin/agent-wdd
# 主节点安装ssh-key
/usr/local/bin/agent-wdd base ssh config
/usr/local/bin/agent-wdd base ssh key
DEFAULT_HTTP_BACKEND_IP=$(kubectl -n ingress-nginx get svc default-http-backend -o jsonpath='{.spec.clusterIP}')
# 批量执行命令
host_list=(
192.168.1.3
192.168.1.5
192.168.1.2
192.168.1.6
)
for server in "${host_list[@]}";do
echo " ---> current ip is $server - $(hostname) \n"
ssh root@"$server" "DEFAULT_HTTP_BACKEND_IP='$DEFAULT_HTTP_BACKEND_IP' bash -s" <<EOF
echo "DEFAULT_HTTP_BACKEND_IP=$DEFAULT_HTTP_BACKEND_IP"
curl -s "http://${DEFAULT_HTTP_BACKEND_IP}"
echo
EOF
echo ""
done
scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config && /usr/local/bin/agent-wdd base ssh key"
ssh root@${server} "echo yes"
ssh root@${server} "echo \"\"> /etc/apt/apt.conf.d/01proxy"
ssh root@${server} "printf '%s\n' \
'Acquire::http::Proxy \"http://10.22.57.8:3142\";' \
'Acquire::https::Proxy \"http://10.22.57.8:3142\";' \
| tee /etc/apt/apt.conf.d/01proxy >/dev/null"
ssh root@${server} "apt-get update"
ssh root@${server} "apt-get install -y gparted"
apt-get install -y docker.io=20.10.12-0ubuntu4 containerd=1.7.28-0ubuntu1~22.04.1 docker-buildx=0.20.1-0ubuntu1~22.04.2 docker-compose=1.29.2-1

View File

@@ -0,0 +1,168 @@
#!/bin/bash
all_image_list_txt="all-cmii-image-list.txt" # 需要修改版本
gzip_image_list_txt="all-gzip-image-list.txt" # 一般不需要修改
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus-image"
DockerRegisterDomain="192.168.1.4:8033" # 需要根据实际修改
HarborAdminPass=V2ryStr@ngPss # 需要跟第一脚本中的密码保持一致
# Print the given message in green, followed by a blank line.
print_green() {
  echo -e "\e[32m$1\e[0m"
  echo
}
# Print the given message in red, followed by a blank line.
print_red() {
  echo -e "\e[31m$1\e[0m"
  echo
}
# Online path: for each requested category (rke|middle|cmii), point the OSS
# prefix and local cache at the category sub-directory and run dltu, which
# downloads, loads, re-tags and pushes every image archive.
# NOTE: local_gzip_path/oss_prefix_url are globals mutated per category.
Download_Load_Tag_Upload() {
  print_green "[DLTU] - start !"
  while [[ $# -gt 0 ]]; do
    case "$1" in
      rke)
        local_gzip_path="$local_gzip_path/rke13014"
        mkdir -p "${local_gzip_path}"
        oss_prefix_url="$oss_prefix_url/rke13014/"
        dltu
        shift # past argument
        ;;
      middle)
        local_gzip_path="$local_gzip_path/middle"
        mkdir -p "$local_gzip_path"
        oss_prefix_url="$oss_prefix_url/middle/"
        dltu
        shift # past argument
        ;;
      cmii)
        local_gzip_path="$local_gzip_path/xauas22"
        mkdir -p "$local_gzip_path"
        oss_prefix_url="$oss_prefix_url/xauas22/"
        dltu
        shift # past argument
        ;;
      *)
        print_red "bad arguments"
        # FIX: the unknown-option branch did not consume the argument, so
        # `while [[ $# -gt 0 ]]` spun forever on any unrecognised value.
        shift
        ;;
    esac
  done
}
# Download the image-list manifests, then every gzip archive listed in
# $gzip_image_list_txt; docker-load each archive, re-tag it into the private
# registry ($DockerRegisterDomain, project rancher/ or cmii/) and push.
dltu() {
  print_green "download all image name list and gzip file list!"
  cd "$local_gzip_path" || exit
  # FIX: -f so a first run (no stale lists on disk) does not print errors.
  rm -f "$all_image_list_txt" "$gzip_image_list_txt"
  wget "$oss_prefix_url$all_image_list_txt"
  wget "$oss_prefix_url$gzip_image_list_txt"
  docker login -u admin -p "${HarborAdminPass}" "${DockerRegisterDomain}"
  echo ""
  while IFS= read -r i; do
    [ -z "${i}" ] && continue
    echo "download gzip file =>: $oss_prefix_url${i}"
    if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then
      echo "Gzip file download success : ${i}"
      # `docker load` prints "Loaded image: <name>"; take the name part.
      image_full_name=$(docker load -i "${i}" | head -n1 | awk -F': ' '{print $2}')
      app_name=${image_full_name##*/}  # strip registry/repo prefix
      echo "extract short name is $app_name"
      if echo "$image_full_name" | grep -q "rancher"; then
        print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
        docker tag "${image_full_name}" "$DockerRegisterDomain/rancher/$app_name"
        docker push "$DockerRegisterDomain/rancher/$app_name"
      else
        print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
        docker tag "${image_full_name}" "$DockerRegisterDomain/cmii/$app_name"
        docker push "$DockerRegisterDomain/cmii/$app_name"
      fi
    else
      print_red "Gzip file download FAILED : ${i}"
    fi
    echo "-------------------------------------------------"
  done <"${gzip_image_list_txt}"
  # FIX: removed the stray trailing `shift` — dltu takes no arguments, so it
  # was at best a no-op leftover from the caller's case handling.
}
# Offline path: like Download_Load_Tag_Upload but loads archives already on
# disk under $local_gzip_path/<category> (via ltu) instead of downloading.
# NOTE(review): the cmii branch uses sub-directory "cmii" while the online
# variant uses "xauas22" — confirm the asymmetry is intended.
Load_Tag_Upload() {
  print_green "[LTU] - start to load image from offline !"
  while [[ $# -gt 0 ]]; do
    case "$1" in
      rke)
        local_gzip_path="$local_gzip_path/rke13014"
        mkdir -p "${local_gzip_path}"
        oss_prefix_url="$oss_prefix_url/rke13014/"
        ltu
        shift # past argument
        ;;
      middle)
        local_gzip_path="$local_gzip_path/middle"
        mkdir -p "$local_gzip_path"
        oss_prefix_url="$oss_prefix_url/middle/"
        ltu
        shift # past argument
        ;;
      cmii)
        local_gzip_path="$local_gzip_path/cmii"
        mkdir -p "$local_gzip_path"
        oss_prefix_url="$oss_prefix_url/cmii/"
        ltu
        shift # past argument
        ;;
      *)
        print_red "bad arguments"
        # FIX: the unknown-option branch did not consume the argument, so
        # `while [[ $# -gt 0 ]]` spun forever on any unrecognised value.
        shift
        ;;
    esac
  done
}
# Load every *.tar.gz image archive under $local_gzip_path, re-tag into the
# private registry and push (rancher/ or cmii/ project by image name).
ltu() {
  all_file_list=$(find "$local_gzip_path" -type f -name "*.tar.gz")
  # FIX: log in once up front — the original logged in inside the loop with
  # identical credentials on every iteration.
  docker login -u admin -p "${HarborAdminPass}" "${DockerRegisterDomain}"
  # Intentionally unquoted: word-splitting iterates the newline-separated
  # find output (paths are expected to contain no whitespace).
  for file in $all_file_list; do
    echo "offline gzip file is => : $file"
    image_full_name=$(docker load -i "${file}" | head -n1 | awk -F': ' '{print $2}')
    app_name=${image_full_name##*/}  # strip registry/repo prefix
    echo "extract short name is $app_name"
    if echo "$image_full_name" | grep -q "rancher"; then
      print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
      docker tag "${image_full_name}" "$DockerRegisterDomain/rancher/$app_name"
      docker push "$DockerRegisterDomain/rancher/$app_name"
    else
      print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
      docker tag "${image_full_name}" "$DockerRegisterDomain/cmii/$app_name"
      docker push "$DockerRegisterDomain/cmii/$app_name"
    fi
  done
}
# Ad-hoc check of the short-name extraction used by dltu/ltu.
# NOTE(review): the name `test` shadows the shell builtin `test` for the rest
# of this script; consider renaming if the builtin is ever needed.
test(){
app_name=$(echo "nginx:latest" | sed 's|.*/||g')
echo "extract short name is $app_name"
}
# test
# Entry point: online download+push of the RKE image set.
Download_Load_Tag_Upload "rke"
# Load_Tag_Upload "cmii"

View File

@@ -0,0 +1,82 @@
#!/bin/bash
harbor_host=192.168.1.4:8033
namespace=sc-my-uav-260202
app_name=""
new_tag=""
# Download the given image archive ($1) from the installation OSS bucket
# into the current directory; exits 233 when no file name is supplied.
download_from_oss() {
if [ "$1" == "" ]; then
echo "no zip file in error!"
exit 233
fi
echo "start to download => $1"
wget "https://oss.demo.uavcmlc.com/cmlc-installation/tmp/$1"
echo ""
echo ""
}
# Load the downloaded archive ($1), re-tag the contained
# harbor.cdcyy.com.cn/cmii image for the local harbor and push it.
# Relies on parse_args having populated the $app_name / $new_tag globals.
upload_image_to_harbor() {
  if [ "$app_name" == "" ]; then
    echo "app name null exit!"
    exit 233
  fi
  if ! docker load < "$1"; then
    echo "docker load error !"
    # FIX: the original only echoed and fell through, then tagged/pushed an
    # image that was never loaded; bail out instead.
    exit 233
  fi
  docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
  echo ""
  echo ""
  echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
  docker login -u admin -p V2ryStr@ngPss "$harbor_host"
  docker push "$harbor_host/cmii/$app_name:$new_tag"
  echo ""
  echo ""
}
# Split an archive name "<app>=<tag>=<date>=<n>.tar.gz" into the app_name
# and new_tag globals; exits 233 when no name was supplied.
parse_args() {
  if [ -z "$1" ]; then
    echo "no zip file in error!"
    exit 233
  fi
  local archive="$1"
  # e.g. cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
  app_name=${archive%%=*}
  local rest=${archive#*=}
  new_tag=${rest%%=*}
}
# Patch the running Deployment ($app_name in $namespace) to the freshly
# pushed image tag ($new_tag), then print the resulting image reference.
update_image_tag(){
if [ "$new_tag" == "" ]; then
echo "new tag error!"
exit 233
fi
# Current image (repo part only) — logged for comparison before patching.
local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)
echo "image grep is => ${image_prefix}"
echo "start to update ${namespace} ${app_name} to ${new_tag} !"
echo ""
kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
echo ""
echo "start to wait for 3 seconds!"
sleep 3
# Read the image back to confirm the patch landed.
local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
echo ""
echo "new image are => $image_new"
echo ""
}
# Pipeline: parse "<app>=<tag>=..." from $1, fetch the archive from OSS,
# push it into the local harbor, then roll the Deployment to the new tag.
main(){
parse_args "$1"
download_from_oss "$1"
upload_image_to_harbor "$1"
update_image_tag
}
main "$@"

View File

@@ -0,0 +1 @@
eyJhbGciOiJSUzI1NiIsImtpZCI6ImVldHZIMnEyWjJsTDV1TWNPNTZmZjF2d1RxeDgzdDNidHlWSXlIMGNaV3cifQ.eyJhdWQiOlsidW5rbm93biJdLCJleHAiOjE4NjQ3MTAxODAsImlhdCI6MTc3MDEwMjE4MCwiaXNzIjoicmtlIiwianRpIjoiY2Q2MTJmNzItOWY1NS00MGFiLTg3MjMtZGJjNTc3MmVhNTIyIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiOTcwOTlkNTctOWNiMS00YjcxLTkyNTMtOGI1OTUyNWJmZDBkIn19LCJuYmYiOjE3NzAxMDIxODAsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.pL6lP3HUh9RMfnA0JBha-GnWEON9nnltnLaNsamr1AIgUReN5hhl-IsD3rbhKUcHvm9sCy9cFkQxVbt81CapGN0zxgfNvlVzxzCdQZpd1vtmGp70FSaT7wQoBC7pAWSHU0FUEltMUKWGSBvy0ZtDHEvRilk4Ie4MVN89hCPNOEiZaIC2QagSl7oBd3ppJSnHAMB7eM3pJDP2OhKNWBddNqC0YuHdbk6JHTkfmF2Xe3CYLTYfK6A1GCfVXGUApHgiRB0Dq1BT9M7wcIb4ZOlo9kzvdbISDSyyMiwTZQzR-_VyifyBXCilsYGYb8TnUXUqOVCmuxaGzUJzXMQsPEfuGQ

View File

@@ -0,0 +1,215 @@
kind: Deployment
apiVersion: apps/v1
metadata:
  name: cmii-fly-center
  namespace: sc-my-uav-260202
  labels:
    app.kubernetes.io/app-version: 6.0.0
    app.kubernetes.io/managed-by: octopus
    cmii.app: cmii-fly-center
    cmii.type: backend
    octopus/control: backend-app-1.0.0
spec:
  replicas: 1
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 25%
  selector:
    matchLabels:
      cmii.app: cmii-fly-center
      cmii.type: backend
  template:
    metadata:
      labels:
        cmii.app: cmii-fly-center
        cmii.type: backend
    spec:
      volumes:
        - name: application-k8s
          configMap:
            name: cmii-fly-center-cm
            items:
              - key: application-k8s.yml
                path: application-k8s.yml
            defaultMode: 420
        - name: nfs-backend-log-volume
          persistentVolumeClaim:
            claimName: nfs-backend-log-pvc
      containers:
        - name: cmii-fly-center
          # FIX: tag was ".0.0-012601" — a Docker tag must not start with a
          # period, so the image could never be pulled. Aligned with the
          # sibling cmii-sky-converge deployment (2.0.0-012601).
          # NOTE(review): confirm this is the intended release tag.
          image: 192.168.1.4:8033/cmii/cmii-fly-center:2.0.0-012601
          ports:
            - name: pod-port
              containerPort: 8080
              protocol: TCP
          env:
            # NOTE(review): K8S_NAMESPACE says "ahydapp" while this manifest
            # deploys into sc-my-uav-260202 — confirm intentional.
            - name: K8S_NAMESPACE
              value: ahydapp
            - name: APPLICATION_NAME
              value: cmii-fly-center
            - name: CUST_JAVA_OPTS
              value: '-Xms2000m -Xmx4500m -Dlog4j2.formatMsgNoLookups=true'
            - name: NACOS_REGISTRY
              value: helm-nacos:8848
            - name: NACOS_DISCOVERY_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: NACOS_DISCOVERY_PORT
              value: '8080'
            - name: BIZ_CONFIG_GROUP
              value: 5.7.0
            - name: SYS_CONFIG_GROUP
              value: 5.7.0
            - name: IMAGE_VERSION
              value: 5.7.0
            - name: NACOS_USERNAME
              value: developer
            - name: NACOS_PASSWORD
              value: Deve@9128201
            - name: SPRING_PROFILES_ACTIVE
              value: k8s,db,cache,message
          resources:
            limits:
              cpu: '4'
              memory: 6Gi
            requests:
              cpu: '4'
              memory: 2Gi
          volumeMounts:
            - name: application-k8s
              mountPath: /cmii/config/application-k8s.yml
              subPath: application-k8s.yml
            - name: nfs-backend-log-volume
              mountPath: /cmii/logs
              subPath: uavcloud-devflight/cmii-fly-center
          livenessProbe:
            httpGet:
              path: /cmii/health
              port: pod-port
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            periodSeconds: 20
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /cmii/health
              port: pod-port
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            periodSeconds: 20
            successThreshold: 1
            failureThreshold: 3
          startupProbe:
            httpGet:
              path: /cmii/health
              port: pod-port
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 3
            periodSeconds: 20
            successThreshold: 1
            failureThreshold: 5
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      imagePullSecrets:
        - name: harborsecret
      # Pin to nodes labelled for this environment.
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: uavcloud.env
                    operator: In
                    values:
                      - mianyang
      schedulerName: default-scheduler
---
kind: Service
apiVersion: v1
metadata:
  # ClusterIP service fronting the cmii-fly-center backend pods on 8080.
  name: cmii-fly-center
  namespace: sc-my-uav-260202
  labels:
    app.kubernetes.io/app-version: 6.0.0
    app.kubernetes.io/managed-by: octopus
    cmii.app: cmii-fly-center
    cmii.type: backend
    octopus/control: backend-app-1.0.0
spec:
  type: ClusterIP
  selector:
    cmii.app: cmii-fly-center
    cmii.type: backend
  ports:
    - name: backend-tcp
      protocol: TCP
      port: 8080
      targetPort: 8080
---
# Middleware connection settings for cmii-fly-center, mounted as
# /cmii/config/application-k8s.yml. Everything under `data:` is verbatim
# file content consumed at runtime (including its own # lines) — do not
# insert YAML comments or reflow anything inside it.
kind: ConfigMap
apiVersion: v1
metadata:
name: cmii-fly-center-cm
namespace: sc-my-uav-260202
data:
application-k8s.yml: |
center:
####################下面部分为中间件对应配置,需要确认!!!#######################
############使用k8s部署的中间件可以直接用k8s里面的服务名#################
db:
ip: helm-mysql
port: 3306
username: k8s_admin
password: fP#UaH6qQ3)8
rabbitmq:
ip: helm-rabbitmq
port: 5672
username: admin
password: nYcRN91r._hj
redis:
ip: helm-redis-master
port: 6379
password: Mcache@4522
mqtt:
BASIC:
ip: helm-emqxs
port: 1883 # mqtt内部1883端口
username: cmlc
password: odD8#Ve7.B
DRC:
ip: 36.133.66.183 # 设备连接的mqtt的公网IP
port: 31883 # 1883映射的公网端口
username: cmlc
password: odD8#Ve7.B
influxdb:
ip: helm-influxdb #influxdb宿主机的内部ip
port: 8086 #influxdb宿主机的端口
token: YunnHJASAAdj23rasQAWd621erGAS82kaqj
org: cmii
bucket: cmii
minio:
ip: helm-minio # minio服务的宿主机ip
port: 39000
access-key: cmii # minio的访问key
secret-key: B#923fC7mk # minio访问secret
publicEndpoint: http://36.133.66.183:39000
shareEndpoint: http://36.133.66.183:8088/center
hub:
appKey: Zhdjk*72uU^2xz@s

View File

@@ -0,0 +1,154 @@
kind: Deployment
apiVersion: apps/v1
metadata:
  name: cmii-uav-platform-lite
  namespace: sc-my-uav-260202
  labels:
    app.kubernetes.io/app-version: 6.2.0
    cmii.app: cmii-uav-platform-lite
    cmii.type: frontend
    octopus.lite: frontend-app-wdd
  # FIX: dropped the annotation `deployment.kubernetes.io/revision: '8'` —
  # it is owned by the Deployment controller and should not be applied
  # from source manifests.
spec:
  replicas: 1
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 25%
  selector:
    matchLabels:
      cmii.app: cmii-uav-platform-lite
      cmii.type: frontend
  template:
    metadata:
      labels:
        cmii.app: cmii-uav-platform-lite
        cmii.type: frontend
    spec:
      volumes:
        # nginx vhost config (nginx-cm) and runtime frontend config
        # (tenant-prefix-lite), both mounted file-by-file via subPath.
        - name: nginx-conf
          configMap:
            name: nginx-cm
            items:
              - key: nginx.conf
                path: nginx.conf
            defaultMode: 420
        - name: tenant-prefix
          configMap:
            name: tenant-prefix-lite
            items:
              - key: ingress-config.js
                path: ingress-config.js
            defaultMode: 420
      containers:
        - name: cmii-uav-platform-lite
          image: 192.168.1.4:8033/cmii/cmii-uav-platform-lite:2.0.0-2026012703-noicp
          ports:
            - name: platform-9528
              containerPort: 9528
              protocol: TCP
          env:
            # NOTE(review): K8S_NAMESPACE says "ahydapp" while this manifest
            # deploys into sc-my-uav-260202 — confirm intentional.
            - name: K8S_NAMESPACE
              value: ahydapp
            - name: APPLICATION_NAME
              value: cmii-uav-platform-lite
          resources:
            limits:
              cpu: '1'
              memory: 1Gi
            requests:
              cpu: 50m
              memory: 50Mi
          volumeMounts:
            - name: nginx-conf
              mountPath: /etc/nginx/conf.d/nginx.conf
              subPath: nginx.conf
            - name: tenant-prefix
              mountPath: /home/cmii-platform/dist/ingress-config.js
              subPath: ingress-config.js
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      imagePullSecrets:
        - name: harborsecret
      schedulerName: default-scheduler
---
kind: Service
apiVersion: v1
metadata:
  name: cmii-uav-platform-lite
  namespace: sc-my-uav-260202
  labels:
    app.kubernetes.io/version: 6.2.0
    cmii.app: cmii-uav-platform-lite
    cmii.type: frontend
    octopus.control: frontend-app-wdd
spec:
  type: ClusterIP
  sessionAffinity: None
  selector:
    cmii.app: cmii-uav-platform-lite
    cmii.type: frontend
  ports:
    - name: web-svc-port
      protocol: TCP
      port: 9528
      targetPort: 9528
# FIX: removed the trailing `status: {loadBalancer: {}}` stanza — status is
# a server-populated subresource and does not belong in an applied manifest.
---
# Frontend runtime configuration, mounted into the platform-lite pod as
# /home/cmii-platform/dist/ingress-config.js. Everything under `data:` is
# verbatim JavaScript served to browsers — do not insert YAML comments
# or reflow it.
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-lite
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "lite",
AppClientId: "",
Headers: {
ORG_ID: 'pago',
PROJECT_ID: 'prgn'
},
TdtToken: "XXXX"
}
---
# nginx vhost serving the built frontend bundle on 9528, mounted into the
# platform-lite pod as /etc/nginx/conf.d/nginx.conf. Everything under
# `data:` is verbatim nginx configuration — do not insert YAML comments
# or reflow it.
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-cm
namespace: sc-my-uav-260202
labels:
cmii.type: frontend
data:
nginx.conf: |
server {
listen 9528;
server_name localhost;
gzip on;
location / {
root /home/cmii-platform/dist;
index index.html index.htm;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}

View File

@@ -0,0 +1,247 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: cmii-sky-converge
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/app-version: 6.0.0
app.kubernetes.io/managed-by: octopus
cmii.app: cmii-sky-converge
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
replicas: 1
selector:
matchLabels:
cmii.app: cmii-sky-converge
cmii.type: backend
template:
metadata:
creationTimestamp: null
labels:
cmii.app: cmii-sky-converge
cmii.type: backend
spec:
volumes:
- name: application-k8s
configMap:
name: cmii-sky-converge-cm
items:
- key: application-k8s.yml
path: application-k8s.yml
- key: simAuth.license
path: simAuth.license
defaultMode: 420
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
containers:
- name: cmii-sky-converge
image: 192.168.1.4:8033/cmii/cmii-sky-converge:2.0.0-012601
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
env:
- name: K8S_NAMESPACE
value: sc-my-uav-260202  # fixed: was 'ahydapp' (stale copy-paste from another environment); must match this manifest's namespace
- name: APPLICATION_NAME
value: cmii-sky-converge
- name: CUST_JAVA_OPTS
value: '-Xms2000m -Xmx4500m -Dlog4j2.formatMsgNoLookups=true'
- name: NACOS_REGISTRY
value: helm-nacos:8848
- name: NACOS_DISCOVERY_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: NACOS_DISCOVERY_PORT
value: '8080'
- name: BIZ_CONFIG_GROUP
value: 5.7.0
- name: SYS_CONFIG_GROUP
value: 5.7.0
- name: IMAGE_VERSION
value: 5.7.0
- name: NACOS_USERNAME
value: developer
- name: NACOS_PASSWORD
value: Deve@9128201
- name: SPRING_PROFILES_ACTIVE
value: k8s,db,cache,message
resources:
limits:
cpu: '4'
memory: 6Gi
requests:
cpu: '4'
memory: 2Gi
volumeMounts:
- name: application-k8s
mountPath: /cmii/config/application-k8s.yml
subPath: application-k8s.yml
- name: application-k8s
mountPath: /cmii/config/simAuth.license
subPath: simAuth.license
- name: nfs-backend-log-volume
mountPath: /cmii/logs
subPath: sc-my-uav-260202/cmii-sky-converge  # fixed: siblings in this file log under <namespace>/<app>; 'uavcloud-devflight' was a stale prefix
livenessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
startupProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 3
periodSeconds: 20
successThreshold: 1
failureThreshold: 5
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- mianyang
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: cmii-sky-converge
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/app-version: 6.0.0
app.kubernetes.io/managed-by: octopus
cmii.app: cmii-sky-converge
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
ports:
- name: backend-tcp
protocol: TCP
port: 8080
targetPort: 8080
selector:
cmii.app: cmii-sky-converge
cmii.type: backend
type: ClusterIP
---
kind: ConfigMap
apiVersion: v1
metadata:
name: cmii-sky-converge-cm
namespace: sc-my-uav-260202
data:
application-k8s.yml: |
converge:
####################下面部分为中间件对应配置,需要确认!!!#######################
############使用k8s部署的中间件可以直接用k8s里面的服务名#################
db:
ip: helm-mysql
port: 3306
username: k8s_admin
password: fP#UaH6qQ3)8
mqtt:
ip: helm-emqxs
port: 1883 # mqtt内部1883端口
username: cmlc
password: odD8#Ve7.B
rabbitmq:
ip: helm-rabbitmq
port: 5672
username: admin
password: nYcRN91r._hj
redis:
ip: helm-redis-master
port: 6379
password: Mcache@4522
influxdb:
ip: helm-influxdb #influxdb宿主机的内部ip
port: 8086 #influxdb宿主机的端口如果docker compose文件没有改动则默认不变
token: YunnHJASAAdj23rasQAWd621erGAS82kaqj
org: cmii
bucket: cmii
minio:
ip: helm-minio # minio服务的宿主机ip
port: 39000
access-key: cmii # minio的访问key
secret-key: B#923fC7mk # minio访问secret
#######################下面部分是业务服务需要的配置,需要确认!!!#######################
center:
address: http://cmii-fly-center:8080 # cmii-fly-center服务部署的容器宿主机ip地址和暴露的端口
stream:
endpoint: http://36.133.66.183:8088 #平台地址端口
buckets:
live-srs-hls: ilm-detect
storage:
endpoint: http://36.133.66.183:8088/converge # cmii-sky-converge服务的公网请求地址需要匹配到all-gateways-ingress里面converge服务的根路径
live:
merge:
tmp: /tmp/ffmpeg/
expired: 10
sms:
mas:
enable: false #内网部署改为false
host: http://XXX:XXX/sms/tmpsubmit
ecName: XXX科技有限公司
apId: notice
secretKey: notice@123
sign: ynYl2Vpl7
templateId: e4dc71ddd5c24d25b24daa01e969e24
expire: 3
limit:
minute: 5
hour: 15
day: 30
sim:
# true = 启用, false = 不启用
enable: false
###固定k8s里面挂载路径
licensePath: /cmii/config/simAuth.license
# 测试环境https://ptest.cmccsim.com:9090, 生产环境https://certplat.cmccsim.com
host: https://ptest.cmccsim.com:9090
callbackUrl: http://183.220.196.116:8088/converge
simAuth.license: >
BOOedo/TVLbYLdKyGkFYEAljoncjd2+mKkwARpNkb0Q8D0QaZbOnCjJdMj0kUtHVRJ03CYujyVJZ8Xc1JvBTujSFgBvNwXWJN2E35TZYGUYx4uZW7WZJ9ajp3pi9Q4V9JLA4qdyd/Zaz0/T+mqaXzW0l18jA9VL25fB0tkzQYpySql76V9QAowpuVcklItcNZ8YWwK4lbPjaygBhZVNqdhbJQwqLG7io2X0QV11T5yhbu8SXCag0hoX6s93IBz0k4Aze2TZvpJ25o/NuMptWKviddrVNpVAIwT/L9kLNVkBT8T0xysX6Ku+9aLKUlLrGw4lhAHM5iHp82jduw7L9jc878ZZgOoUALLaw9axnVdnf3XfhZ75/uhx4mZ+JnNS2aNH18mVR53CGT3jxY0y1RA64e2zhMhFr/KNxVGIuZl/iAr1EGI85QWrnYGsLNbilCFlZyDzcH8tK4hDvmMtUe1xCEUF6oO9nwr+YDHGBSM1ifXLJZrvwuDI7Zim+h6pUqctWhtf6eyfyF17iBrzzt6lmSjkQtZ1kRVUxRni68/FPH9YJBKQhJItAk2h1OaUBB1Lt5vfu8OYi5S+onTmesvlIuUk7USBIFbt4kVhUpgGtV+WyddcjH6BJo3NPqCYcObR4KeLmQ/bHmN/xyVT3HMed8VhiVv0U8EuTINJxmXh+nDVmeDEUa4qYtPRGArSsGF2KGbnOOqwkyk1D/o81Zxb8Kklxn3I/CK1EM63HZLY4hGm52oRsNDjbJPFFFUdTqyQw7igHdwJYJbgxqycCAh1f8zioVVziOXwHxV85poIpVG7pP0LWrYttW1e2WdrSI4WUO1X4krfPu+7WxYHj4Cs4aTflYM9F+KVqbw6bVlg5PIPRiIy6eMRqzvl53y9eesd7eqUNgRnM13PmRDJPe6sw5BnaPn1eHBk7Mh+CAsdRnq8V0t9NkRK2aNfJFNo/PPjahDlw9DHMnJW3QGgZNR3LqFKQxDQIpR7xwgsYX5CmZo3gaBHbTx1EozCagco1tGHrRaDlJNjAYKjnus0huujI0dh+w/ybkWoN4jPQiMWx5O/oem62ga5NbHd5wS/A5e9UKfNZef1NYJyiWRYNINXr3lUl0835rb38q6+5tBKZnrJq1GZ8n2IEuw8L1YcbvtuSDBlHYGSDQ6yD5sL/qv73sXjL2jwtu+QllQt6jhFw5VUKIFRhCjuTeLbrzmcOO2TwCVZb89QPW5rGNA5sO99qormwqNkwXzsKXNx6r9B2rQ6WUdP05r1ti0YAShBdfC1CPhpt2yuKIFO3eh8J6fWjHygBX2kYq+zdb4w39d77gBayQX5lIw4MY0Dcqbyw/MvqcnXj47EUmQ+xIxkaL76C4nbN7GuMQs1rpJ4eox9qqyBeKbVGo/7/aqP6vMXl0BsPWLV5Z2jCy0HzgyBMy4mU1Q==

View File

@@ -0,0 +1,79 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
namespace: sc-my-uav-260202
name: helm-minio
spec:
serviceName: helm-minio
replicas: 1
selector:
matchLabels:
app: helm-minio
template:
metadata:
labels:
app: helm-minio
spec:
imagePullSecrets:
- name: harborsecret
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: minio-deploy
operator: In
values:
- "true"
containers:
- name: minio
image: 192.168.1.4:8033/cmii/minio:RELEASE.2023-06-02T23-17-26Z
command: ["/bin/sh", "-c"]
args:
- minio server /data --console-address ":9001"
ports:
- containerPort: 9000
name: api
- containerPort: 9001
name: console
env:
- name: MINIO_ACCESS_KEY
value: "cmii"
- name: MINIO_SECRET_KEY
value: "B#923fC7mk"
volumeMounts:
- name: data
mountPath: /data
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
volumes:
- name: data
# persistentVolumeClaim:
# claimName: helm-minio
hostPath:
path: /var/lib/docker/minio-pv/
---
apiVersion: v1
kind: Service
metadata:
name: helm-minio
namespace: sc-my-uav-260202
spec:
selector:
app: helm-minio
ports:
- name: api
port: 9000
targetPort: 9000
nodePort: 39000  # NOTE(review): outside the default NodePort range 30000-32767 — requires kube-apiserver --service-node-port-range to be extended; confirm cluster config
- name: console
port: 9001
targetPort: 9001
nodePort: 39001
type: NodePort

View File

@@ -0,0 +1,350 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-fly-center
namespace: sc-my-uav-260202
labels:
cmii.type: backend
cmii.app: cmii-fly-center
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: "2.0"  # quoted — bare 2.0 is a YAML float; label values must be strings
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: backend
cmii.app: cmii-fly-center
template:
metadata:
labels:
cmii.type: backend
cmii.app: cmii-fly-center
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- mianyang
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-fly-center
image: 192.168.1.4:8033/cmii/cmii-fly-center:2.0.0-0126
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: sc-my-uav-260202
- name: APPLICATION_NAME
value: cmii-fly-center
- name: CUST_JAVA_OPTS
value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
- name: NACOS_REGISTRY
value: "helm-nacos:8848"
- name: NACOS_DISCOVERY_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NACOS_DISCOVERY_PORT
value: "8080"
- name: BIZ_CONFIG_GROUP
value: "2.0"  # quoted — bare 2.0 is a YAML float; EnvVar.value must be a string
- name: SYS_CONFIG_GROUP
value: "2.0"  # quoted — bare 2.0 is a YAML float; EnvVar.value must be a string
- name: NACOS_USERNAME
value: "developer"
- name: NACOS_PASSWORD
value: "Deve@9128201"
- name: IMAGE_NAME
value: 192.168.1.4:8033/cmii/cmii-fly-center:2.0.0-0126
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NODE_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: LIMIT_CPU
valueFrom:
resourceFieldRef:
containerName: cmii-fly-center
resource: limits.cpu
- name: LIMIT_MEMORY
valueFrom:
resourceFieldRef:
containerName: cmii-fly-center
resource: limits.memory
- name: REQUEST_CPU
valueFrom:
resourceFieldRef:
containerName: cmii-fly-center
resource: requests.cpu
- name: REQUEST_MEMORY
valueFrom:
resourceFieldRef:
containerName: cmii-fly-center
resource: requests.memory
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
livenessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
startupProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 3
periodSeconds: 20
successThreshold: 1
failureThreshold: 5
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
readOnly: false
subPath: sc-my-uav-260202/cmii-fly-center
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
name: cmii-fly-center
namespace: sc-my-uav-260202
labels:
cmii.type: backend
cmii.app: cmii-fly-center
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: "2.0"  # quoted — bare 2.0 is a YAML float; label values must be strings
spec:
type: ClusterIP
selector:
cmii.type: backend
cmii.app: cmii-fly-center
ports:
- name: backend-tcp
port: 8080
protocol: TCP
targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-sky-converge
namespace: sc-my-uav-260202
labels:
cmii.type: backend
cmii.app: cmii-sky-converge
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: "2.0"  # quoted — bare 2.0 is a YAML float; label values must be strings
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: backend
cmii.app: cmii-sky-converge
template:
metadata:
labels:
cmii.type: backend
cmii.app: cmii-sky-converge
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- mianyang
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-sky-converge
image: 192.168.1.4:8033/cmii/cmii-sky-converge:2.0.0-012601
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: sc-my-uav-260202
- name: APPLICATION_NAME
value: cmii-sky-converge
- name: CUST_JAVA_OPTS
value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
- name: NACOS_REGISTRY
value: "helm-nacos:8848"
- name: NACOS_DISCOVERY_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NACOS_DISCOVERY_PORT
value: "8080"
- name: BIZ_CONFIG_GROUP
value: "2.0"  # quoted — bare 2.0 is a YAML float; EnvVar.value must be a string
- name: SYS_CONFIG_GROUP
value: "2.0"  # quoted — bare 2.0 is a YAML float; EnvVar.value must be a string
- name: NACOS_USERNAME
value: "developer"
- name: NACOS_PASSWORD
value: "Deve@9128201"
- name: IMAGE_NAME
value: 192.168.1.4:8033/cmii/cmii-sky-converge:2.0.0-012601
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NODE_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: LIMIT_CPU
valueFrom:
resourceFieldRef:
containerName: cmii-sky-converge
resource: limits.cpu
- name: LIMIT_MEMORY
valueFrom:
resourceFieldRef:
containerName: cmii-sky-converge
resource: limits.memory
- name: REQUEST_CPU
valueFrom:
resourceFieldRef:
containerName: cmii-sky-converge
resource: requests.cpu
- name: REQUEST_MEMORY
valueFrom:
resourceFieldRef:
containerName: cmii-sky-converge
resource: requests.memory
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
livenessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
startupProbe:
httpGet:
path: /cmii/health
port: pod-port
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 3
periodSeconds: 20
successThreshold: 1
failureThreshold: 5
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
readOnly: false
subPath: sc-my-uav-260202/cmii-sky-converge
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
name: cmii-sky-converge
namespace: sc-my-uav-260202
labels:
cmii.type: backend
cmii.app: cmii-sky-converge
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: "2.0"  # quoted — bare 2.0 is a YAML float; label values must be strings
spec:
type: ClusterIP
selector:
cmii.type: backend
cmii.app: cmii-sky-converge
ports:
- name: backend-tcp
port: 8080
protocol: TCP
targetPort: 8080

View File

@@ -0,0 +1,686 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-awareness
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "awareness",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-lite
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "lite",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-flight-control
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "flight-control",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
# NOTE(review): removed duplicate tenant-prefix-uasms and tenant-prefix-uas ConfigMaps — byte-identical definitions already appear earlier in this file; applying them twice is redundant (last-applied wins)
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-iot
namespace: sc-my-uav-260202
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "260202",
CloudHOST: "36.133.66.183:8088",
ApplicationShortName: "iot",
AppClientId: "empty"
}

View File

@@ -0,0 +1,315 @@
---
# ------------------- Dashboard Namespace ------------------- #
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
# ------------------- Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# ------------------- Dashboard Service (NodePort 39999) ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
nodePort: 39999  # NOTE(review): outside the default NodePort range 30000-32767 — confirm --service-node-port-range is extended on this cluster
selector:
k8s-app: kubernetes-dashboard
---
# ------------------- Dashboard Secrets ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
# ------------------- Dashboard Role (FIXED) ------------------- #
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-minimal
namespace: kubernetes-dashboard
rules:
# [修复] 允许创建 Secrets解决 panic 问题
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# 允许对特定 Secrets 进行操作
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# ConfigMaps 权限
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Metrics 权限
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# ------------------- Dashboard RoleBinding ------------------- #
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-minimal
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: 192.168.1.4:8033/cmii/dashboard:v2.7.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
---
# ------------------- Metrics Scraper Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
# ------------------- Metrics Scraper Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      # seccomp: the alpha annotation (seccomp.security.alpha.kubernetes.io/pod)
      # used previously was deprecated in v1.19 and is ignored from v1.27 on,
      # so on a 1.30 cluster it did nothing. The first-class securityContext
      # field below is the supported equivalent of 'runtime/default'.
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: dashboard-metrics-scraper
          image: 192.168.1.4:8033/cmii/metrics-scraper:v1.0.8
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          # Hardened container: no privilege escalation, read-only root FS,
          # non-root UID/GID (matches the dashboard container above).
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      volumes:
        - name: tmp-volume
          emptyDir: {}
      # Allow scheduling onto control-plane nodes (both legacy and current
      # taint keys are tolerated).
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          effect: NoSchedule
---
# ==================================================================
# 自定义用户配置部分 (ADMIN & READ-ONLY)
# ==================================================================
# ------------------- 1. Admin User (全部权限) ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
---
# ------------------- 2. Read-Only User (只读+看日志) ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
name: read-only-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: dashboard-view-with-logs
rules:
  # Core ("") API group: read-only access to workloads, storage objects,
  # nodes and namespaces.
  # Fix: "persistentvolumeclaims" was listed twice in the original resources
  # list; the redundant duplicate has been removed (no behavior change).
  - apiGroups: [""]
    resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumes", "namespaces"]
    verbs: ["get", "list", "watch"]
  # Pod logs — this is what distinguishes this role from a plain "view" role.
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["batch"]
    resources: ["cronjobs", "jobs"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["networking.k8s.io"]
    resources: ["ingresses", "networkpolicies"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["events.k8s.io"]
    resources: ["events"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: read-only-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: dashboard-view-with-logs
subjects:
- kind: ServiceAccount
name: read-only-user
namespace: kubernetes-dashboard

View File

@@ -0,0 +1,664 @@
---
---
# ============== Secret - credential management ==============
# Passwords for the EMQX deployment in this namespace. The
# dashboard-admin-password key is injected into the init-dashboard sidecar
# (secretKeyRef DASHBOARD_ADMIN_PASSWORD) which replaces the default
# "public" Dashboard password at startup.
# NOTE(review): credentials are committed here in plain text — Kubernetes
# Secrets are only base64-encoded at rest unless etcd encryption is enabled;
# consider an external/sealed secret mechanism.
apiVersion: v1
kind: Secret
metadata:
  name: emqx-credentials
  namespace: sc-my-uav-260202
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: "2.0"
type: Opaque
stringData:
  # Dashboard administrator password
  dashboard-admin-password: "odD8#Ve7.B"
  # MQTT user password
  mqtt-admin-password: "odD8#Ve7.B"
---
# ============== ServiceAccount ==============
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: sc-my-uav-260202
---
# ============== Role - RBAC ==============
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: helm-emqxs
namespace: sc-my-uav-260202
rules:
- apiGroups: [""]
resources:
- endpoints
- pods
verbs:
- get
- watch
- list
---
# ============== RoleBinding ==============
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: helm-emqxs
namespace: sc-my-uav-260202
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: sc-my-uav-260202
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
# ============== ConfigMap - Bootstrap配置文件 ==============
apiVersion: v1
kind: ConfigMap
metadata:
name: emqx-bootstrap-config
namespace: sc-my-uav-260202
labels:
cmii.type: middleware
cmii.app: helm-emqxs
data:
# 主配置文件 - 覆盖默认配置
emqx.conf: |
# 节点配置
node {
name = "emqx@${POD_NAME}.helm-emqxs-headless.sc-my-uav-260202.svc.cluster.local"
cookie = "emqx-cluster-cookie-secret"
data_dir = "/opt/emqx/data"
}
# 集群配置
cluster {
name = emqxcl
# 单节点 建议为 manual 多节点为k8s
discovery_strategy = manual
k8s {
apiserver = "https://kubernetes.default.svc.cluster.local:443"
service_name = "helm-emqxs-headless"
# 这里可以改为 hostname
address_type = dns
namespace = "sc-my-uav-260202"
suffix = "svc.cluster.local"
}
}
# 日志配置
log {
console {
enable = true
level = info
}
file {
enable = true
level = warning
path = "/opt/emqx/log"
}
}
# Dashboard配置
dashboard {
listeners.http {
bind = "0.0.0.0:18083"
}
default_username = "admin"
default_password = "public"
}
# 监听器配置
listeners.tcp.default {
bind = "0.0.0.0:1883"
max_connections = 1024000
}
listeners.ws.default {
bind = "0.0.0.0:8083"
max_connections = 1024000
websocket.mqtt_path = "/mqtt"
}
listeners.ssl.default {
bind = "0.0.0.0:8883"
max_connections = 512000
}
# 认证配置 - 使用内置数据库
authentication = [
{
mechanism = password_based
backend = built_in_database
user_id_type = username
password_hash_algorithm {
name = sha256
salt_position = suffix
}
# Bootstrap文件路径 - 用于初始化用户
bootstrap_file = "/opt/emqx/data/bootstrap_users.json"
bootstrap_type = plain
}
]
# 授权配置
authorization {
no_match = deny
deny_action = disconnect
sources = [
{
type = built_in_database
enable = true
}
]
}
# MQTT协议配置
mqtt {
max_packet_size = "1MB"
max_clientid_len = 65535
max_topic_levels = 128
max_qos_allowed = 2
max_topic_alias = 65535
retain_available = true
wildcard_subscription = true
shared_subscription = true
}
---
# ============== ConfigMap - Users & ACL (strict JSON format) ==============
# bootstrap_users.json is copied into /opt/emqx/data by the StatefulSet's
# init container and read by EMQX's built_in_database authenticator
# (bootstrap_file in emqx.conf); bootstrap_acl.json is mounted at /bootstrap
# and pushed through the management API by init-dashboard.sh.
# NOTE(review): MQTT credentials sit in plain text in a ConfigMap — consider
# sourcing them from the emqx-credentials Secret instead.
apiVersion: v1
kind: ConfigMap
metadata:
  name: emqx-bootstrap-users
  namespace: sc-my-uav-260202
data:
  bootstrap_users.json: |
    [
      { "user_id": "admin", "password": "odD8#Ve7.B", "is_superuser": true },
      { "user_id": "cmlc", "password": "odD8#Ve7.B", "is_superuser": false }
    ]
  # Standard JSON array format — since jq is available in the init sidecar,
  # this is the least error-prone representation to parse.
  bootstrap_acl.json: |
    [
      {
        "username": "admin",
        "rules": [
          {"action": "all", "permission": "allow", "topic": "#"}
        ]
      },
      {
        "username": "cmlc",
        "rules": [
          {"action": "publish", "permission": "allow", "topic": "#"},
          {"action": "subscribe", "permission": "allow", "topic": "#"}
        ]
      }
    ]
---
# ============== ConfigMap - initialization script ==============
# Runs in the init-dashboard sidecar: waits for the EMQX API, rotates the
# default Dashboard password, then imports ACL rules from bootstrap_acl.json.
# Fixes vs previous revision: progress labels were inconsistent ("[1/4]",
# "[2/4]" then "[3/3]" for a three-step script) and step 3 bypassed the
# log() helper with bare echo — both unified; logic is unchanged.
apiVersion: v1
kind: ConfigMap
metadata:
  name: emqx-init-dashboard
  namespace: sc-my-uav-260202
data:
  init-dashboard.sh: |
    #!/bin/bash
    set -e
    DASHBOARD_USER="admin"
    DASHBOARD_PASS="${DASHBOARD_ADMIN_PASSWORD}"
    EMQX_API="http://localhost:18083/api/v5"
    ACL_FILE="/bootstrap/bootstrap_acl.json"
    # 辅助函数:打印带时间戳的日志
    log() {
      echo "[$(date +'%H:%M:%S')] $1"
    }
    log "======================================"
    log "初始化 Dashboard 与 ACL (Debug Version)"
    log "======================================"
    # ----------------------------------------------------------------
    # 1. 等待 EMQX API 就绪
    # ----------------------------------------------------------------
    log "[1/3] 等待 EMQX API 就绪..."
    for i in $(seq 1 60); do
      if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
        log "✓ EMQX API 已就绪"
        break
      fi
      if [ $i -eq 60 ]; then
        log "✗ EMQX API 启动超时"
        exit 1
      fi
      sleep 5
    done
    # ----------------------------------------------------------------
    # 2. 修改 Dashboard 密码
    # ----------------------------------------------------------------
    log "[2/3] 检查/更新 Dashboard 密码..."
    # 获取 Token (尝试默认密码)
    LOGIN_RESP=$(curl -s -X POST "${EMQX_API}/login" \
      -H 'Content-Type: application/json' \
      -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"public\"}")
    TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
    if [ -n "$TOKEN" ]; then
      log "  检测到默认密码,正在更新..."
      curl -s -f -X POST "${EMQX_API}/users/${DASHBOARD_USER}/change_pwd" \
        -H "Authorization: Bearer ${TOKEN}" \
        -H 'Content-Type: application/json' \
        -d "{\"old_pwd\":\"public\",\"new_pwd\":\"${DASHBOARD_PASS}\"}"
      log "  ✓ Dashboard 密码已更新"
    else
      log "  无法使用默认密码登录,跳过更新(可能已修改)"
    fi
    # ----------------------------------------------------------------
    # 3. 导入 ACL 规则
    # ----------------------------------------------------------------
    log "[3/3] 导入ACL规则..."
    # 重新登录获取最新 Token
    LOGIN_RESP=$(curl -sS -X POST "${EMQX_API}/login" \
      -H 'Content-Type: application/json' \
      -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}")
    TOKEN=$(echo "$LOGIN_RESP" | jq -r '.token // empty')
    if [ -z "$TOKEN" ]; then
      log "  ✗ 无法获取Token请检查密码设置"
      exit 0
    fi
    if [ -f "$ACL_FILE" ]; then
      log "  正在解析 ACL 文件: $ACL_FILE"
      if ! jq -e . "$ACL_FILE" >/dev/null 2>&1; then
        log "  ✗ ACL 文件 JSON 格式错误,跳过处理"
        exit 0
      fi
      # NOTE: the while-loop runs in a pipeline subshell; an "exit 1" inside
      # it fails the pipeline, and set -e then aborts the whole script.
      jq -c '.[]' "$ACL_FILE" | while read -r user_config; do
        USERNAME=$(echo "$user_config" | jq -r '.username // empty')
        # PUT/POST 都需要 username + rules(username 是 required)
        REQ_BODY=$(echo "$user_config" | jq -c '{username: .username, rules: .rules}')
        if [ -z "$USERNAME" ]; then
          log "  ✗ ACL 条目缺少 username,跳过"
          continue
        fi
        log "  配置用户 ${USERNAME} 的ACL规则..."
        # 1) 优先 PUT(覆盖更新)
        http_code=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
          -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}" \
          -H "Authorization: Bearer ${TOKEN}" \
          -H 'Content-Type: application/json' \
          -d "$REQ_BODY")
        if [ "$http_code" = "204" ]; then
          log "  ✓ PUT 更新成功"
        elif [ "$http_code" = "404" ]; then
          # 2) 不存在则 POST 创建
          http_code2=$(curl -sS -o /tmp/emqx_acl_resp.json -w '%{http_code}' \
            -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
            -H "Authorization: Bearer ${TOKEN}" \
            -H 'Content-Type: application/json' \
            -d "$REQ_BODY")
          if [ "$http_code2" = "204" ]; then
            log "  ✓ POST 创建成功"
          else
            log "  ✗ POST 失败 (HTTP ${http_code2}):$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
            exit 1
          fi
        else
          log "  ✗ PUT 失败 (HTTP ${http_code}):$(cat /tmp/emqx_acl_resp.json 2>/dev/null || true)"
          exit 1
        fi
        # 3) 导入后验证(可选但强烈建议保留)
        verify_code=$(curl -sS -o /tmp/emqx_acl_verify.json -w '%{http_code}' \
          -H "Authorization: Bearer ${TOKEN}" \
          "${EMQX_API}/authorization/sources/built_in_database/rules/users/${USERNAME}")
        if [ "$verify_code" = "200" ]; then
          log "  ✓ 验证成功:$(cat /tmp/emqx_acl_verify.json | jq -c '.')"
        else
          log "  ✗ 验证失败 (HTTP ${verify_code}):$(cat /tmp/emqx_acl_verify.json 2>/dev/null || true)"
          exit 1
        fi
      done
      log "  ✓ ACL 规则导入完成"
    else
      log "  未找到 ACL 文件"
    fi
---
# ============== StatefulSet ==============
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: sc-my-uav-260202
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: "2.0"
spec:
replicas: 1
serviceName: helm-emqxs-headless
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: "2.0"
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- mianyang
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: cmii.app
operator: In
values:
- helm-emqxs
topologyKey: kubernetes.io/hostname
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
securityContext:
fsGroup: 1000
runAsUser: 1000
# InitContainer - 准备bootstrap文件
initContainers:
- name: prepare-bootstrap
# 动态选择 tools 镜像
image: 192.168.1.4:8033/cmii/tools:1.0
imagePullPolicy: IfNotPresent
# =========================================================
# 权限: 必须以 root 身份运行才能 chown
# =========================================================
securityContext:
runAsUser: 0
command:
- /bin/sh
- -c
- |
echo "准备bootstrap文件..."
# 创建数据目录
mkdir -p /opt/emqx/data
# 复制bootstrap文件到数据目录
# 只在文件不存在时复制,避免覆盖已有数据
if [ ! -f /opt/emqx/data/bootstrap_users.json ]; then
cp /bootstrap-src/bootstrap_users.json /opt/emqx/data/
echo "✓ 已复制用户bootstrap文件"
else
echo " 用户bootstrap文件已存在跳过"
fi
# 设置权限 (现在有root权限可以成功)
chown -R 1000:1000 /opt/emqx/data
echo "✓ Bootstrap准备完成"
volumeMounts:
- name: emqx-data
mountPath: /opt/emqx/data
- name: bootstrap-users
mountPath: /bootstrap-src
containers:
# 主容器 - EMQX
- name: emqx
# 动态选择 emqx 镜像
image: 192.168.1.4:8033/cmii/emqx:5.8.8
imagePullPolicy: IfNotPresent
env:
# Pod信息
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: EMQX_DATA_DIR
value: "/opt/emqx/data"
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: ws
containerPort: 8083
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
resources:
requests:
cpu: "500m"
memory: "512Mi"
limits:
cpu: "2000m"
memory: "2Gi"
livenessProbe:
httpGet:
path: /status
port: 18083
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
failureThreshold: 3
readinessProbe:
httpGet:
path: /status
port: 18083
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
startupProbe:
httpGet:
path: /status
port: 18083
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 30
volumeMounts:
- name: emqx-data
mountPath: /opt/emqx/data
# 使用 subPath 挂载单个配置文件,避免覆盖目录
- name: bootstrap-config
mountPath: /opt/emqx/etc/emqx.conf
subPath: emqx.conf
# Sidecar - 初始化Dashboard密码和ACL
- name: init-dashboard
# 动态选择 tools 镜像
image: 192.168.1.4:8033/cmii/tools:1.0
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- |
# 等待主容器启动
echo "等待EMQX启动..."
sleep 20
# 执行初始化
/bin/sh /scripts/init-dashboard.sh
# 保持运行
echo "初始化完成,进入守护模式..."
while true; do sleep 3600; done
env:
- name: DASHBOARD_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: emqx-credentials
key: dashboard-admin-password
resources:
requests:
cpu: "100m"
memory: "64Mi"
limits:
cpu: "200m"
memory: "128Mi"
volumeMounts:
- name: init-script
mountPath: /scripts
- name: bootstrap-users
mountPath: /bootstrap
volumes:
- name: bootstrap-config
configMap:
name: emqx-bootstrap-config
- name: bootstrap-users
configMap:
name: emqx-bootstrap-users
- name: init-script
configMap:
name: emqx-init-dashboard
defaultMode: 0755
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
---
# ============== Service - Headless ==============
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: sc-my-uav-260202
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: "2.0"
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
targetPort: 1883
- name: mqttssl
port: 8883
targetPort: 8883
- name: ws
port: 8083
targetPort: 8083
- name: dashboard
port: 18083
targetPort: 18083
- name: ekka
port: 4370
targetPort: 4370
---
# ============== Service - NodePort ==============
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: sc-my-uav-260202
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: "2.0"
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
targetPort: 1883
nodePort: 31883
- name: dashboard
port: 18083
targetPort: 18083
nodePort: 38085
- name: ws
port: 8083
targetPort: 8083
nodePort: 38083
- name: mqttssl
port: 8883
targetPort: 8883

View File

@@ -0,0 +1,114 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-cm
namespace: sc-my-uav-260202
labels:
cmii.type: frontend
data:
nginx.conf: |
server {
listen 9528;
server_name localhost;
gzip on;
location / {
root /home/cmii-platform/dist;
index index.html index.htm;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-lite
namespace: sc-my-uav-260202
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-lite
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: 2.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-lite
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-lite
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-lite
image: 192.168.1.4:8033/cmii/cmii-uav-platform-lite:2.0.0-2026012703-noicp
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: sc-my-uav-260202
- name: APPLICATION_NAME
value: cmii-uav-platform-lite
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-lite
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-lite
namespace: sc-my-uav-260202
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-lite
octopus.control: frontend-app-wdd
app.kubernetes.io/version: "2.0"
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-lite
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528

View File

@@ -0,0 +1,995 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: sc-my-uav-260202
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.2
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/enable-cors: 'true'
nginx.ingress.kubernetes.io/rewrite-target: /$1
spec:
rules:
- host: fake-domain.sc-my-uav-260202.io
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform
port:
number: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-suav-platform-supervision
port:
number: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-suav-platform-supervisionh5
port:
number: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform
port:
number: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-ai-brain
port:
number: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-armypeople
port:
number: 9528
- path: /awareness/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-awareness
port:
number: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-base
port:
number: 9528
- path: /blockchain/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-blockchain
port:
number: 9528
- path: /classification/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-classification
port:
number: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-cms-portal
port:
number: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-detection
port:
number: 9528
- path: /dikongzhixingh5/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-dikongzhixingh5
port:
number: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-dispatchh5
port:
number: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-emergency-rescue
port:
number: 9528
- path: /eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-eventsh5
port:
number: 9528
- path: /flight-control/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-flight-control
port:
number: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-hljtt
port:
number: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-hyperspectral
port:
number: 9528
- path: /iot/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-iot-manager
port:
number: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-jiangsuwenlv
port:
number: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-logistics
port:
number: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-media
port:
number: 9528
- path: /mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-mianyangbackend
port:
number: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-multiterminal
port:
number: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-mws
port:
number: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-oms
port:
number: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-open
port:
number: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-pilot2-to-cloud
port:
number: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-qingdao
port:
number: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-qinghaitourism
port:
number: 9528
- path: /renyike/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-renyike
port:
number: 9528
- path: /scanner/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-scanner
port:
number: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-security
port:
number: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-securityh5
port:
number: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-seniclive
port:
number: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-share
port:
number: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-smauth
port:
number: 9528
- path: /smsecret/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-smsecret
port:
number: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-splice
port:
number: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-threedsimulation
port:
number: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-traffic
port:
number: 9528
- path: /uas/?(.*)
  pathType: ImplementationSpecific
  backend:
    service:
      name: cmii-uav-platform-uas
      port:
        number: 9528
# NOTE(review): duplicate path — "/uas/?(.*)" already appears immediately
# above routing to cmii-uav-platform-uas, so this uaskny rule is shadowed
# and never matched. Presumably the intended path is "/uaskny/?(.*)" (to
# match the service name) — confirm with the frontend team before changing.
- path: /uas/?(.*)
  pathType: ImplementationSpecific
  backend:
    service:
      name: cmii-uav-platform-uaskny
      port:
        number: 9528
- path: /uasms/?(.*)
  pathType: ImplementationSpecific
  backend:
    service:
      name: cmii-uav-platform-uasms
      port:
        number: 9528
# NOTE(review): duplicate path — "/uasms/?(.*)" duplicates the entry above
# (cmii-uav-platform-uasms), so this uasmskny rule is shadowed. Presumably
# "/uasmskny/?(.*)" was intended — confirm.
- path: /uasms/?(.*)
  pathType: ImplementationSpecific
  backend:
    service:
      name: cmii-uav-platform-uasmskny
      port:
        number: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-platform-visualization
port:
number: 9528
- path: /uavmsmanager/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uavms-platform-manager
port:
number: 9528
- path: /secenter/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uavms-platform-security-center
port:
number: 9528
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: sc-my-uav-260202
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.2
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/enable-cors: 'true'
spec:
rules:
- host: cmii-admin-data.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-admin-data
port:
number: 8080
- host: cmii-admin-gateway.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-admin-gateway
port:
number: 8080
- host: cmii-admin-user.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-admin-user
port:
number: 8080
- host: cmii-app-release.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-app-release
port:
number: 8080
- host: cmii-open-gateway.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-open-gateway
port:
number: 8080
- host: cmii-sky-converge.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-sky-converge
port:
number: 8080
- host: cmii-suav-supervision.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-suav-supervision
port:
number: 8080
- host: cmii-uas-datahub.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uas-datahub
port:
number: 8080
- host: cmii-uas-gateway.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uas-gateway
port:
number: 8080
- host: cmii-uas-lifecycle.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uas-lifecycle
port:
number: 8080
- host: cmii-uav-advanced5g.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-advanced5g
port:
number: 8080
- host: cmii-uav-airspace.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-airspace
port:
number: 8080
- host: cmii-uav-alarm.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-alarm
port:
number: 8080
- host: cmii-uav-autowaypoint.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-autowaypoint
port:
number: 8080
- host: cmii-uav-brain.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-brain
port:
number: 8080
- host: cmii-uav-bridge.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-bridge
port:
number: 8080
- host: cmii-uav-cloud-live.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-cloud-live
port:
number: 8080
- host: cmii-uav-clusters.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-clusters
port:
number: 8080
- host: cmii-uav-cms.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-cms
port:
number: 8080
- host: cmii-uav-data-post-process.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-data-post-process
port:
number: 8080
- host: cmii-uav-depotautoreturn.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-depotautoreturn
port:
number: 8080
- host: cmii-uav-developer.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-developer
port:
number: 8080
- host: cmii-uav-device.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-device
port:
number: 8080
- host: cmii-uav-emergency.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-emergency
port:
number: 8080
- host: cmii-uav-fwdd.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-fwdd
port:
number: 8080
- host: cmii-uav-gateway.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-gateway
port:
number: 8080
- host: cmii-uav-gis-server.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-gis-server
port:
number: 8080
- host: cmii-uav-grid-datasource.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-grid-datasource
port:
number: 8080
- host: cmii-uav-grid-engine.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-grid-engine
port:
number: 8080
- host: cmii-uav-grid-manage.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-grid-manage
port:
number: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-industrial-portfolio
port:
number: 8080
- host: cmii-uav-integration.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-integration
port:
number: 8080
- host: cmii-uav-iot-dispatcher.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-iot-dispatcher
port:
number: 8080
- host: cmii-uav-iot-manager.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-iot-manager
port:
number: 8080
- host: cmii-uav-kpi-monitor.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-kpi-monitor
port:
number: 8080
- host: cmii-uav-logger.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-logger
port:
number: 8080
- host: cmii-uav-material-warehouse.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-material-warehouse
port:
number: 8080
- host: cmii-uav-mission.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-mission
port:
number: 8080
- host: cmii-uav-mqtthandler.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-mqtthandler
port:
number: 8080
- host: cmii-uav-multilink.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-multilink
port:
number: 8080
- host: cmii-uav-notice.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-notice
port:
number: 8080
- host: cmii-uav-oauth.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-oauth
port:
number: 8080
- host: cmii-uav-process.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-process
port:
number: 8080
- host: cmii-uav-sec-awareness.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-sec-awareness
port:
number: 8080
- host: cmii-uav-security-trace.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-security-trace
port:
number: 8080
- host: cmii-uav-sense-adapter.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-sense-adapter
port:
number: 8080
- host: cmii-uav-surveillance.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-surveillance
port:
number: 8080
- host: cmii-uav-sync.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-sync
port:
number: 8080
- host: cmii-uav-tcp-server.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-tcp-server
port:
number: 8080
- host: cmii-uav-threedsimulation.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-threedsimulation
port:
number: 8080
- host: cmii-uav-tower.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-tower
port:
number: 8080
- host: cmii-uav-user.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-user
port:
number: 8080
- host: cmii-uav-watchdog.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-watchdog
port:
number: 8080
- host: cmii-uav-waypoint.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-waypoint
port:
number: 8080
- host: cmii-uavms-pyfusion.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uavms-pyfusion
port:
number: 8080
- host: cmii-uavms-security-center.uavcloud-sc-my-202602.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: cmii-uavms-security-center
port:
number: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: sc-my-uav-260202
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.2
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/enable-cors: 'true'
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/proxy-read-timeout: '3600'
nginx.ingress.kubernetes.io/proxy-send-timeout: '3600'
spec:
rules:
- host: fake-domain.sc-my-uav-260202.io
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-admin-gateway
port:
number: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-open-gateway
port:
number: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uav-gateway
port:
number: 8080
- path: /uas/api/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-uas-gateway
port:
number: 8080
- path: /converge/?(.*)
pathType: ImplementationSpecific
backend:
service:
name: cmii-sky-converge
port:
number: 8080

View File

@@ -0,0 +1,832 @@
---
# Frontend applications Ingress: one path prefix per SPA frontend.
# NOTE(review): apiVersion networking.k8s.io/v1beta1 was removed in
# Kubernetes 1.22 — this manifest only applies on clusters < 1.22. For newer
# clusters migrate to networking.k8s.io/v1 (backend.service.name/port.number).
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: frontend-applications-ingress
  namespace: sc-my-uav-260202
  labels:
    type: frontend
    octopus.control: all-ingress-config-wdd
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: "2.0"
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    # /$1 rewrite strips the per-app prefix captured by ?(.*) in each path.
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    # The snippet below redirects bare /app to /app/ for each frontend.
    # NOTE(review): the /uas and /uasms rewrite lines are duplicated inside
    # the snippet (harmless to nginx, but dead weight) — deduplicate.
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/awareness)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/blockchain)$ $1/ redirect;
rewrite ^(/classification)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dikongzhixingh5)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/eventsh5)$ $1/ redirect;
rewrite ^(/flight-control)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/iot)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/lite)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/mianyangbackend)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/pilot2cloud)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/renyike)$ $1/ redirect;
rewrite ^(/scanner)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/smauth)$ $1/ redirect;
rewrite ^(/smsecret)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
rewrite ^(/uavmsmanager)$ $1/ redirect;
rewrite ^(/secenter)$ $1/ redirect;
spec:
rules:
- host: fake-domain.sc-my-uav-260202.io
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /awareness/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-awareness
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /blockchain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-blockchain
servicePort: 9528
- path: /classification/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-classification
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dikongzhixingh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dikongzhixingh5
servicePort: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-eventsh5
servicePort: 9528
- path: /flight-control/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-flight-control
servicePort: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /iot/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-iot-manager
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /lite/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-lite
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mianyangbackend
servicePort: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-pilot2-to-cloud
servicePort: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /renyike/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-renyike
servicePort: 9528
- path: /scanner/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-scanner
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smauth
servicePort: 9528
- path: /smsecret/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smsecret
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
          # NOTE(review): duplicate path — /uas/?(.*) is declared twice with
          # different backends (…-uas and …-uaskny); only one can take effect.
          # The second entry was probably meant to be /uaskny/?(.*) — confirm.
          - path: /uas/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-uas
              servicePort: 9528
          - path: /uas/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-uaskny
              servicePort: 9528
          # NOTE(review): same issue — /uasms/?(.*) is duplicated; the second
          # backend (…-uasmskny) is unreachable. Probably /uasmskny/?(.*) —
          # confirm against the rewrite list in the annotation snippet.
          - path: /uasms/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-uasms
              servicePort: 9528
          - path: /uasms/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-uasmskny
              servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /uavmsmanager/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-manager
servicePort: 9528
- path: /secenter/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-security-center
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: sc-my-uav-260202
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: "2.0"
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-sky-converge.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-sky-converge
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-datahub.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-datahub
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-advanced5g.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-advanced5g
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-fwdd.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-fwdd
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-iot-dispatcher.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-dispatcher
servicePort: 8080
- host: cmii-uav-iot-manager.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-manager
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sec-awareness.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sec-awareness
servicePort: 8080
- host: cmii-uav-security-trace.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-security-trace
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-sync.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sync
servicePort: 8080
- host: cmii-uav-tcp-server.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tcp-server
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-watchdog.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-watchdog
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
- host: cmii-uavms-pyfusion.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-pyfusion
servicePort: 8080
- host: cmii-uavms-security-center.uavcloud-mianyang.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-security-center
servicePort: 8080
---
# Aggregated API-gateway Ingress (per-environment variant of the v1 document
# earlier in this dump).
# Fixed: migrated from networking.k8s.io/v1beta1 — removed in Kubernetes 1.22 —
# to networking.k8s.io/v1; backend moved to service.name / service.port.number.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: all-gateways-ingress
  namespace: sc-my-uav-260202
  labels:
    type: api-gateway
    octopus.control: all-ingress-config-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: "2.0"
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    # /$1 strips the gateway prefix captured by ?(.*) in each path.
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    # NOTE(review): nonstandard websocket headers — the conventional pair is
    # "Upgrade: $http_upgrade" + "Connection: upgrade". The custom
    # upgradePrefix header may be deliberate; confirm the gateways expect it.
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header upgradePrefix $http_upgrade;
      proxy_set_header Connection "upgradePrefix";
spec:
  rules:
    - host: fake-domain.sc-my-uav-260202.io
      http:
        paths:
          - path: /oms/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: cmii-admin-gateway
                port:
                  number: 8080
          - path: /open/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: cmii-open-gateway
                port:
                  number: 8080
          - path: /api/?(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: cmii-uav-gateway
                port:
                  number: 8080
          - path: /uas/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: cmii-uas-gateway
                port:
                  number: 8080
          - path: /converge/?(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: cmii-sky-converge
                port:
                  number: 8080

View File

@@ -0,0 +1,78 @@
---
apiVersion: v1
kind: Service
metadata:
  name: helm-mongo
  namespace: sc-my-uav-260202
  labels:
    cmii.app: helm-mongo
    cmii.type: middleware
    helm.sh/chart: mongo-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: "2.0"
spec:
  # NodePort exposes MongoDB on every cluster node.
  type: NodePort
  selector:
    cmii.app: helm-mongo
    cmii.type: middleware
  ports:
    - port: 27017
      name: server-27017
      targetPort: 27017
      # NOTE(review): 37017 is outside the default NodePort range
      # (30000-32767); this only works if the apiserver runs with an extended
      # --service-node-port-range — confirm the cluster configuration.
      nodePort: 37017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: sc-my-uav-260202
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: "2.0"
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: "2.0"
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: helm-mongo
image: 192.168.1.4:8033/cmii/mongo:5.0
resources: {}
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---

View File

@@ -0,0 +1,410 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
annotations: {}
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
  name: helm-mysql
  namespace: sc-my-uav-260202
  labels:
    app.kubernetes.io/name: mysql-db
    octopus.control: mysql-db-wdd
    app.kubernetes.io/release: sc-my-uav-260202
    app.kubernetes.io/managed-by: octopus
type: Opaque
data:
  # NOTE(review): base64 is encoding, not encryption — these credentials are
  # effectively plaintext in version control; consider sourcing them from an
  # external secret store instead of committing them.
  mysql-root-password: "UXpmWFFoZDNiUQ=="
  mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=32M
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sc-my-uav-260202
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sc-my-uav-260202
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sc-my-uav-260202
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sc-my-uav-260202
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sc-my-uav-260202
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 192.168.1.4:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 192.168.1.4:8033/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/sc-my-uav-260202/

View File

@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-nacos-cm
  namespace: sc-my-uav-260202
  labels:
    cmii.app: helm-nacos
    cmii.type: middleware
    octopus.control: nacos-wdd
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "2.0"
data:
  # MySQL connection settings consumed by the nacos-server StatefulSet via
  # configMapKeyRef env entries.
  mysql.db.name: "cmii_nacos_config"
  mysql.db.host: "helm-mysql"
  mysql.port: "3306"
  mysql.user: "k8s_admin"
  # NOTE(review): database password stored in clear text in a ConfigMap —
  # move it to a Secret and reference it via secretKeyRef.
  mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: sc-my-uav-260202
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "2.0"
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: sc-my-uav-260202
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "2.0"
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: "2.0"
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: 192.168.1.4:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---

View File

@@ -0,0 +1,38 @@
---
# Smoke test for the NFS StorageClass: the PVC below should be dynamically
# provisioned, and the pod writes a marker file into the provisioned volume.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    # must match metadata.name in the StorageClass manifest
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute"
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: nfs-prod-distribute
  resources:
    requests:
      storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  imagePullSecrets:
    - name: harborsecret
  containers:
    - name: test-pod
      image: 192.168.1.4:8033/cmii/busybox:latest
      command:
        - "/bin/sh"
      args:
        - "-c"
        # create a SUCCESS marker file, then exit
        - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1"
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim  # must match the PVC name above

View File

@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system #根据实际环境设定namespace,下面类同
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
# Grants cluster-scope permissions to the provisioner's ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: ClusterRole
  # name: nfs-client-provisioner-runner
  # NOTE(review): binds the provisioner to cluster-admin instead of the
  # purpose-built nfs-client-provisioner-runner ClusterRole defined above —
  # grossly over-privileged; restore the dedicated role unless it was broken.
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
# StorageClass served by the nfs-client-provisioner Deployment below.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-prod-distribute
# Must match the PROVISIONER_NAME env var of the provisioner Deployment.
provisioner: cmlc-nfs-storage
# FIX: these parameters had been swallowed into the trailing comment on the
# provisioner line ("...保持一致parameters: archiveOnDelete: ...") and were
# never parsed. archiveOnDelete "false" means released PV data is deleted
# rather than archived by the provisioner.
parameters:
  archiveOnDelete: "false"
---
# Dynamic NFS provisioner: watches PVCs for the nfs-prod-distribute
# StorageClass and creates subdirectories under the exported NFS path.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system  # must match the namespace used in the RBAC manifests
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    # single active provisioner; avoid overlap during a rollout
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      imagePullSecrets:
        - name: harborsecret
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: 192.168.1.4:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # must match the "provisioner" field of the StorageClass
            - name: PROVISIONER_NAME
              value: cmlc-nfs-storage
            - name: NFS_SERVER
              value: 192.168.1.6
            - name: NFS_PATH
              value: /var/lib/docker/nfs_data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.6
            path: /var/lib/docker/nfs_data

View File

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: sc-my-uav-260202
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: "2.0"
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: sc-my-uav-260202
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: "2.0"
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: sc-my-uav-260202
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: "2.0"
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: sc-my-uav-260202
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: "2.0"
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi

View File

@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
# Headless Service backing the RabbitMQ StatefulSet (stable per-pod DNS for
# clustering / peer discovery).
apiVersion: v1
kind: Service
metadata:
  name: helm-rabbitmq-headless
  namespace: sc-my-uav-260202
  labels:
    app.kubernetes.io/name: helm-rabbitmq
    helm.sh/chart: rabbitmq-8.26.1
    app.kubernetes.io/release: sc-my-uav-260202
    app.kubernetes.io/managed-by: rabbitmq
spec:
  clusterIP: None
  ports:
    - name: epmd
      port: 4369
      targetPort: epmd
    - name: amqp
      port: 5672
      targetPort: amqp
    - name: dist
      port: 25672
      targetPort: dist
    - name: dashboard
      port: 15672
      # FIX: was "stats" — the StatefulSet declares no containerPort named
      # "stats"; its management port (15672) is named "dashboard", so this
      # named targetPort could never resolve to a pod port.
      targetPort: dashboard
  selector:
    app.kubernetes.io/name: helm-rabbitmq
    app.kubernetes.io/release: sc-my-uav-260202
  # resolve pod DNS before readiness so peer discovery works during startup
  publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: sc-my-uav-260202
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: sc-my-uav-260202
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 192.168.1.4:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 192.168.1.4:8033/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq

View File

@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: sc-my-uav-260202
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: sc-my-uav-260202
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
# Redis master StatefulSet (single replica; data on emptyDir, i.e. NOT
# persisted across pod rescheduling — intentional per the volumes below).
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-redis-master
  namespace: sc-my-uav-260202
  labels:
    app.kubernetes.io/name: redis-db
    octopus.control: redis-db-wdd
    app.kubernetes.io/release: sc-my-uav-260202
    app.kubernetes.io/managed-by: octopus
    cmii.type: middleware
    cmii.app: redis
    app.kubernetes.io/component: master
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: redis-db
      app.kubernetes.io/release: sc-my-uav-260202
      cmii.type: middleware
      cmii.app: redis
      app.kubernetes.io/component: master
  serviceName: helm-redis-headless
  updateStrategy:
    rollingUpdate: {}
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/name: redis-db
        octopus.control: redis-db-wdd
        app.kubernetes.io/release: sc-my-uav-260202
        app.kubernetes.io/managed-by: octopus
        cmii.type: middleware
        cmii.app: redis
        app.kubernetes.io/component: master
      annotations:
        checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
        checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
        checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
        checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
    spec:
      affinity: {}
      securityContext:
        fsGroup: 1001
      serviceAccountName: helm-redis
      imagePullSecrets:
        - name: harborsecret
      terminationGracePeriodSeconds: 30
      containers:
        - name: redis
          image: 192.168.1.4:8033/cmii/redis:6.2.14-debian-11-r1
          imagePullPolicy: "Always"
          securityContext:
            runAsUser: 1001
          command:
            - /bin/bash
          args:
            - -c
            - /opt/bitnami/scripts/start-scripts/start-master.sh
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: REDIS_REPLICATION_MODE
              value: master
            - name: ALLOW_EMPTY_PASSWORD
              value: "no"
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: helm-redis
                  key: redis-password
            - name: REDIS_TLS_ENABLED
              value: "no"
            - name: REDIS_PORT
              value: "6379"
          ports:
            - name: redis
              containerPort: 6379
          livenessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            # One second longer than command timeout should prevent generation of zombie processes.
            timeoutSeconds: 6
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_liveness_local.sh 5
          readinessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            timeoutSeconds: 2
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_readiness_local.sh 1
          resources:
            limits:
              cpu: "2"
              memory: 8Gi
            requests:
              cpu: "2"
              memory: 8Gi
          volumeMounts:
            - name: start-scripts
              mountPath: /opt/bitnami/scripts/start-scripts
            - name: health
              mountPath: /health
            # FIX: removed a dangling bare "subPath:" key (parsed as null) that
            # had been left under this mount by templating; it carried no value.
            - name: redis-data
              mountPath: /data
            - name: config
              mountPath: /opt/bitnami/redis/mounted-etc
            - name: redis-tmp-conf
              mountPath: /opt/bitnami/redis/etc/
            - name: tmp
              mountPath: /tmp
      volumes:
        - name: start-scripts
          configMap:
            name: helm-redis-scripts
            defaultMode: 0755
        - name: health
          configMap:
            name: helm-redis-health
            defaultMode: 0755
        - name: config
          configMap:
            name: helm-redis-configuration
        - name: redis-tmp-conf
          emptyDir: {}
        - name: tmp
          emptyDir: {}
        - name: redis-data
          emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 0
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sc-my-uav-260202
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 192.168.1.4:8033/cmii/redis:6.2.14-debian-11-r1
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.sc-my-uav-260202.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}

View File

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: sc-my-uav-260202
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://36.133.66.183:8088;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: sc-my-uav-260202
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30080
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: sc-my-uav-260202
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: sc-my-uav-260202
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: sc-my-uav-260202
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 192.168.1.4:8033/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 31935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 36.133.66.183
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: sc-my-uav-260202/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: sc-my-uav-260202/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: 192.168.1.4:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
env:
- name: OSS_ENDPOINT
value: 'http://helm-minio:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: sc-my-uav-260202/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs部分
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: sc-my-uav-260202
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 192.168.1.4:8033/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: sc-my-uav-260202
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: sc-my-uav-260202
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: sc-my-uav-260202
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 2.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 2.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 2.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://36.133.66.183:31935'
rtsp: 'rtsp://36.133.66.183:30554'
srt: 'srt://36.133.66.183:30556'
flv: 'http://36.133.66.183:30500'
hls: 'http://36.133.66.183:30500'
rtc: 'webrtc://36.133.66.183:30080'
replay: 'https://36.133.66.183:30333'
minio:
endpoint: http://helm-minio:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls

View File

@@ -0,0 +1,280 @@
---
# Source: influxdb/templates/networkpolicy.yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: helm-influxdb
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/instance: helm-influxdb
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: influxdb
app.kubernetes.io/version: 2.7.11
helm.sh/chart: influxdb-6.6.11
spec:
podSelector:
matchLabels:
app.kubernetes.io/instance: helm-influxdb
app.kubernetes.io/name: influxdb
app.kubernetes.io/component: influxdb
policyTypes:
- Ingress
- Egress
egress:
- {}
ingress:
# Allow inbound connections
- ports:
- port: 8086
protocol: TCP
- port: 8088
protocol: TCP
---
# Source: influxdb/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-influxdb
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/instance: helm-influxdb
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: influxdb
app.kubernetes.io/version: 2.7.11
helm.sh/chart: influxdb-6.6.11
app.kubernetes.io/component: influxdb
automountServiceAccountToken: false
---
# Source: influxdb/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: helm-influxdb
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/instance: helm-influxdb
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: influxdb
app.kubernetes.io/version: 2.7.11
helm.sh/chart: influxdb-6.6.11
type: Opaque
data:
admin-user-password: "WTFjJVJoI2ZIMw=="
admin-user-token: "WXVubkhKQVNBQWRqMjNyYXNRQVdkNjIxZXJHQVM4MmthcWo="
---
# Source: influxdb/templates/pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: helm-influxdb
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/instance: helm-influxdb
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: influxdb
app.kubernetes.io/version: 2.7.11
helm.sh/chart: influxdb-6.6.11
app.kubernetes.io/component: influxdb
spec:
storageClassName: nfs-prod-distribute
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "8Gi"
---
# Source: influxdb/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-influxdb
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/instance: helm-influxdb
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: influxdb
app.kubernetes.io/version: 2.7.11
helm.sh/chart: influxdb-6.6.11
app.kubernetes.io/component: influxdb
spec:
type: ClusterIP
sessionAffinity: None
ports:
- port: 8086
targetPort: http
protocol: TCP
name: http
nodePort: null
- port: 8088
targetPort: rpc
protocol: TCP
name: rpc
nodePort: null
selector:
app.kubernetes.io/instance: helm-influxdb
app.kubernetes.io/name: influxdb
app.kubernetes.io/component: influxdb
---
# Source: influxdb/templates/deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-influxdb
namespace: sc-my-uav-260202
labels:
app.kubernetes.io/component: influxdb
app.kubernetes.io/instance: helm-influxdb
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: influxdb
app.kubernetes.io/version: 2.7.11
helm.sh/chart: influxdb-6.6.11
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: influxdb
app.kubernetes.io/instance: helm-influxdb
app.kubernetes.io/name: influxdb
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: influxdb
app.kubernetes.io/instance: helm-influxdb
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: influxdb
app.kubernetes.io/version: 2.7.11
helm.sh/chart: influxdb-6.6.11
spec:
volumes:
- name: empty-dir
emptyDir: {}
- name: influxdb-credentials
secret:
secretName: helm-influxdb
defaultMode: 420
- name: data
persistentVolumeClaim:
claimName: helm-influxdb
containers:
- name: influxdb
image: 192.168.1.4:8033/cmii/influxdb:2.7.11-debian-12-r19
ports:
- name: http
containerPort: 8086
protocol: TCP
- name: rpc
containerPort: 8088
protocol: TCP
env:
- name: BITNAMI_DEBUG
value: 'true'
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: INFLUXDB_HTTP_AUTH_ENABLED
value: 'true'
- name: INFLUXDB_CREATE_USER_TOKEN
value: 'no'
- name: INFLUXDB_ADMIN_USER
value: cmlc
- name: INFLUXDB_ADMIN_USER_PASSWORD_FILE
value: /opt/bitnami/influxdb/secrets/admin-user-password
- name: INFLUXDB_ADMIN_USER_TOKEN_FILE
value: /opt/bitnami/influxdb/secrets/admin-user-token
- name: INFLUXDB_ADMIN_BUCKET
value: home
- name: INFLUXDB_ADMIN_ORG
value: docs
resources:
limits:
cpu: '4'
ephemeral-storage: 4Gi
memory: 4Gi
requests:
cpu: '2'
ephemeral-storage: 50Mi
memory: 4Gi
volumeMounts:
- name: empty-dir
mountPath: /tmp
subPath: tmp-dir
- name: empty-dir
mountPath: /opt/bitnami/influxdb/etc
subPath: app-conf-dir
- name: influxdb-credentials
mountPath: /opt/bitnami/influxdb/secrets/
- name: data
mountPath: /bitnami/influxdb
livenessProbe:
httpGet:
path: /
port: http
scheme: HTTP
initialDelaySeconds: 180
timeoutSeconds: 30
periodSeconds: 45
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- bash
- '-c'
- |
. /opt/bitnami/scripts/libinfluxdb.sh
influxdb_env
export INFLUX_USERNAME="$INFLUXDB_ADMIN_USER"
export INFLUX_PASSWORD="$INFLUXDB_ADMIN_USER_PASSWORD"
timeout 29s influx ping --host http://$POD_IP:8086
initialDelaySeconds: 120
timeoutSeconds: 30
periodSeconds: 45
successThreshold: 1
failureThreshold: 6
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
drop:
- ALL
privileged: false
seLinuxOptions: {}
runAsUser: 1001
runAsGroup: 1001
runAsNonRoot: true
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
serviceAccountName: helm-influxdb
serviceAccount: helm-influxdb
securityContext:
fsGroup: 1001
fsGroupChangePolicy: Always
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- mianyang
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600

View File

@@ -0,0 +1,252 @@
# RKE cluster node inventory.
# NOTE: node labels are Kubernetes labels (map[string]string). Boolean-looking
# values must be quoted — an unquoted `true` parses as YAML !!bool and is
# rejected when RKE unmarshals the config into string maps.
nodes:
- address: 192.168.1.4
  user: root
  role:
  - controlplane
  - etcd
  - worker
  internal_address: 192.168.1.4
  hostname_override: "master-192-168-1-4"
  labels:
    ingress-deploy: "true"
    uavcloud.env: mianyang
- address: 192.168.1.3
  user: root
  role:
  - worker
  internal_address: 192.168.1.3
  hostname_override: "1-worker-192-168-1-3"
  labels:
    uavcloud.env: mianyang
- address: 192.168.1.5
  user: root
  role:
  - worker
  internal_address: 192.168.1.5
  hostname_override: "2-worker-192-168-1-5"
  labels:
    uavcloud.env: mianyang
- address: 192.168.1.2
  user: root
  role:
  - worker
  internal_address: 192.168.1.2
  hostname_override: "3-mysql-192-168-1-2"
  labels:
    mysql-deploy: "true"
- address: 192.168.1.6
  user: root
  role:
  - worker
  internal_address: 192.168.1.6
  hostname_override: "4-storage-192-168-1-6"
  labels:
    minio-deploy: "true"
    doris-deploy: "true"
authentication:
strategy: x509
sans:
- "192.168.1.4"
- "36.133.66.183"
private_registries:
- url: 192.168.1.4:8033 # 私有镜像库地址
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# 默认值为false如果设置为true当发现不支持的Docker版本时RKE不会报错
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.30.14-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 10.74.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: false
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 1
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 172.96.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 10.74.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 10.74.0.10
# Fail if swap is on
fail_swap_on: false
# Set max pods to 250 instead of default 110
extra_binds:
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
extra_args:
max-pods: 122
# Optionally define additional volume binds to a service
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 1
authorization:
mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
mtu: 1440
options:
flannel_backend_type: vxlan
plugin: calico
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal: {}
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
  provider: nginx
  default_backend: true
  http_port: 30500
  https_port: 31500
  extra_envs:
  - name: TZ
    value: Asia/Shanghai
  node_selector:
    # Node-selector values are label strings; quote `true` so YAML does not
    # type it as a boolean (RKE expects map[string]string here).
    ingress-deploy: "true"
options:
use-forwarded-headers: "true"
access-log-path: /var/log/nginx/access.log
client-body-timeout: '6000'
compute-full-forwarded-for: 'true'
enable-underscores-in-headers: 'true'
log-format-escape-json: 'true'
log-format-upstream: >-
{ "msec": "$msec", "connection": "$connection", "connection_requests":
"$connection_requests", "pid": "$pid", "request_id": "$request_id",
"request_length": "$request_length", "remote_addr": "$remote_addr",
"remote_user": "$remote_user", "remote_port": "$remote_port",
"http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
"$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
"request_uri": "$request_uri", "args": "$args", "status": "$status",
"body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
"http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
"http_host": "$http_host", "server_name": "$server_name", "request_time":
"$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
"$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
"upstream_response_time": "$upstream_response_time",
"upstream_response_length": "$upstream_response_length",
"upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
"$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
"request_method": "$request_method", "server_protocol": "$server_protocol",
"pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
"geoip_country_code": "$geoip_country_code" }
proxy-body-size: 5120m
proxy-read-timeout: '6000'
proxy-send-timeout: '6000'

View File

@@ -15,7 +15,7 @@ cat /usr/local/etc/wdd/agent-wdd-config.yaml
/usr/local/bin/agent-wdd base selinux
/usr/local/bin/agent-wdd base sysconfig
/usr/local/bin/agent-wdd zsh
/usr/local/bin/agent-wdd zsh cn
# 首先需要下载所有的依赖!
@@ -56,8 +56,7 @@ done
export server=172.16.100.62
scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config"
ssh root@${server} "/usr/local/bin/agent-wdd base ssh key"
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config && /usr/local/bin/agent-wdd base ssh key"
# 安装docker-compose
@@ -66,12 +65,35 @@ chmod +x /usr/local/bin/docker-compose
# ssh root@${server} "/usr/local/bin/agent-wdd base tools"
# APT代理加速
scp /root/wdd/apt-change.sh root@${server}:/root/wdd/apt-change.sh
ssh root@${server} "bash /root/wdd/apt-change.sh -y"
ssh root@${server} "echo \"\"> /etc/apt/apt.conf.d/01proxy"
ssh root@${server} "printf '%s\n' \
'Acquire::http::Proxy \"http://10.22.57.8:3142\";' \
'Acquire::https::Proxy \"http://10.22.57.8:3142\";' \
| tee /etc/apt/apt.conf.d/01proxy >/dev/null"
ssh root@${server} "apt-get update"
ssh root@${server} "apt-get install -y parted"
# 磁盘初始化
ssh root@${server} "mkdir /root/wdd"
scp /root/wdd/disk.sh root@${server}:/root/wdd/
ssh root@${server} "bash /root/wdd/disk.sh"
# master节点安装docker
bash /root/wdd/docker.sh
# 在线安装docker 通过APT代理
scp /etc/apt/keyrings/docker.gpg root@${server}:/root/wdd/
scp /root/wdd/docker.sh root@${server}:/root/wdd/
ssh root@${server} "bash /root/wdd/docker.sh"
ssh root@${server} "docker info"
ssh root@${server} "docker compose version"
# 复制文件-docker
scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
scp /root/wdd/docker-compose-v2.18.0-linux-amd64 root@${server}:/root/wdd/
@@ -81,7 +103,6 @@ ssh root@${server} "/usr/local/bin/agent-wdd info all"
ssh root@${server} "cat /usr/local/etc/wdd/agent-wdd-config.yaml"
ssh root@${server} "/usr/local/bin/agent-wdd base swap"
ssh root@${server} "/usr/local/bin/agent-wdd base firewall"
ssh root@${server} "/usr/local/bin/agent-wdd base selinux"
@@ -101,19 +122,34 @@ ssh root@${server} "docker info"
wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/nginx=1.27.0=2025-03-11=402.tar.gz && docker load < nginx=1.27.0=2025-03-11=402.tar.gz && docker run -it --rm harbor.cdcyy.com.cn/cmii/nginx:1.27.0
ssh root@${server} "rm /root/wdd/*.sh"
# 主节点执行 安装harbor仓库
/usr/local/bin/agent-wdd base harbor install
# 安装rke kubectl
mv /root/wdd/rke_amd64 /usr/local/bin/rke
mv /root/wdd/rke_linux-amd64 /usr/local/bin/rke
chmod +x /usr/local/bin/rke
mv /root/wdd/kubectl /usr/local/bin/kubectl
mv /root/wdd/kubectl_v1.30.14_amd64 /usr/local/bin/kubectl
chmod +x /usr/local/bin/kubectl
# 安装 k8s-证书
mkdir /root/.kube
cp ./kube_config_cluster.yml /root/.kube/config
curl -s https://172.29.137.125
# 环境测试
DEFAULT_HTTP_BACKEND_IP=$(kubectl -n ingress-nginx get svc default-http-backend -o jsonpath='{.spec.clusterIP}')
# master节点
curl -s "http://${DEFAULT_HTTP_BACKEND_IP}"x
# worker节点
ssh root@"$server" "DEFAULT_HTTP_BACKEND_IP='$DEFAULT_HTTP_BACKEND_IP' bash -s" <<'EOF'
echo "DEFAULT_HTTP_BACKEND_IP=$DEFAULT_HTTP_BACKEND_IP"
curl -s "http://${DEFAULT_HTTP_BACKEND_IP}"
echo
EOF

View File

@@ -1,112 +0,0 @@
#!/bin/bash
set -eo pipefail
# 定义脚本参数
DOCKER_VERSION="20.10" # 在这里修改期望的版本
UBUNTU_IDS=("18.04" "20.04" "22.04" "24.04")
ALIYUN_MIRROR="https://mirrors.aliyun.com"
DOCKER_COMPOSE_VERSION="2.26.1"
# 1. 检测Ubuntu环境
# Abort unless the host is a supported Ubuntu release.
# Reads the UBUNTU_IDS global for the list of supported version numbers.
check_ubuntu() {
    local distro=""
    if command -v lsb_release &> /dev/null; then
        distro=$(lsb_release -is)
    fi
    if [[ "$distro" != "Ubuntu" ]]; then
        echo "错误本脚本仅支持Ubuntu系统"
        exit 1
    fi
    local release
    release=$(lsb_release -rs)
    # Membership test against the supported-version list.
    local supported="no"
    local candidate
    for candidate in "${UBUNTU_IDS[@]}"; do
        if [[ "$candidate" == "$release" ]]; then
            supported="yes"
        fi
    done
    if [[ "$supported" != "yes" ]]; then
        echo "错误不支持的Ubuntu版本 ${release},支持版本:${UBUNTU_IDS[*]}"
        exit 1
    fi
}
# 2. 替换阿里云源
# Point the Ubuntu APT sources at the Aliyun mirror, then refresh the package
# index and install HTTPS-transport prerequisites.
# NOTE(review): on Ubuntu 24.04 the default sources live in
# /etc/apt/sources.list.d/ubuntu.sources (deb822 format), so these in-place
# sed edits of /etc/apt/sources.list may be a no-op there — confirm.
set_aliyun_mirror() {
    # Rewrite both the archive and the security hosts.
    sudo sed -i "s/archive.ubuntu.com/mirrors.aliyun.com/g" /etc/apt/sources.list
    sudo sed -i "s/security.ubuntu.com/mirrors.aliyun.com/g" /etc/apt/sources.list
    sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates
}
# 3. 准备Docker仓库
# Install the Docker repo signing key (fetched from the Aliyun mirror) and
# register the docker-ce APT repository for this host's codename/architecture.
prepare_docker_env() {
    sudo mkdir -p /etc/apt/keyrings
    # Dearmor the key into the modern keyring location used by signed-by.
    curl -fsSL $ALIYUN_MIRROR/docker-ce/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
    local codename=$(lsb_release -cs)
    # One-line deb entry pinned to the release codename and CPU architecture.
    echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $ALIYUN_MIRROR/docker-ce/linux/ubuntu $codename stable" | \
        sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
    sudo apt-get update
}
# 4. 版本解析优化版本
# Resolve the concrete docker-ce apt version string matching $DOCKER_VERSION.
#   "MAJOR.MINOR"       -> highest available patch of that series
#   "MAJOR.MINOR.PATCH" -> exact version built for the current codename
# Prints the version on stdout; on failure prints an error to stderr and
# exits non-zero.
get_docker_version() {
    local target_version=""
    if [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+$ ]]; then
        # Pick the newest patch release within the requested minor series.
        target_version=$(apt-cache madison docker-ce \
            | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
            | grep -E "^[0-9]+:${DOCKER_VERSION}([.-]|\~\w+)" \
            | sort -rV \
            | head -1)
    elif [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
        # Exact version, restricted to packages for the current codename.
        target_version=$(apt-cache madison docker-ce \
            | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
            | grep -E "^[0-9]+:${DOCKER_VERSION}.*$(lsb_release -cs)" )
    fi
    if [ -z "$target_version" ]; then
        # Error must go to stderr: callers capture stdout via $(...), and the
        # original echo leaked this message into the resolved version value.
        echo "错误找不到Docker版本 $DOCKER_VERSION" >&2
        exit 1
    fi
    # NOTE(review): in basic sed regex `+` is a literal character, so this
    # substitution never actually strips the epoch prefix (e.g. "5:") — and
    # `apt-get install pkg=version` requires the epoch anyway. Kept as-is to
    # preserve the working behavior; confirm original intent.
    echo "$target_version" | sed 's/^[0-9]+://'
}
# 5. 主流程
# Entry point: validate the host, switch to the Aliyun mirrors, register the
# Docker repo, resolve the target docker-ce version, install Docker and
# docker-compose, pin the packages against upgrades, and start the service.
main() {
    check_ubuntu
    echo "-- 设置阿里云源 --"
    set_aliyun_mirror
    echo "-- 准备Docker仓库 --"
    prepare_docker_env
    echo "-- 解析Docker版本 --"
    # NOTE(review): `local var=$(cmd)` masks cmd's exit status, so under
    # `set -e` a failed version lookup is not fatal here — the apt-get
    # install below would fail instead. Confirm this is acceptable.
    local full_version=$(get_docker_version)
    echo "选择版本:$full_version"
    echo "-- 安装组件 --"
    sudo apt-get install -y \
        docker-ce-cli="$full_version" \
        docker-ce="$full_version" \
        docker-ce-rootless-extras="$full_version" \
        containerd.io \
        docker-buildx-plugin \
        docker-compose-plugin
    echo "-- 安装docker-compose --"
    # NOTE(review): the get.daocloud.io download proxy has been discontinued;
    # this URL may 404 — verify or point at an internal mirror.
    sudo curl -sSL "https://get.daocloud.io/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m`" -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "-- 禁用自动更新 --"
    # Hold the core packages so routine apt upgrades cannot bump Docker.
    sudo apt-mark hold docker-ce docker-ce-cli containerd.io
    echo "-- 启动服务 --"
    sudo systemctl enable docker && sudo systemctl start docker
    echo -e "\n=== 安装完成 ==="
    docker --version
    docker-compose --version
}
main
请写一个shell基于上述的部分安装逻辑实现如下的功能
脚本前面提取变量 docker的版本号 20.10.15 或 20.10(安装小版本最高的版本)
1. 检测当前主机是否是ubuntu环境本脚本只支持Ubuntu
2. 获取本机的版本号支持ubuntu18.04 20.04 22.04 24.04的版本
3. 根据ubuntu版本修改apt的镜像源为阿里源
4. 在线安装符合变量版本的docker在线安装docker-compose安装常用的插件
5. 禁止docker自动更新

View File

@@ -0,0 +1,84 @@
#!/bin/bash
set -e
# 用户配置部分
DISK="/dev/sdb" # 要操作的物理磁盘(请根据实际情况修改)
MOUNT_PATH="/var/lib/docker" # 挂载点路径(目录会自动创建)
FS_TYPE="ext4" # 文件系统类型支持ext4/xfs默认ext4
#----------------------------------------------------------
# 核心逻辑(建议非必要不修改)
#----------------------------------------------------------
# Pre-flight checks: root privilege, target disk exists, filesystem type is
# one of the supported values (ext4/xfs). Exits non-zero on any failure.
function check_prerequisites() {
    if [[ $EUID -ne 0 ]]; then
        echo -e "\033[31m错误必须使用root权限运行此脚本\033[0m"
        exit 1
    fi
    if [[ ! -b "$DISK" ]]; then
        echo -e "\033[31m错误磁盘 $DISK 不存在\033[0m"
        exit 1
    fi
    case "$FS_TYPE" in
        ext4|xfs)
            ;;
        *)
            echo -e "\033[31m错误不支持的磁盘格式 $FS_TYPE,仅支持 ext4/xfs\033[0m"
            exit 1
            ;;
    esac
}
# Partition $DISK (GPT label, one whole-disk LVM partition) and build the LVM
# stack: PV on the new partition -> VG "datavg" -> LV "lvdata" (all free space).
# NOTE(review): "${DISK}1" assumes sdX-style naming; on NVMe devices the first
# partition is "${DISK}p1" — confirm the target hardware.
function prepare_disk() {
    local partition="${DISK}1"
    echo -e "\033[34m正在初始化磁盘分区...\033[0m"
    parted "$DISK" --script mklabel gpt
    parted "$DISK" --script mkpart primary 0% 100%
    parted "$DISK" --script set 1 lvm on
    partprobe "$DISK" # make the kernel re-read the new partition table
    echo -e "\033[34m正在创建LVM结构...\033[0m"
    pvcreate "$partition"
    vgcreate datavg "$partition"
    lvcreate -y -l 100%FREE -n lvdata datavg
}
# Format /dev/datavg/lvdata as $FS_TYPE and mount it persistently at
# $MOUNT_PATH via an /etc/fstab UUID entry, then mount everything.
function format_and_mount() {
    echo -e "\033[34m格式化逻辑卷...\033[0m"
    if [[ "$FS_TYPE" == "ext4" ]]; then
        mkfs.ext4 -F "/dev/datavg/lvdata"
    else
        # check_prerequisites guarantees FS_TYPE is ext4 or xfs.
        mkfs.xfs -f "/dev/datavg/lvdata"
    fi
    echo -e "\033[34m设置挂载配置...\033[0m"
    mkdir -p "$MOUNT_PATH"
    UUID=$(blkid -s UUID -o value "/dev/datavg/lvdata")
    # Append the fstab entry only once, so re-running the script does not
    # accumulate duplicate mount lines (the original always appended).
    if ! grep -q "^UUID=$UUID " /etc/fstab; then
        echo "UUID=$UUID $MOUNT_PATH $FS_TYPE defaults 0 0" | tee -a /etc/fstab >/dev/null
    fi
    mount -a
}
# Show the end state for manual inspection: partition/filesystem layout of
# the target disk, then space usage of the new mount point.
function verify_result() {
    printf '\n\033[1;36m最终验证结果\033[0m\n'
    lsblk -f "$DISK"
    printf '\n磁盘空间使用情况\n'
    df -hT "$MOUNT_PATH"
}
# Main flow: validate preconditions, build the partition/LVM layout, format
# and mount, then print the final state for manual verification.
check_prerequisites
prepare_disk
format_and_mount
verify_result
echo -e "\n\033[32m操作执行完毕请仔细核查上述输出信息\033[0m"
#请写一个shell脚本脚本前面有变量可以设置 物理磁盘名称 挂载点路径 磁盘格式化的形式,脚本实现如下的功能
#1.将物理磁盘的盘符修改为gpt格式
#2.将物理磁盘全部空间创建一个分区分区格式为lvm
#3.将分区分配给逻辑卷datavg
#4.将datavg所有可用的空间分配给逻辑卷lvdata
#5.将逻辑卷格式化为变量磁盘格式化的形式(支持xfs和ext4的格式,默认为ext4)
#6.创建变量挂载点路径
#7.写入/etc/fstab,将逻辑卷挂载到变量挂载点,执行全部挂载操作
#8.执行lsblk和df -TH查看分区是否正确挂载

View File

@@ -0,0 +1,594 @@
#!/usr/bin/env bash
# ==============================================================================
# Metadata
# ==============================================================================
# Author : Smith Wang (Refactor by ChatGPT)
# Version : 2.0.0
# License : MIT
# Description : Configure Docker APT repository (mirror) and install Docker on
# Ubuntu (18.04/20.04/22.04/24.04) with robust offline handling.
#
# Modules :
# - Logging & Error Handling
# - Environment & Dependency Checks
# - Public Network Reachability Detection
# - Docker GPG Key Installation (Online/Offline)
# - Docker APT Repo Configuration
# - Docker Installation & Service Setup
#
# Notes :
# - This script DOES NOT modify Ubuntu APT sources (/etc/apt/sources.list)
# - This script DOES NOT set APT proxy (assumed handled elsewhere)
# - If public network is NOT reachable and local GPG key is missing, script
# will NOT proceed (per your requirement).
#
# ShellCheck : Intended clean for bash v5+ with: shellcheck -x <script>
# ==============================================================================
set -euo pipefail
# ==============================================================================
# Global Constants
# ==============================================================================
readonly SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_VERSION="2.0.0"
# Default mirror for Docker repo (you asked: only focus on docker source)
readonly DEFAULT_DOCKER_APT_MIRROR="https://mirrors.aliyun.com/docker-ce/linux/ubuntu"
# Default keyring location (recommended by modern Ubuntu)
readonly DEFAULT_KEYRING_PATH="/etc/apt/keyrings/docker.gpg"
# Exit codes
readonly EC_OK=0
readonly EC_GENERAL=1
readonly EC_UNSUPPORTED_OS=10
readonly EC_DEPENDENCY=11
readonly EC_OFFLINE_NO_KEY=20
readonly EC_APT_FAILURE=30
# ==============================================================================
# Configurable Variables (Environment Overrides)
# ==============================================================================
# You may export these before running:
# DOCKER_VERSION="20.10" # or "20.10.15" (optional)
# DOCKER_APT_MIRROR="https://..."
# DOCKER_KEYRING_PATH="/root/wdd/docker.gpg"
# LOCAL_DOCKER_GPG="/path/to/docker.gpg" (optional)
# LOG_LEVEL="DEBUG|INFO|WARN|ERROR"
DOCKER_VERSION="${DOCKER_VERSION:-20.10}"
DOCKER_APT_MIRROR="${DOCKER_APT_MIRROR:-$DEFAULT_DOCKER_APT_MIRROR}"
DOCKER_KEYRING_PATH="${DOCKER_KEYRING_PATH:-$DEFAULT_KEYRING_PATH}"
LOCAL_DOCKER_GPG="${LOCAL_DOCKER_GPG:-/root/wdd/docker.gpg}"
LOG_LEVEL="${LOG_LEVEL:-INFO}"
# ==============================================================================
# Function Call Graph (ASCII)
# ==============================================================================
# main
# |
# +--> init_traps
# |
# +--> check_platform
# |
# +--> ensure_prerequisites
# |
# +--> detect_public_network
# | |
# | +--> can_fetch_url_head
# |
# +--> ensure_docker_gpg_key
# | |
# | +--> install_key_from_online
# | | |
# | | +--> require_cmd (curl, gpg)
# | |
# | +--> install_key_from_local
# |
# +--> configure_docker_repo
# |
# +--> install_docker_packages
# | |
# | +--> resolve_docker_version
# |
# +--> pin_docker_packages
# |
# +--> enable_docker_service
# ==============================================================================
# ==============================================================================
# Logging
# ==============================================================================
### Map log level string to numeric value.
### @param level_str string Level string (DEBUG/INFO/WARN/ERROR)
### @return 0 Always returns 0; outputs numeric level to stdout
### @require none
### Map log level string to numeric value.
### @param level_str string Level string (DEBUG/INFO/WARN/ERROR)
### @return 0 Always returns 0; outputs numeric level to stdout
### @require none
log_level_to_num() {
    local level_str="${1:-INFO}"
    # Unknown/missing levels fall back to INFO (20).
    local num=20
    case "$level_str" in
        DEBUG) num=10 ;;
        WARN)  num=30 ;;
        ERROR) num=40 ;;
    esac
    echo "$num"
}
### Unified logger with level gating.
### @param level string Log level
### @param message string Message
### @return 0 Always returns 0
### @require date
### Unified logger with level gating; all output goes to stderr.
log() {
    # Positional contract: $1 = level name, remaining args = message words.
    local level="${1:?level required}"
    shift
    local message="${*:-}"
    local now
    now="$(date '+%F %T')"
    # Compare numeric ranks; messages below the configured LOG_LEVEL are dropped.
    local current_level_num wanted_level_num
    current_level_num="$(log_level_to_num "$LOG_LEVEL")"
    wanted_level_num="$(log_level_to_num "$level")"
    if [ "$wanted_level_num" -lt "$current_level_num" ]; then
        return 0
    fi
    # > Keep format stable for parsing by log collectors
    printf '%s [%s] %s: %s\n' "$now" "$level" "$SCRIPT_NAME" "$message" >&2
}
# ==============================================================================
# Error Handling & Traps
# ==============================================================================
### Trap handler for unexpected errors.
### @param exit_code int Exit code from failing command
### @return 0 Always returns 0
### @require none
on_error() {
local exit_code="${1:-$EC_GENERAL}"
log ERROR "Unhandled error occurred (exit_code=${exit_code})."
exit "$exit_code"
}
### Trap handler for script exit.
### @param exit_code int Exit code
### @return 0 Always returns 0
### @require none
on_exit() {
local exit_code="${1:-$EC_OK}"
if [ "$exit_code" -eq 0 ]; then
log INFO "Done."
else
log WARN "Exited with code ${exit_code}."
fi
return 0
}
### Initialize traps (ERR/INT/TERM/EXIT).
### @return 0 Success
### @require none
init_traps() {
trap 'on_error $?' ERR
trap 'log WARN "Interrupted (SIGINT)"; exit 130' INT
trap 'log WARN "Terminated (SIGTERM)"; exit 143' TERM
trap 'on_exit $?' EXIT
}
# ==============================================================================
# Privilege Helpers
# ==============================================================================
### Run a command as root (uses sudo if not root).
### @param cmd string Command to run
### @return 0 Success; non-zero on failure
### @require sudo (if not root)
### Run a command as root (prefixes sudo when invoked by a regular user).
### @param cmd string Command and arguments to run
### @return exit status of the executed command
### @require sudo (only when not already root)
run_root() {
    local uid
    uid="$(id -u)"
    if [ "$uid" -ne 0 ]; then
        sudo "$@"
    else
        "$@"
    fi
}
# ==============================================================================
# Dependency Checks
# ==============================================================================
### Ensure a command exists in PATH.
### @param cmd_name string Command name
### @return 0 If exists; 1 otherwise
### @require none
### Ensure a command exists in PATH.
### @param cmd_name string Command name
### @return 0 If exists; 1 otherwise (with an ERROR log line)
### @require none
require_cmd() {
    local cmd_name="${1:?cmd required}"
    command -v "$cmd_name" >/dev/null 2>&1 && return 0
    log ERROR "Missing dependency: ${cmd_name}"
    return 1
}
# ==============================================================================
# Platform Check
# ==============================================================================
### Check OS is Ubuntu and supported versions.
### @return 0 Supported; exits otherwise
### @require lsb_release, awk
check_platform() {
require_cmd lsb_release || exit "$EC_DEPENDENCY"
local distro version
distro="$(lsb_release -is 2>/dev/null || true)"
version="$(lsb_release -rs 2>/dev/null || true)"
if [ "$distro" != "Ubuntu" ]; then
log ERROR "Unsupported OS: ${distro}. This script supports Ubuntu only."
exit "$EC_UNSUPPORTED_OS"
fi
case "$version" in
18.04|20.04|22.04|24.04) ;;
*)
log ERROR "Unsupported Ubuntu version: ${version}. Supported: 18.04/20.04/22.04/24.04"
exit "$EC_UNSUPPORTED_OS"
;;
esac
log INFO "Platform OK: ${distro} ${version}"
}
# ==============================================================================
# APT Prerequisites
# ==============================================================================
### Install required packages for repository/key management and Docker installation.
### @return 0 Success; exits on apt failures
### @require apt-get
ensure_prerequisites() {
require_cmd apt-get || exit "$EC_DEPENDENCY"
log INFO "Installing prerequisites (does NOT modify APT sources or proxy)..."
# > apt update must work via your existing proxy+mirror scripts
if ! run_root apt-get update; then
log ERROR "apt-get update failed. Check APT proxy / mirror configuration."
exit "$EC_APT_FAILURE"
fi
# > Keep dependencies minimal; curl/gpg used only for online key fetch.
if ! run_root apt-get install -y ca-certificates gnupg lsb-release; then
log ERROR "Failed to install prerequisites."
exit "$EC_APT_FAILURE"
fi
log INFO "Prerequisites installed."
}
# ==============================================================================
# Public Network Reachability
# ==============================================================================
### Check whether we can fetch HTTP headers from a URL (lightweight reachability).
### @param test_url string URL to test
### @return 0 Reachable; 1 otherwise
### @require curl (optional; if missing returns 1)
### Check whether we can fetch HTTP headers from a URL (lightweight reachability).
### @param test_url string URL to test
### @return 0 Reachable; 1 otherwise (including when curl is unavailable)
### @require curl (optional; if missing returns 1)
can_fetch_url_head() {
    local probe_url="${1:?url required}"
    if command -v curl >/dev/null 2>&1; then
        # Short timeout keeps the probe from hanging in restricted networks.
        curl -fsSI --max-time 3 "$probe_url" >/dev/null 2>&1
    else
        log WARN "curl not found; cannot test public network reachability via HTTP."
        return 1
    fi
}
### Detect whether public network access is available for Docker key fetch.
### @return 0 Online; 1 Offline/Uncertain
### @require none
detect_public_network() {
local test_url="${DOCKER_APT_MIRROR%/}/gpg"
log INFO "Detecting public network reachability: HEAD ${test_url}"
if can_fetch_url_head "$test_url"; then
log INFO "Public network reachable for Docker mirror."
return 0
fi
log WARN "Public network NOT reachable (or curl missing). Will try local GPG key."
return 1
}
# ==============================================================================
# Docker GPG Key Management
# ==============================================================================
### Install Docker GPG key from online source (mirror).
### @param gpg_url string GPG URL
### @param keyring_path string Keyring output path
### @return 0 Success; non-zero on failure
### @require curl, gpg, install, mkdir, chmod
install_key_from_online() {
local gpg_url="${1:?gpg_url required}"
local keyring_path="${2:?keyring_path required}"
require_cmd curl || return 1
require_cmd gpg || return 1
# > Write to temp then atomically install to avoid partial files
local tmp_dir tmp_gpg
tmp_dir="$(mktemp -d)"
tmp_gpg="${tmp_dir}/docker.gpg"
log INFO "Fetching Docker GPG key online: ${gpg_url}"
curl -fsSL --max-time 10 "$gpg_url" | gpg --dearmor -o "$tmp_gpg"
run_root mkdir -p "$(dirname "$keyring_path")"
run_root install -m 0644 "$tmp_gpg" "$keyring_path"
run_root chmod a+r "$keyring_path" || true
rm -rf "$tmp_dir"
log INFO "Docker GPG key installed: ${keyring_path}"
return 0
}
### Install Docker GPG key from local file (offline-friendly).
### @param local_gpg_path string Local GPG file path
### @param keyring_path string Keyring output path
### @return 0 Success; 1 if local key missing; non-zero on other failures
### @require install, mkdir, chmod
install_key_from_local() {
local local_gpg_path="${1:?local_gpg_path required}"
local keyring_path="${2:?keyring_path required}"
if [ ! -f "$local_gpg_path" ]; then
log WARN "Local Docker GPG key not found: ${local_gpg_path}"
return 1
fi
run_root mkdir -p "$(dirname "$keyring_path")"
run_root install -m 0644 "$local_gpg_path" "$keyring_path"
run_root chmod a+r "$keyring_path" || true
log INFO "Docker GPG key installed from local: ${local_gpg_path} -> ${keyring_path}"
return 0
}
### Ensure Docker GPG key exists, using online if reachable; otherwise local-only.
### Offline policy: if local key missing -> DO NOT proceed (exit).
### @param is_online int 0 online; 1 offline
### @return 0 Success; exits with EC_OFFLINE_NO_KEY when offline and no local key
### @require none
ensure_docker_gpg_key() {
local is_online="${1:?is_online required}"
# > If keyring already exists, reuse it (idempotent)
if [ -f "$DOCKER_KEYRING_PATH" ]; then
log INFO "Docker keyring already exists: ${DOCKER_KEYRING_PATH}"
run_root chmod a+r "$DOCKER_KEYRING_PATH" || true
return 0
fi
# > Determine local key candidate paths (priority order)
local script_dir local_candidate
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [ -n "$LOCAL_DOCKER_GPG" ]; then
local_candidate="$LOCAL_DOCKER_GPG"
elif [ -f "${script_dir}/docker.gpg" ]; then
local_candidate="${script_dir}/docker.gpg"
else
local_candidate=""
fi
local gpg_url
gpg_url="${DOCKER_APT_MIRROR%/}/gpg"
if [ "$is_online" -eq 0 ]; then
# Online: try online key fetch first; if fails, fallback to local if present.
log DEBUG "Online mode: attempt online key install, fallback to local."
if install_key_from_online "$gpg_url" "$DOCKER_KEYRING_PATH"; then
return 0
fi
if [ -n "$local_candidate" ] && install_key_from_local "$local_candidate" "$DOCKER_KEYRING_PATH"; then
return 0
fi
log ERROR "Failed to install Docker GPG key (online fetch failed and no usable local key)."
exit "$EC_DEPENDENCY"
fi
# Offline: strictly local only; if missing -> do not proceed
log INFO "Offline mode: install Docker GPG key from local only."
if [ -n "$local_candidate" ] && install_key_from_local "$local_candidate" "$DOCKER_KEYRING_PATH"; then
return 0
fi
log ERROR "Offline and local Docker GPG key is missing. Will NOT proceed (per policy)."
exit "$EC_OFFLINE_NO_KEY"
}
# ==============================================================================
# Docker Repo Configuration
# ==============================================================================
### Configure Docker APT repository list file.
### @return 0 Success; exits on apt update failures
### @require dpkg, lsb_release, tee, apt-get
configure_docker_repo() {
require_cmd dpkg || exit "$EC_DEPENDENCY"
require_cmd lsb_release || exit "$EC_DEPENDENCY"
require_cmd tee || exit "$EC_DEPENDENCY"
local codename arch list_file
codename="$(lsb_release -cs)"
arch="$(dpkg --print-architecture)"
list_file="/etc/apt/sources.list.d/docker.list"
log INFO "Configuring Docker APT repo: ${DOCKER_APT_MIRROR} (${codename}, ${arch})"
# > Only touch docker repo; do not touch system sources.list
run_root tee "$list_file" >/dev/null <<EOF
deb [arch=${arch} signed-by=${DOCKER_KEYRING_PATH}] ${DOCKER_APT_MIRROR} ${codename} stable
EOF
if ! run_root apt-get update; then
log ERROR "apt-get update failed after configuring Docker repo."
exit "$EC_APT_FAILURE"
fi
log INFO "Docker APT repo configured: ${list_file}"
}
# ==============================================================================
# Docker Installation
# ==============================================================================
### Resolve Docker package version string from APT cache.
### @param docker_version string Desired version ("20.10" or "20.10.15")
### @return 0 Success and echoes full apt version string; exits if not found
### @require apt-cache, awk, grep, sort, head
resolve_docker_version() {
    # Resolve the full APT version string (with epoch) for the requested
    # Docker version from `apt-cache madison docker-ce`.
    # @param docker_version string Desired version ("20.10" or "20.10.15")
    # @return 0 and echoes the resolved version; exits on failure
    local docker_version="${1:?docker_version required}"
    require_cmd apt-cache || exit "$EC_DEPENDENCY"
    require_cmd awk || exit "$EC_DEPENDENCY"
    require_cmd grep || exit "$EC_DEPENDENCY"
    require_cmd sort || exit "$EC_DEPENDENCY"
    require_cmd head || exit "$EC_DEPENDENCY"
    # Fix: escape literal dots so the ERE matches "20.10" exactly, not
    # "20x10" (an unescaped "." matches any character in grep -E).
    local version_re="${docker_version//./\\.}"
    local resolved=""
    # > apt-cache madison output includes epoch, keep it for apt-get install
    if [[ "$docker_version" =~ ^[0-9]+\.[0-9]+$ ]]; then
        # Major.minor: pick the newest patch/build for that series.
        resolved="$(
            apt-cache madison docker-ce \
                | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
                | grep -E "^[0-9]+:${version_re}([.-]|~)" \
                | sort -rV \
                | head -1 || true
        )"
    elif [[ "$docker_version" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
        # Exact version: sort for determinism instead of relying on madison order.
        resolved="$(
            apt-cache madison docker-ce \
                | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
                | grep -E "^[0-9]+:${version_re}" \
                | sort -rV \
                | head -1 || true
        )"
    else
        log ERROR "Invalid DOCKER_VERSION format: ${docker_version} (expect 20.10 or 20.10.15)"
        exit "$EC_GENERAL"
    fi
    if [ -z "$resolved" ]; then
        log ERROR "Cannot find Docker version '${docker_version}' from APT. Check repo/mirror and apt proxy."
        exit "$EC_APT_FAILURE"
    fi
    echo "$resolved"
    return 0
}
### Install Docker packages via APT.
### @return 0 Success; exits on failure
### @require apt-get, systemctl
install_docker_packages() {
require_cmd apt-get || exit "$EC_DEPENDENCY"
local full_version
full_version="$(resolve_docker_version "$DOCKER_VERSION")"
log INFO "Installing Docker packages: docker-ce=${full_version}"
# > Compose: use docker-compose-plugin (no curl downloading binaries)
if ! run_root apt-get install -y \
"docker-ce=${full_version}" \
"docker-ce-cli=${full_version}" \
"docker-ce-rootless-extras=${full_version}" \
containerd.io \
docker-buildx-plugin \
docker-compose-plugin; then
log ERROR "Docker installation failed."
exit "$EC_APT_FAILURE"
fi
# > Optional: provide docker-compose legacy command compatibility
if ! command -v docker-compose >/dev/null 2>&1; then
if [ -x /usr/libexec/docker/cli-plugins/docker-compose ]; then
run_root ln -sf /usr/libexec/docker/cli-plugins/docker-compose /usr/local/bin/docker-compose || true
fi
fi
log INFO "Docker packages installed."
}
### Pin Docker packages to avoid unintended upgrades.
### @return 0 Success; non-zero on failures (non-fatal)
### @require apt-mark
### Pin Docker packages to avoid unintended upgrades.
### @return 0 Always (pin failures are non-fatal)
### @require apt-mark (optional; skipped with a warning when absent)
pin_docker_packages() {
    if command -v apt-mark >/dev/null 2>&1; then
        log INFO "Holding Docker packages (prevent auto-upgrade)..."
        run_root apt-mark hold \
            docker-ce docker-ce-cli docker-ce-rootless-extras containerd.io \
            docker-buildx-plugin docker-compose-plugin >/dev/null 2>&1 || true
    else
        log WARN "apt-mark not found; skip pinning."
    fi
    return 0
}
### Enable and start Docker service, then verify versions.
### @return 0 Success; exits on failure to enable docker
### @require systemctl, docker
enable_docker_service() {
require_cmd systemctl || exit "$EC_DEPENDENCY"
log INFO "Enabling and starting docker service..."
run_root systemctl enable --now docker
# > Verification should not hard-fail the whole script
if command -v docker >/dev/null 2>&1; then
docker --version || true
docker compose version || true
fi
if command -v docker-compose >/dev/null 2>&1; then
docker-compose --version || true
fi
log INFO "Docker service enabled."
}
# ==============================================================================
# Main
# ==============================================================================
### Main entrypoint.
### @return 0 Success; non-zero on failure
### @require none
main() {
init_traps
log INFO "Starting Docker installer (v${SCRIPT_VERSION})..."
check_platform
ensure_prerequisites
local is_online=1
if detect_public_network; then
is_online=0
fi
ensure_docker_gpg_key "$is_online"
configure_docker_repo
install_docker_packages
pin_docker_packages
enable_docker_service
log INFO "All tasks completed successfully."
exit "$EC_OK"
}
main "$@"

View File

@@ -0,0 +1,24 @@
# Master node: install apt-cacher-ng to act as a shared APT cache/proxy.
sudo apt update
sudo apt install -y apt-cacher-ng
# apt-cacher-ng serves on TCP 3142 by default; confirm it is active.
systemctl status apt-cacher-ng
# Worker node: point APT at the master's cache via a proxy drop-in.
# NOTE(review): 10.22.57.8 is presumably the master's address — confirm per site.
sudo tee /etc/apt/apt.conf.d/01proxy <<EOF
Acquire::http::Proxy "http://10.22.57.8:3142";
Acquire::https::Proxy "http://10.22.57.8:3142";
EOF
# Remote variant: push the same proxy config over SSH.
# NOTE(review): ${server} must be set by the caller before running this line.
ssh root@${server} "printf '%s\n' \
'Acquire::http::Proxy \"http://10.22.57.8:3142\";' \
'Acquire::https::Proxy \"http://10.22.57.8:3142\";' \
| tee /etc/apt/apt.conf.d/01proxy >/dev/null"

View File

@@ -0,0 +1,504 @@
#!/usr/bin/env bash
#==============================================================================
# APT Source Switcher - Ubuntu -> TUNA (Tsinghua University) Mirror
#
# Author: Smith Wang
# Version: 1.0.0
# License: MIT
#==============================================================================
# Module Dependencies:
# - bash (>= 5.0)
# - coreutils: cp, mv, mkdir, date, id, chmod, chown
# - util-linux / distro tools: (optional) lsb_release
# - text tools: sed, awk, grep
# - apt: apt-get
#
# Notes:
# - Ubuntu 24.04 typically uses Deb822 sources file: /etc/apt/sources.list.d/ubuntu.sources
# - Ubuntu 20.04/22.04 often uses traditional /etc/apt/sources.list
#==============================================================================
set -euo pipefail
IFS=$'\n\t'
umask 022
#------------------------------------------------------------------------------
# Global Constants
#------------------------------------------------------------------------------
readonly SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_VERSION="1.0.0"
readonly TUNA_UBUNTU_URI="https://mirrors.tuna.tsinghua.edu.cn/ubuntu/"
readonly DEFAULT_BACKUP_DIR="/etc/apt/backup"
readonly APT_SOURCES_LIST="/etc/apt/sources.list"
readonly APT_DEB822_SOURCES="/etc/apt/sources.list.d/ubuntu.sources"
# Log levels: DEBUG=0, INFO=1, WARN=2, ERROR=3
readonly LOG_LEVEL_DEBUG=0
readonly LOG_LEVEL_INFO=1
readonly LOG_LEVEL_WARN=2
readonly LOG_LEVEL_ERROR=3
#------------------------------------------------------------------------------
# Runtime Variables (defaults)
#------------------------------------------------------------------------------
log_level="$LOG_LEVEL_INFO"
backup_dir="$DEFAULT_BACKUP_DIR"
do_update="false"
assume_yes="false"
ubuntu_codename=""
ubuntu_version_id=""
sources_mode="" # "deb822" or "list"
#------------------------------------------------------------------------------
# Function Call Graph (ASCII)
#
# main
# |
# +--> parse_args
# +--> setup_traps
# +--> require_root
# +--> detect_ubuntu
# | |
# | +--> read_os_release
# |
# +--> choose_sources_mode
# +--> ensure_backup_dir
# +--> backup_sources
# +--> confirm_action
# +--> apply_tuna_sources
# | |
# | +--> write_sources_list_tuna
# | +--> patch_deb822_sources_tuna
# |
# +--> apt_update (optional)
# +--> summary
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Logging
#------------------------------------------------------------------------------
### Log message with level.
# @param level int Numeric log level (0=DEBUG,1=INFO,2=WARN,3=ERROR)
# @param message string Message to print
# @return 0 success
# @require date printf
log() {
local level="$1"
local message="$2"
if [[ "$level" -lt "$log_level" ]]; then
return 0
fi
local level_name="INFO"
case "$level" in
0) level_name="DEBUG" ;;
1) level_name="INFO" ;;
2) level_name="WARN" ;;
3) level_name="ERROR" ;;
*) level_name="INFO" ;;
esac
# > Use RFC3339-ish timestamp to help operations & auditing
printf '%s [%s] %s: %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$level_name" "$SCRIPT_NAME" "$message" >&2
}
### Convenience wrappers.
# @return 0
log_debug() { log "$LOG_LEVEL_DEBUG" "$1"; }
log_info() { log "$LOG_LEVEL_INFO" "$1"; }
log_warn() { log "$LOG_LEVEL_WARN" "$1"; }
log_error() { log "$LOG_LEVEL_ERROR" "$1"; }
#------------------------------------------------------------------------------
# Error handling / traps
#------------------------------------------------------------------------------
### Trap handler for unexpected errors.
# @param exit_code int Exit code
# @param line_no int Line number where error occurred
# @param cmd string The command that failed
# @return 0
# @require printf
on_err() {
local exit_code="$1"
local line_no="$2"
local cmd="$3"
log_error "Script failed (exit=${exit_code}) at line ${line_no}: ${cmd}"
}
### Cleanup handler (reserved for future extension).
# @return 0
# @require true
on_exit() {
true
}
### Setup traps for ERR and EXIT.
# @return 0
# @require trap
setup_traps() {
# > Preserve error context with BASH_LINENO and BASH_COMMAND
trap 'on_err "$?" "${LINENO}" "${BASH_COMMAND}"' ERR
trap 'on_exit' EXIT
}
#------------------------------------------------------------------------------
# Utility / validation
#------------------------------------------------------------------------------
### Print usage.
# @return 0
# @require cat
usage() {
cat <<'EOF'
Usage:
sudo ./apt_tuna_switch.sh [options]
Options:
-y, --yes Non-interactive; do not prompt.
-u, --update Run "apt-get update" after switching.
-b, --backup-dir Backup directory (default: /etc/apt/backup)
-d, --debug Enable DEBUG logs.
-h, --help Show help.
Examples:
sudo ./apt_tuna_switch.sh -y -u
sudo ./apt_tuna_switch.sh --backup-dir /root/apt-bak --update
EOF
}
### Parse CLI arguments.
# @param args string[] CLI args
# @return 0 success
# @require printf
parse_args() {
while [[ $# -gt 0 ]]; do
case "$1" in
-y|--yes)
assume_yes="true"
shift
;;
-u|--update)
do_update="true"
shift
;;
-b|--backup-dir)
if [[ $# -lt 2 ]]; then
log_error "Missing value for --backup-dir"
usage
exit 2
fi
backup_dir="$2"
shift 2
;;
-d|--debug)
log_level="$LOG_LEVEL_DEBUG"
shift
;;
-h|--help)
usage
exit 0
;;
*)
log_error "Unknown argument: $1"
usage
exit 2
;;
esac
done
}
### Ensure running as root.
# @return 0 if root; exit otherwise
# @require id
require_root() {
if [[ "$(id -u)" -ne 0 ]]; then
log_error "This script must be run as root. Try: sudo ./${SCRIPT_NAME}"
exit 1
fi
}
### Read /etc/os-release fields.
# @return 0
# @require awk grep
read_os_release() {
if [[ ! -r /etc/os-release ]]; then
log_error "Cannot read /etc/os-release"
exit 1
fi
# > Parse key fields safely
local os_id
os_id="$(awk -F= '$1=="ID"{gsub(/"/,"",$2); print $2}' /etc/os-release | head -n1 || true)"
ubuntu_version_id="$(awk -F= '$1=="VERSION_ID"{gsub(/"/,"",$2); print $2}' /etc/os-release | head -n1 || true)"
ubuntu_codename="$(awk -F= '$1=="VERSION_CODENAME"{gsub(/"/,"",$2); print $2}' /etc/os-release | head -n1 || true)"
if [[ "$os_id" != "ubuntu" ]]; then
log_error "Unsupported OS ID: ${os_id:-unknown}. This script supports Ubuntu only."
exit 1
fi
if [[ -z "$ubuntu_version_id" ]]; then
log_error "Failed to detect Ubuntu VERSION_ID from /etc/os-release"
exit 1
fi
# > For some environments, VERSION_CODENAME may be empty; try UBUNTU_CODENAME
if [[ -z "$ubuntu_codename" ]]; then
ubuntu_codename="$(awk -F= '$1=="UBUNTU_CODENAME"{gsub(/"/,"",$2); print $2}' /etc/os-release | head -n1 || true)"
fi
}
### Detect supported Ubuntu version and codename.
# @return 0 success; exit otherwise
# @require awk
detect_ubuntu() {
read_os_release
case "$ubuntu_version_id" in
"20.04") ubuntu_codename="${ubuntu_codename:-focal}" ;;
"22.04") ubuntu_codename="${ubuntu_codename:-jammy}" ;;
"24.04") ubuntu_codename="${ubuntu_codename:-noble}" ;;
*)
log_error "Unsupported Ubuntu version: ${ubuntu_version_id}. Supported: 20.04/22.04/24.04"
exit 1
;;
esac
if [[ -z "$ubuntu_codename" ]]; then
log_error "Failed to determine Ubuntu codename."
exit 1
fi
log_info "Detected Ubuntu ${ubuntu_version_id} (${ubuntu_codename})"
}
### Decide which sources format to manage.
# @return 0
# @require test
### Decide which sources format to manage.
# @return 0
# @require test
choose_sources_mode() {
    # Default to the traditional sources.list; even when neither file exists
    # we proceed in "list" mode and create it later (defensive fallback).
    sources_mode="list"
    # Deb822 file (Ubuntu 24.04 style) takes precedence when present.
    if [[ -f "$APT_DEB822_SOURCES" ]]; then
        sources_mode="deb822"
    fi
    log_info "Sources mode: ${sources_mode}"
}
### Ensure backup directory exists.
# @param backup_dir string Directory path
# @return 0
# @require mkdir
### Ensure backup directory exists.
# @param backup_dir string Directory path
# @return 0 on success; exits 1 when the path argument is empty
# @require mkdir
ensure_backup_dir() {
    local dir="$1"
    [[ -n "$dir" ]] || { log_error "Backup directory is empty."; exit 1; }
    mkdir -p "$dir"
    log_debug "Backup directory ensured: $dir"
}
### Backup an APT sources file if it exists.
# @param src_path string File path to backup
# @param backup_dir string Backup directory
# @return 0
# @require cp date
### Backup an APT sources file if it exists.
# @param src_path string File path to backup
# @param backup_dir string Backup directory
# @return 0 (missing source file is a warning, not an error)
# @require cp date
backup_file_if_exists() {
    local src_path="$1" dir="$2"
    if [[ -e "$src_path" ]]; then
        # Timestamped copy preserves attributes (cp -a) for easy rollback.
        local stamp base target
        stamp="$(date '+%Y%m%d-%H%M%S')"
        base="$(basename "$src_path")"
        target="${dir}/${base}.${stamp}.bak"
        cp -a "$src_path" "$target"
        log_info "Backed up: $src_path -> $target"
    else
        log_warn "Skip backup (not found): $src_path"
        return 0
    fi
}
### Backup relevant source files.
# @return 0
# @require cp
backup_sources() {
backup_file_if_exists "$APT_SOURCES_LIST" "$backup_dir"
backup_file_if_exists "$APT_DEB822_SOURCES" "$backup_dir"
}
### Ask for confirmation unless --yes is given.
# @return 0 if confirmed; exit otherwise
# @require read
confirm_action() {
if [[ "$assume_yes" == "true" ]]; then
log_info "Non-interactive mode: --yes"
return 0
fi
log_warn "About to replace APT sources with TUNA mirror:"
log_warn " ${TUNA_UBUNTU_URI}"
log_warn "This will modify system APT source configuration."
printf "Continue? [y/N]: " >&2
local ans=""
read -r ans
case "$ans" in
y|Y|yes|YES) return 0 ;;
*) log_info "Cancelled by user."; exit 0 ;;
esac
}
#------------------------------------------------------------------------------
# Core actions
#------------------------------------------------------------------------------
### Write traditional /etc/apt/sources.list using TUNA mirror.
# @param codename string Ubuntu codename (focal/jammy/noble)
# @return 0
# @require cat chmod chown mv
write_sources_list_tuna() {
local codename="$1"
local tmp_file
tmp_file="$(mktemp)"
# > Provide standard suites: release, updates, backports, security
cat >"$tmp_file" <<EOF
#------------------------------------------------------------------------------#
# Ubuntu ${codename} - TUNA Mirror
# Generated by: ${SCRIPT_NAME} v${SCRIPT_VERSION}
# Mirror: ${TUNA_UBUNTU_URI}
#------------------------------------------------------------------------------#
deb ${TUNA_UBUNTU_URI} ${codename} main restricted universe multiverse
deb ${TUNA_UBUNTU_URI} ${codename}-updates main restricted universe multiverse
deb ${TUNA_UBUNTU_URI} ${codename}-backports main restricted universe multiverse
deb ${TUNA_UBUNTU_URI} ${codename}-security main restricted universe multiverse
# If you want source packages, uncomment the following lines:
# deb-src ${TUNA_UBUNTU_URI} ${codename} main restricted universe multiverse
# deb-src ${TUNA_UBUNTU_URI} ${codename}-updates main restricted universe multiverse
# deb-src ${TUNA_UBUNTU_URI} ${codename}-backports main restricted universe multiverse
# deb-src ${TUNA_UBUNTU_URI} ${codename}-security main restricted universe multiverse
EOF
chmod 0644 "$tmp_file"
chown root:root "$tmp_file"
# > Atomic replace
mkdir -p "$(dirname "$APT_SOURCES_LIST")"
mv -f "$tmp_file" "$APT_SOURCES_LIST"
log_info "Updated: $APT_SOURCES_LIST"
}
### Patch Deb822 ubuntu.sources to use TUNA mirror.
# @param deb822_file string Path to ubuntu.sources
# @param tuna_uri string The TUNA mirror base URI
# @return 0
# @require sed cp mktemp chmod chown mv grep
patch_deb822_sources_tuna() {
local deb822_file="$1"
local tuna_uri="$2"
if [[ ! -f "$deb822_file" ]]; then
log_warn "Deb822 sources file not found: $deb822_file"
return 0
fi
local tmp_file
tmp_file="$(mktemp)"
cp -a "$deb822_file" "$tmp_file"
# > Replace any "URIs:" line to TUNA; keep other Deb822 fields unchanged.
# > Some systems may have multiple stanzas; this applies globally.
sed -i -E "s|^URIs:[[:space:]]+.*$|URIs: ${tuna_uri}|g" "$tmp_file"
# > Defensive check: ensure we still have at least one URIs line
if ! grep -qE '^URIs:[[:space:]]+' "$tmp_file"; then
log_error "Deb822 patch failed: no 'URIs:' line found after edit."
exit 1
fi
chmod 0644 "$tmp_file"
chown root:root "$tmp_file"
mv -f "$tmp_file" "$deb822_file"
log_info "Patched Deb822 sources: $deb822_file"
}
### Apply TUNA sources according to detected mode.
# @return 0
# @require true
apply_tuna_sources() {
case "$sources_mode" in
deb822)
patch_deb822_sources_tuna "$APT_DEB822_SOURCES" "$TUNA_UBUNTU_URI"
;;
list)
write_sources_list_tuna "$ubuntu_codename"
;;
*)
log_error "Unknown sources mode: $sources_mode"
exit 1
;;
esac
}
### Run apt-get update if requested.
# @return 0
# @require apt-get
### Run apt-get update if requested via --update.
# @return 0
# @require apt-get
apt_update() {
    [[ "$do_update" == "true" ]] || {
        log_info "Skip apt-get update (use --update to enable)."
        return 0
    }
    log_info "Running: apt-get update"
    # Non-interactive frontend reduces prompts in automated environments.
    DEBIAN_FRONTEND=noninteractive apt-get update
    log_info "apt-get update completed."
}
### Print summary.
# @return 0
summary() {
log_info "Done."
log_info "Backup directory: ${backup_dir}"
log_info "Mirror applied: ${TUNA_UBUNTU_URI}"
log_info "Ubuntu: ${ubuntu_version_id} (${ubuntu_codename}), mode: ${sources_mode}"
}
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
### Main entry.
# @param args string[] CLI args
# @return 0 success; non-zero otherwise
# @require bash
main() {
parse_args "$@"
setup_traps
require_root
detect_ubuntu
choose_sources_mode
ensure_backup_dir "$backup_dir"
backup_sources
confirm_action
apply_tuna_sources
apt_update
summary
}
main "$@"

View File

@@ -0,0 +1,767 @@
#!/bin/bash
###############################################################################
# NGINX Installation Script for China Mainland with Mirror Acceleration
###############################################################################
# @author Advanced Bash Shell Engineer
# @version 1.0.0
# @license MIT
# @created 2026-01-19
# @desc Production-grade NGINX installation script with China mirror support
# Supports Ubuntu 18.04/20.04/22.04/24.04 with version pinning
###############################################################################
###############################################################################
# GLOBAL CONSTANTS
###############################################################################
readonly SCRIPT_NAME="$(basename "${BASH_SOURCE[0]}")"
readonly SCRIPT_VERSION="1.0.0"
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Color codes for output
readonly COLOR_RED="\033[0;31m"
readonly COLOR_GREEN="\033[0;32m"
readonly COLOR_YELLOW="\033[1;33m"
readonly COLOR_BLUE="\033[0;34m"
readonly COLOR_RESET="\033[0m"
# Log levels
readonly LOG_LEVEL_DEBUG=0
readonly LOG_LEVEL_INFO=1
readonly LOG_LEVEL_WARN=2
readonly LOG_LEVEL_ERROR=3
# Default configuration
readonly DEFAULT_NGINX_VERSION="stable"
readonly DEFAULT_MIRROR="ustc"
readonly SUPPORTED_UBUNTU_VERSIONS=("18.04" "20.04" "22.04" "24.04")
# Mirror configurations (China mainland accelerated sources)
declare -A MIRROR_URLS=(
["aliyun"]="http://mirrors.aliyun.com/nginx"
["tsinghua"]="https://mirrors.tuna.tsinghua.edu.cn/nginx"
["ustc"]="https://mirrors.ustc.edu.cn/nginx/ubuntu"
["official"]="http://nginx.org"
)
declare -A MIRROR_KEY_URLS=(
["aliyun"]="http://mirrors.aliyun.com/nginx/keys/nginx_signing.key"
["tsinghua"]="https://mirrors.tuna.tsinghua.edu.cn/nginx/keys/nginx_signing.key"
["ustc"]="https://mirrors.ustc.edu.cn/nginx/keys/nginx_signing.key"
["official"]="https://nginx.org/keys/nginx_signing.key"
)
# Global variables
CURRENT_LOG_LEVEL="${LOG_LEVEL_INFO}"
NGINX_VERSION="${DEFAULT_NGINX_VERSION}"
MIRROR_SOURCE="${DEFAULT_MIRROR}"
FORCE_REINSTALL=false
DRY_RUN=false
###############################################################################
# ERROR HANDLING & TRAPS
###############################################################################
set -euo pipefail
IFS=$'\n\t'
###
### Cleanup function for graceful exit; registered as the EXIT trap below.
### @param none
### @return void (propagates the exit code that triggered the trap)
### @require none
###
cleanup() {
local exit_code=$?
if [[ ${exit_code} -ne 0 ]]; then
log_error "脚本退出,错误码: ${exit_code}"
fi
# > Perform cleanup operations if needed
return "${exit_code}"
}
# > Register traps: EXIT reports failures; INT/TERM exit 130 (SIGINT convention).
trap cleanup EXIT
trap 'log_error "用户中断脚本执行"; exit 130' INT TERM
###############################################################################
# LOGGING FUNCTIONS
###############################################################################
###
### Core logging function with level-based filtering.
### @param log_level integer Numeric level of this message (0-3)
### @param message string Text to emit
### @param color string ANSI color sequence for the line
### @param level_name string Localized label printed in brackets
### @return void
### @require date
###
_log() {
  local log_level=$1 message=$2 color=$3 level_name=$4
  # > Guard clause: drop anything below the configured threshold.
  if (( log_level < CURRENT_LOG_LEVEL )); then
    return 0
  fi
  local timestamp
  timestamp="$(date '+%Y-%m-%d %H:%M:%S')"
  # > All log output goes to stderr so stdout stays clean for capture.
  echo -e "${color}[${timestamp}] [${level_name}] ${message}${COLOR_RESET}" >&2
}
# > Per-level wrappers around _log; the bracketed labels are localized
# > Chinese level names (调试=debug, 信息=info, 警告=warn, 错误=error).
###
### Debug level logging
### @param message string Debug message
### @return void
### @require none
###
log_debug() {
_log "${LOG_LEVEL_DEBUG}" "$1" "${COLOR_BLUE}" "调试"
}
###
### Info level logging
### @param message string Info message
### @return void
### @require none
###
log_info() {
_log "${LOG_LEVEL_INFO}" "$1" "${COLOR_GREEN}" "信息"
}
###
### Warning level logging
### @param message string Warning message
### @return void
### @require none
###
log_warn() {
_log "${LOG_LEVEL_WARN}" "$1" "${COLOR_YELLOW}" "警告"
}
###
### Error level logging
### @param message string Error message
### @return void
### @require none
###
log_error() {
_log "${LOG_LEVEL_ERROR}" "$1" "${COLOR_RED}" "错误"
}
###############################################################################
# VALIDATION FUNCTIONS
###############################################################################
###
### Check if script is running with root privileges
### @param none
### @return 0 if root, 1 otherwise
### @require none
###
check_root_privileges() {
  # > EUID is 0 only for root; running under sudo satisfies this too.
  (( EUID == 0 )) || {
    log_error "此脚本必须以 root 身份运行,或使用 sudo 执行"
    return 1
  }
  log_debug "已确认具备 root 权限"
  return 0
}
###
### Validate Ubuntu version compatibility
### @param none
### @return 0 if supported, 1 otherwise
### @require lsb_release command
###
validate_ubuntu_version() {
  # > lsb_release is required to identify the running release.
  if ! command -v lsb_release &> /dev/null; then
    log_error "未找到 lsb_release 命令,无法识别 Ubuntu 版本。"
    return 1
  fi
  local ubuntu_version
  ubuntu_version="$(lsb_release -rs)"
  log_debug "检测到 Ubuntu 版本: ${ubuntu_version}"
  # > Early-return as soon as the release matches the whitelist.
  local candidate
  for candidate in "${SUPPORTED_UBUNTU_VERSIONS[@]}"; do
    if [[ "${candidate}" == "${ubuntu_version}" ]]; then
      log_info "Ubuntu ${ubuntu_version} 受支持"
      return 0
    fi
  done
  log_error "Ubuntu ${ubuntu_version} 不受支持。支持的版本: ${SUPPORTED_UBUNTU_VERSIONS[*]}"
  return 1
}
###
### Validate mirror source selection
### @param mirror_name string Mirror identifier (a key of MIRROR_URLS)
### @return 0 if valid, 1 otherwise
### @require none
###
validate_mirror_source() {
  local mirror_name=$1
  # > [[ -v array[key] ]] tests membership without expanding the value.
  if [[ -v MIRROR_URLS["${mirror_name}"] ]]; then
    log_debug "镜像源 '${mirror_name}' 有效"
    return 0
  fi
  log_error "无效的镜像源: ${mirror_name}"
  log_info "可用镜像源: ${!MIRROR_URLS[*]}"
  return 1
}
###
### Check network connectivity to mirror
### @param mirror_url string URL to test
### @return 0 if reachable, 1 otherwise
### @require curl
###
check_mirror_connectivity() {
  local mirror_url=$1
  local -r timeout=10
  log_debug "正在测试镜像连通性: ${mirror_url}"
  # > -sSf: quiet, but fail on HTTP errors; bounded connect and total time.
  if ! curl -sSf --connect-timeout "${timeout}" --max-time "${timeout}" \
      "${mirror_url}" -o /dev/null 2>/dev/null; then
    log_warn "镜像 ${mirror_url} 不可访问"
    return 1
  fi
  log_debug "镜像 ${mirror_url} 可访问"
  return 0
}
###############################################################################
# SYSTEM PREPARATION FUNCTIONS
###############################################################################
###
### Install required system dependencies
### @param none
### @return 0 on success, 1 on failure
### @require apt-get
###
install_dependencies() {
  log_info "正在安装系统依赖..."
  # > Tooling needed for key import, repo setup and release detection.
  local -a dependencies=(
    curl
    gnupg2
    ca-certificates
    lsb-release
    ubuntu-keyring
    apt-transport-https
  )
  if [[ "${DRY_RUN}" == true ]]; then
    log_info "[演练模式] 将会安装: ${dependencies[*]}"
    return 0
  fi
  # > Refresh the package index before installing.
  apt-get update -qq || {
    log_error "更新软件包索引失败"
    return 1
  }
  apt-get install -y -qq "${dependencies[@]}" || {
    log_error "安装依赖失败"
    return 1
  }
  log_info "依赖安装完成"
  return 0
}
###
### Remove existing NGINX installation if present
### @param none
### @return 0 on success or if not installed; 1 if installed and --force not given
### @require apt-get, dpkg-query, systemctl
###
remove_existing_nginx() {
  log_info "正在检查是否已安装 NGINX..."
  # > FIX: the previous check (`dpkg -l | grep -q "^ii.*nginx"`) also matched
  # > unrelated packages such as libnginx-mod-* or nginx-common leftovers;
  # > query the exact install state of the "nginx" package instead.
  if ! dpkg-query -W -f='${Status}' nginx 2>/dev/null | grep -q "install ok installed"; then
    log_info "未发现已安装的 NGINX"
    return 0
  fi
  if [[ "${FORCE_REINSTALL}" == false ]]; then
    log_warn "NGINX 已安装。如需重装请使用 --force。"
    return 1
  fi
  log_info "正在卸载已安装的 NGINX..."
  if [[ "${DRY_RUN}" == true ]]; then
    log_info "[演练模式] 将会卸载已安装的 NGINX"
    return 0
  fi
  # > Stop NGINX service if running
  if systemctl is-active --quiet nginx 2>/dev/null; then
    systemctl stop nginx || true
  fi
  # > Remove NGINX packages; partial failures are tolerated.
  if ! apt-get remove --purge -y nginx nginx-common nginx-full 2>/dev/null; then
    log_warn "部分 NGINX 软件包可能未能完全卸载(可忽略)"
  fi
  # > Clean up orphaned dependencies (best effort).
  apt-get autoremove -y -qq || true
  log_info "已卸载现有 NGINX"
  return 0
}
###############################################################################
# NGINX INSTALLATION FUNCTIONS
###############################################################################
###
### Import NGINX GPG signing key into a dedicated keyring
### @param mirror_name string Mirror source name (key of MIRROR_KEY_URLS)
### @return 0 on success, 1 on failure
### @require curl, gpg
###
import_nginx_gpg_key() {
local mirror_name=$1
local key_url="${MIRROR_KEY_URLS[${mirror_name}]}"
local keyring_path="/usr/share/keyrings/nginx-archive-keyring.gpg"
log_info "正在导入 NGINX GPG 签名密钥(来源:${mirror_name}..."
if [[ "${DRY_RUN}" == true ]]; then
log_info "[演练模式] 将会从以下地址导入 GPG 密钥: ${key_url}"
return 0
fi
# > Remove old keyring if exists, so gpg --dearmor does not prompt/append
[[ -f "${keyring_path}" ]] && rm -f "${keyring_path}"
# > Download and dearmor the key; with `set -o pipefail` a curl failure
# > also fails this pipeline and is reported below
if ! curl -fsSL "${key_url}" | gpg --dearmor -o "${keyring_path}" 2>/dev/null; then
log_error "导入 GPG 密钥失败: ${key_url}"
return 1
fi
# > Verify the keyring parses as a valid key without importing anywhere
if ! gpg --dry-run --quiet --no-keyring --import --import-options import-show \
"${keyring_path}" &>/dev/null; then
log_error "GPG 密钥校验失败"
return 1
fi
# > World-readable so apt (running as _apt) can use the keyring
chmod 644 "${keyring_path}"
log_info "GPG 密钥导入并校验成功"
return 0
}
###
### Configure NGINX APT repository (writes /etc/apt/sources.list.d/nginx.list)
### @param mirror_name string Mirror source name (key of MIRROR_URLS)
### @return 0 on success, 1 on failure
### @require lsb_release
###
configure_nginx_repository() {
local mirror_name=$1
local mirror_url="${MIRROR_URLS[${mirror_name}]}"
local codename
codename="$(lsb_release -cs)"
local repo_file="/etc/apt/sources.list.d/nginx.list"
local keyring_path="/usr/share/keyrings/nginx-archive-keyring.gpg"
# > Mirror directory layouts differ:
# > - official / most mirrors: .../packages/ubuntu
# > - USTC: the base URL already ends in .../ubuntu
local repo_base
case "${mirror_name}" in
ustc)
repo_base="${mirror_url}"
;;
*)
repo_base="${mirror_url}/packages/ubuntu"
;;
esac
log_info "正在配置 NGINX 软件源Ubuntu ${codename}..."
if [[ "${DRY_RUN}" == true ]]; then
log_info "[演练模式] 将会配置软件源deb [signed-by=${keyring_path}] ${repo_base} ${codename} nginx"
return 0
fi
# > One-line deb entry bound to the dedicated keyring imported earlier
local repo_config="deb [signed-by=${keyring_path}] ${repo_base} ${codename} nginx"
echo "${repo_config}" | tee "${repo_file}" > /dev/null
log_debug "已生成软件源配置文件:${repo_file}"
log_debug "软件源地址:${repo_base} ${codename}"
log_info "NGINX 软件源配置完成"
return 0
}
###
### Configure APT pinning preferences for NGINX
### @param none
### @return 0 on success
### @require none
###
configure_apt_pinning() {
  local pref_file="/etc/apt/preferences.d/99nginx"
  log_info "正在配置 APT Pin 优先级..."
  if [[ "${DRY_RUN}" == true ]]; then
    log_info "[演练模式] 将会配置 APT Pin 优先级"
    return 0
  fi
  # > FIX: an APT preferences record may contain exactly ONE "Pin:" line;
  # > the previous version emitted two ("Pin: origin nginx.org" AND
  # > "Pin: release o=nginx"), which makes apt reject the whole record and
  # > silently ignore the pin. Pin on the release origin (o=nginx) — this is
  # > the form used by nginx's official install docs and matches packages
  # > from the nginx repo regardless of which mirror host serves them.
  cat > "${pref_file}" <<EOF
Package: *
Pin: release o=nginx
Pin-Priority: 900
EOF
  log_debug "APT Pin 配置写入:${pref_file}"
  log_info "APT Pin 优先级配置完成"
  return 0
}
###
### Install NGINX package from the configured repository
### @param version string NGINX version to install (stable/mainline/specific)
### @return 0 on success, 1 on failure
### @require apt-get
###
install_nginx_package() {
local version=$1
local package_spec="nginx"
log_info "正在安装 NGINX ${version}..."
# > Update package index so the freshly added nginx repo is visible
if [[ "${DRY_RUN}" == false ]]; then
if ! apt-get update -qq; then
log_error "更新软件包索引失败"
return 1
fi
fi
# > "stable"/"mainline" install whatever the repo serves; anything else is
# > treated as an exact apt version pin.
# > NOTE(review): apt requires the full Debian version string here
# > (e.g. "1.24.0-1~jammy"); a bare "1.24.0" may not resolve — confirm.
if [[ "${version}" != "stable" && "${version}" != "mainline" ]]; then
# > Specific version requested
package_spec="nginx=${version}"
log_debug "安装指定版本:${package_spec}"
else
log_debug "从软件源安装:${version}"
fi
if [[ "${DRY_RUN}" == true ]]; then
log_info "[演练模式] 将会安装软件包:${package_spec}"
return 0
fi
# > Non-interactive install to avoid dpkg configuration prompts
if ! DEBIAN_FRONTEND=noninteractive apt-get install -y -qq "${package_spec}"; then
log_error "安装 NGINX 失败"
return 1
fi
log_info "NGINX 安装完成"
return 0
}
###
### Verify NGINX installation
### @param none
### @return 0 on success, 1 on failure
### @require nginx
###
verify_nginx_installation() {
  log_info "正在验证 NGINX 安装结果..."
  # > The binary must be resolvable through PATH.
  command -v nginx &> /dev/null || {
    log_error "未在 PATH 中找到 nginx 可执行文件"
    return 1
  }
  # > nginx prints its version on stderr, hence 2>&1.
  local nginx_version_output
  nginx_version_output="$(nginx -v 2>&1)"
  log_info "已安装: ${nginx_version_output}"
  # > Syntax-check the shipped configuration.
  nginx -t &>/dev/null || {
    log_error "NGINX 配置文件校验失败"
    return 1
  }
  log_info "NGINX 安装验证通过"
  return 0
}
###
### Enable and start NGINX service
### @param none
### @return 0 on success, 1 on failure
### @require systemctl
###
enable_nginx_service() {
  log_info "正在设置 NGINX 开机自启并启动服务..."
  if [[ "${DRY_RUN}" == true ]]; then
    log_info "[演练模式] 将会启用并启动 NGINX 服务"
    return 0
  fi
  # > Enable at boot, start now, then confirm it is actually running.
  systemctl enable nginx &>/dev/null || {
    log_error "设置 NGINX 开机自启失败"
    return 1
  }
  systemctl start nginx || {
    log_error "启动 NGINX 服务失败"
    return 1
  }
  systemctl is-active --quiet nginx || {
    log_error "NGINX 服务未处于运行状态"
    return 1
  }
  log_info "NGINX 服务已启用并启动"
  return 0
}
###############################################################################
# MAIN ORCHESTRATION
###############################################################################
###
### Display usage information on stdout
### @param none
### @return void
### @require none
###
show_usage() {
cat <<EOF
Usage: ${SCRIPT_NAME} [选项]
NGINX 安装脚本(面向中国大陆镜像加速) v${SCRIPT_VERSION}
选项:
-v, --version VERSION 指定要安装的 NGINX 版本
stable/mainline/1.24.0/...
默认:${DEFAULT_NGINX_VERSION}
-m, --mirror MIRROR 选择镜像源
aliyun/tsinghua/ustc/official
默认:${DEFAULT_MIRROR}
-f, --force 若已安装则强制重装
-d, --dry-run 演练模式:仅展示将执行的操作,不真正执行
--debug 开启调试日志
-h, --help 显示帮助信息
示例:
# 使用默认镜像USTC安装稳定版
sudo ${SCRIPT_NAME}
# 使用清华镜像安装指定版本
sudo ${SCRIPT_NAME} --version 1.24.0 --mirror tsinghua
# 强制重装并开启调试
sudo ${SCRIPT_NAME} --force --debug
# 演练模式预览
sudo ${SCRIPT_NAME} --dry-run
支持的 Ubuntu 版本:
${SUPPORTED_UBUNTU_VERSIONS[*]}
可用镜像源:
${!MIRROR_URLS[*]}
EOF
}
###
### Parse command line arguments into the script-level globals
### @param args array Command line arguments
### @return 0 on success; exits 1 on invalid arguments
### @require none
###
parse_arguments() {
  while [[ $# -gt 0 ]]; do
    case $1 in
      -v|--version)
        # > FIX: previously a missing value made `shift 2` fail and, under
        # > `set -e`, aborted the script with no diagnostic at all.
        if [[ $# -lt 2 ]]; then
          log_error "参数 $1 需要一个值"
          show_usage
          exit 1
        fi
        NGINX_VERSION="$2"
        shift 2
        ;;
      -m|--mirror)
        if [[ $# -lt 2 ]]; then
          log_error "参数 $1 需要一个值"
          show_usage
          exit 1
        fi
        MIRROR_SOURCE="$2"
        shift 2
        ;;
      -f|--force)
        FORCE_REINSTALL=true
        shift
        ;;
      -d|--dry-run)
        DRY_RUN=true
        shift
        ;;
      --debug)
        CURRENT_LOG_LEVEL="${LOG_LEVEL_DEBUG}"
        shift
        ;;
      -h|--help)
        show_usage
        exit 0
        ;;
      *)
        log_error "未知参数: $1"
        show_usage
        exit 1
        ;;
    esac
  done
  return 0
}
###
### Main installation workflow (8 sequential steps; each aborts on failure)
### @param none
### @return 0 on success, 1 on failure
### @require all functions above
###
main() {
log_info "========================================="
log_info "NGINX 安装脚本 v${SCRIPT_VERSION}"
log_info "========================================="
# > Step 1: Pre-flight checks (root, distro version, mirror name)
log_info "步骤 1/8执行预检查..."
check_root_privileges || return 1
validate_ubuntu_version || return 1
validate_mirror_source "${MIRROR_SOURCE}" || return 1
# > Step 2: Check mirror connectivity
log_info "步骤 2/8检查镜像连通性..."
if ! check_mirror_connectivity "${MIRROR_URLS[${MIRROR_SOURCE}]}"; then
log_warn "主镜像不可用,尝试回退方案..."
# > Fallback to official if mirror fails.
# > NOTE(review): if the official source is also unreachable the script
# > still proceeds and only fails later at apt-get — confirm intended.
if [[ "${MIRROR_SOURCE}" != "official" ]]; then
MIRROR_SOURCE="official"
log_info "已回退到官方源"
fi
fi
# > Step 3: Install dependencies
log_info "步骤 3/8安装依赖..."
install_dependencies || return 1
# > Step 4: Handle existing installation (fails here unless --force)
log_info "步骤 4/8检查已安装版本..."
remove_existing_nginx || return 1
# > Step 5: Import GPG key
log_info "步骤 5/8导入 NGINX GPG 密钥..."
import_nginx_gpg_key "${MIRROR_SOURCE}" || return 1
# > Step 6: Configure repository and pinning
log_info "步骤 6/8配置 NGINX 软件源..."
configure_nginx_repository "${MIRROR_SOURCE}" || return 1
configure_apt_pinning || return 1
# > Step 7: Install NGINX and verify binary + config
log_info "步骤 7/8安装 NGINX..."
install_nginx_package "${NGINX_VERSION}" || return 1
verify_nginx_installation || return 1
# > Step 8: Enable service
log_info "步骤 8/8启用 NGINX 服务..."
enable_nginx_service || return 1
log_info "========================================="
log_info "✓ NGINX 安装完成!"
log_info "========================================="
if [[ "${DRY_RUN}" == false ]]; then
log_info "服务状态: $(systemctl is-active nginx)"
log_info "NGINX 版本: $(nginx -v 2>&1 | cut -d'/' -f2)"
log_info ""
log_info "常用命令:"
log_info " 启动: sudo systemctl start nginx"
log_info " 停止: sudo systemctl stop nginx"
log_info " 重启: sudo systemctl restart nginx"
log_info " 状态: sudo systemctl status nginx"
log_info " 校验配置: sudo nginx -t"
fi
return 0
}
###############################################################################
# SCRIPT ENTRY POINT
###############################################################################
# ASCII Flow Diagram - Function Call Hierarchy
# ┌─────────────────────────────────────────────────────────────┐
# │ MAIN() │
# └──────────────────┬──────────────────────────────────────────┘
# │
# ┌─────────────┼─────────────┬──────────────┬─────────────┐
# │ │ │ │ │
# ▼ ▼ ▼ ▼ ▼
# ┌─────────┐ ┌──────────┐ ┌─────────┐ ┌────────────┐ ┌─────────┐
# │Pre-flight│ │Install │ │Import │ │Configure │ │Install │
# │Checks │ │Deps │ │GPG Key │ │Repository │ │NGINX │
# └─────────┘ └──────────┘ └─────────┘ └────────────┘ └─────────┘
# │ │ │ │ │
# ├─check_root_privileges │ │ │
# ├─validate_ubuntu_version │ │ │
# └─validate_mirror_source │ │ │
# │ │ │ │
# └─install_dependencies │ │
# │ │ │
# └─import_nginx_gpg_key │
# │ │
# ├─configure_nginx_repository
# └─configure_apt_pinning
# │
# ├─install_nginx_package
# └─verify_nginx_installation
# Parse command line arguments before running anything
parse_arguments "$@"
# Execute the main workflow and propagate its status as the script exit code
main
exit $?

View File

@@ -1,9 +1,8 @@
upstream proxy_server {
ip_hash;
server 192.168.0.2:30500;
server 192.168.0.4:30500;
server 192.168.0.5:30500;
server 192.168.0.6:30500;
server 192.168.1.4:30500;
server 192.168.1.3:30500;
server 192.168.1.5:30500;
}
server {
@@ -22,7 +21,7 @@ server {
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 12k;
proxy_set_header Host fake-domain.xakny.io;
proxy_set_header Host fake-domain.sc-my-uav-260202.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";

View File

@@ -6,7 +6,18 @@ cp kube_config_cluster.yml /root/.kube/config
kubectl apply -f k8s-dashboard.yaml
kubectl delete -f k8s-dashboard.yaml
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
kubectl create token admin-user -n kubernetes-dashboard --duration=26280h
kubectl create token read-only-user -n kubernetes-dashboard --duration=26280h
# 1 token 的管控
# 删除旧的绑定(为了保险起见,避免残留)
kubectl delete clusterrolebinding admin-user
# 重新创建绑定
kubectl create clusterrolebinding admin-user \
--clusterrole=cluster-admin \
--serviceaccount=kubernetes-dashboard:admin-user
# 3 重新生成 token
kubectl create token admin-user -n kubernetes-dashboard --duration=26280h
## 你无法查看已经生成的 Token 列表。
kubectl apply -f k8s-nfs.yaml
kubectl delete -f k8s-nfs.yaml
@@ -16,10 +27,11 @@ kubectl -n kube-system describe pod $(kubectl -n kube-system get pods | grep nfs
kubectl apply -f k8s-nfs-test.yaml
kubectl delete -f k8s-nfs-test.yaml
# 在NFS-Server机器上执行
cd /var/lib/docker/nfs_data
kubectl create ns xakny
kubectl create ns sc-my-uav-260202
kubectl apply -f k8s-pvc.yaml
kubectl delete -f k8s-pvc.yaml
@@ -37,6 +49,9 @@ kubectl delete -f k8s-rabbitmq.yaml
kubectl apply -f k8s-redis.yaml
kubectl delete -f k8s-redis.yaml
kubectl apply -f k8s-influxdb.yaml
kubectl delete -f k8s-influxdb.yaml
kubectl apply -f k8s-mysql.yaml
kubectl delete -f k8s-mysql.yaml

View File

@@ -1,6 +1,6 @@
export harbor_host=192.168.0.2:8033
export harbor_host=192.168.1.4:8033
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects

View File

@@ -6,7 +6,7 @@ wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/jq-linux-am
chmod +x /usr/local/bin/jq
export name_space=xafkapp
export name_space=xayd
kubectl delete pods -n $name_space --field-selector status.phase!=Running --force

View File

@@ -0,0 +1,862 @@
#!/usr/bin/env bash
#===============================================================================
# 名称: lvm_extend_with_disk.sh
# 描述: 使用新增裸盘扩展指定挂载目录对应的 LVM LVext4 / xfs
# 作者: WDD
# 版本: 1.0.0
# 许可证: MIT
#
# 依赖(命令): bash(>=5.0), coreutils, util-linux, lvm2, gdisk 或 parted,
# findmnt, lsblk, blkid, wipefs, partprobe, udevadm,
# resize2fs(用于ext4), xfs_growfs(用于xfs)
#
# 安全说明:
# - 本脚本会对“新增裸盘”进行 GPT 分区、清空签名(wipefs)、创建 PV 等破坏性操作。
# - 默认会做严格安全检查:若检测到磁盘疑似在使用/含签名/已有分区,将拒绝执行(可用 --force 覆盖部分检查)。
#===============================================================================
set -euo pipefail
IFS=$'\n\t'
#===============================================================================
# 全局常量定义区
#===============================================================================
readonly SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_VERSION="1.0.0"
# 日志级别DEBUG/INFO/WARN/ERROR
readonly LOG_LEVEL_DEFAULT="INFO"
#===============================================================================
# 全局变量(由参数控制)
#===============================================================================
log_level="${LOG_LEVEL_DEFAULT}"
dry_run="0"
force="0"
raw_disk="" # 例如 /dev/sdb
target_mount="" # 例如 /data
# 运行时探测结果
fs_type="" # ext4 | xfs
mount_source="" # /dev/mapper/vg-lv 或 /dev/vg/lv
lv_path="" # 规范化后的 LV 真实路径
vg_name="" # VG 名称
new_part="" # 新建分区路径:/dev/sdb1 或 /dev/nvme0n1p1
# 回滚动作栈(先进后出)
rollback_stack=()
committed="0"
#===============================================================================
# 函数调用关系图ASCII
#===============================================================================
# main
# ├─ parse_args
# ├─ init_traps
# ├─ require_root
# ├─ check_dependencies
# ├─ detect_target_lvm_by_mount
# │ ├─ get_mount_info
# │ ├─ normalize_lv_and_vg
# ├─ validate_raw_disk_safe
# ├─ prepare_gpt_partition_for_lvm
# │ ├─ make_partition_path
# │ ├─ create_gpt_partition (sgdisk|parted)
# ├─ create_and_attach_pv_to_vg
# │ ├─ pvcreate
# │ ├─ vgextend
# ├─ extend_lv_and_grow_fs
# │ ├─ lvextend -r
# ├─ verify_result
# └─ success_exit
#
# on_error (trap ERR)
# └─ rollback_all
#===============================================================================
#===============================================================================
# 日志模块
#===============================================================================
### Unified logging entry point.
# @param level string Log level (DEBUG/INFO/WARN/ERROR)
# @param message string Message text (remaining args are joined)
# @return 0 on success
# @require date, printf
log() {
  local level="$1"; shift
  local message="$*"
  # > Filter first: DEBUG < INFO < WARN < ERROR. Doing the level check
  # > before calling `date` avoids forking a subshell for every message
  # > that is going to be dropped anyway (hot path for DEBUG logs).
  local order_current order_target
  order_current="$(log_level_to_int "$log_level")"
  order_target="$(log_level_to_int "$level")"
  if (( order_target < order_current )); then
    return 0
  fi
  local ts
  ts="$(date '+%Y-%m-%d %H:%M:%S')"
  # > All output goes to stderr so stdout stays usable for data.
  printf '[%s] [%s] %s\n' "$ts" "$level" "$message" >&2
}
### Map a log-level name to its numeric rank.
# @param level string Log level name
# @return 0; echoes the numeric rank (unknown names default to INFO/20)
# @require printf
log_level_to_int() {
  case "$1" in
    DEBUG) printf '10' ;;
    WARN)  printf '30' ;;
    ERROR) printf '40' ;;
    *)     printf '20' ;;  # INFO and anything unrecognized
  esac
}
# Convenience wrappers: one shorthand per log level.
log_debug(){ log "DEBUG" "$*"; }
log_info(){ log "INFO" "$*"; }
log_warn(){ log "WARN" "$*"; }
log_error(){ log "ERROR" "$*"; }
#===============================================================================
# 工具/执行模块
#===============================================================================
### Execute a command, honoring dry-run mode.
# @param cmd string... Command and its arguments
# @return 0 on success; non-zero on failure
# @require printf, bash
run_cmd() {
  case "$dry_run" in
    1)
      # > Dry-run: only announce what would have been executed.
      log_info "[DRY-RUN] $*"
      ;;
    *)
      log_debug "RUN: $*"
      "$@"
      ;;
  esac
}
### Verify that a command is available on PATH.
# @param cmd string Command name
# @return 0 if present; 1 otherwise
# @require command
require_cmd() {
  local cmd="$1"
  # > Found -> succeed immediately; otherwise report and fail.
  command -v "$cmd" >/dev/null 2>&1 && return 0
  log_error "缺少依赖命令: $cmd"
  return 1
}
### Register a rollback action (a command string later run via `bash -c`, LIFO).
# @param action string Shell command to execute on rollback
# @return 0
# @require none
push_rollback() {
  local step="$1"
  rollback_stack+=("$step")
  log_debug "已登记回滚动作: $step"
}
### Execute all registered rollback actions in LIFO order (best effort).
# @return 0 (individual rollback failures are logged and ignored)
# @require bash
rollback_all() {
local i
if (( ${#rollback_stack[@]} == 0 )); then
log_warn "无回滚动作可执行。"
return 0
fi
log_warn "开始回滚(共 ${#rollback_stack[@]} 步)..."
# > Walk the stack from newest to oldest registration.
for (( i=${#rollback_stack[@]}-1; i>=0; i-- )); do
local action="${rollback_stack[$i]}"
log_warn "回滚: $action"
if [[ "$dry_run" == "1" ]]; then
log_info "[DRY-RUN] 跳过回滚执行"
continue
fi
# > Best effort: a failed rollback step must not stop the remaining steps.
bash -c "$action" || log_warn "回滚动作执行失败(已忽略): $action"
done
log_warn "回滚结束。"
}
#===============================================================================
# Trap/错误处理模块
#===============================================================================
### Install the ERR and EXIT traps (error handling + final status report).
# @return 0
# @require trap
init_traps() {
trap 'on_error $? $LINENO' ERR
trap 'on_exit $?' EXIT
}
### ERR trap handler: log, then roll back unless changes were committed.
# @param exit_code int Exit code of the failing command
# @param line_no int Line number where the failure occurred
# @return never returns; exits with exit_code
# @require none
on_error() {
local exit_code="$1"
local line_no="$2"
log_error "脚本执行失败exit=$exit_code, line=$line_no)。"
# > Once the LV/FS has been grown (committed=1), automatic rollback would
# > itself be destructive, so it is skipped deliberately.
if [[ "${committed}" == "1" ]]; then
log_error "检测到已提交变更LV/FS 已扩容),为避免破坏性操作:将跳过自动回滚。"
exit "$exit_code"
fi
log_error "将进行回滚..."
rollback_all
exit "$exit_code"
}
### EXIT trap handler: report the final status (never rolls back).
# @param exit_code int Exit code of the script
# @return 0
# @require none
on_exit() {
  local exit_code="$1"
  case "$exit_code" in
    0) log_info "完成:扩展流程已成功结束。" ;;
    *) log_error "退出脚本以非0退出码结束exit=$exit_code)。" ;;
  esac
}
#===============================================================================
# 参数解析与使用说明
#===============================================================================
### Print the usage/help text to stdout.
# @return 0
# @require cat
usage() {
cat <<EOF
用法:
$SCRIPT_NAME --disk /dev/sdX --mount /path/to/mount [--force] [--dry-run] [--log-level DEBUG|INFO|WARN|ERROR]
参数:
--disk, -d 需要格式化并用于扩展的裸盘(例如 /dev/sdb, /dev/nvme1n1
--mount, -m 需要扩展的挂载目录(例如 /data
--force, -f 强制执行:当检测到磁盘含签名/已有分区等风险时仍继续(仍会拒绝“已挂载/已作为PV”的磁盘
--dry-run 演练模式:仅打印将执行的操作,不实际改动
--log-level 日志级别(默认 INFO
示例:
$SCRIPT_NAME -d /dev/sdb -m /data
$SCRIPT_NAME -d /dev/nvme1n1 -m /data --force
EOF
}
### Parse CLI arguments into the script-level variables.
# @param args string... Argument list
# @return 0 on success; exits 2 on bad usage
# @require shift
parse_args() {
  while (( $# > 0 )); do
    case "$1" in
      -d|--disk|-m|--mount)
        # > FIX: previously a missing value made `shift 2` fail and, under
        # > `set -e`, aborted the script without any usable diagnostic.
        if (( $# < 2 )); then
          log_error "参数 $1 需要一个值"
          usage
          exit 2
        fi
        if [[ "$1" == "-d" || "$1" == "--disk" ]]; then
          raw_disk="$2"
        else
          target_mount="$2"
        fi
        shift 2
        ;;
      --log-level)
        # > Missing/empty value falls back to INFO (previous behaviour).
        log_level="${2:-INFO}"
        if (( $# < 2 )); then
          shift
        else
          shift 2
        fi
        ;;
      -f|--force) force="1"; shift ;;
      --dry-run) dry_run="1"; shift ;;
      -h|--help) usage; exit 0 ;;
      *)
        log_error "未知参数: $1"
        usage
        exit 2
        ;;
    esac
  done
  if [[ -z "$raw_disk" || -z "$target_mount" ]]; then
    log_error "必须提供 --disk 与 --mount"
    usage
    exit 2
  fi
}
#===============================================================================
# 前置检查模块
#===============================================================================
### Ensure the script runs as root (partitioning/LVM operations need it).
# @return 0 if root; 1 otherwise
# @require id
require_root() {
  [[ "$(id -u)" == "0" ]] && return 0
  log_error "必须以 root 运行(需要分区/LVM 操作权限)。"
  return 1
}
### Verify that every required external command is installed.
# @return 0 on success; non-zero if anything is missing
# @require command
check_dependencies() {
  # > Base tooling followed by the LVM suite, in the original check order.
  local dep
  for dep in findmnt lsblk blkid wipefs partprobe udevadm \
             pvs vgs lvs pvcreate pvremove vgextend vgreduce lvextend; do
    require_cmd "$dep"
  done
  # > Partitioning tool: prefer sgdisk (gdisk suite); otherwise parted.
  if ! command -v sgdisk >/dev/null 2>&1; then
    require_cmd parted
  fi
  # > Filesystem growers (resize2fs / xfs_growfs) are validated later,
  # > once fs_type has been detected for the target mount.
}
#===============================================================================
# 目标挂载点探测模块
#===============================================================================
### Fetch mount information (sets globals: mount_source, fs_type).
# @param mount_path string Mount directory
# @return 0 on success; 1 if not mounted, info missing, or fs unsupported
# @require findmnt
get_mount_info() {
local mount_path="$1"
# > -T resolves the path to the mount it belongs to
if ! findmnt -T "$mount_path" >/dev/null 2>&1; then
log_error "挂载目录不存在或未挂载: $mount_path"
return 1
fi
mount_source="$(findmnt -nr -T "$mount_path" -o SOURCE)"
fs_type="$(findmnt -nr -T "$mount_path" -o FSTYPE)"
if [[ -z "$mount_source" || -z "$fs_type" ]]; then
log_error "无法获取挂载信息: mount=$mount_path"
return 1
fi
# > Only ext4 and xfs are supported by the later grow step
case "$fs_type" in
ext4|xfs) ;;
*)
log_error "仅支持 ext4 或 xfs当前检测到: $fs_type"
return 1
;;
esac
}
### Get the MAJ:MIN device numbers of a block device.
# @param dev string Block device path
# @return 0 on success (echoes e.g. "253:0"); 1 on failure
# @require lsblk, awk, printf
get_maj_min() {
  local dev="$1" mm
  # > awk '{$1=$1;print}' trims the padding lsblk puts around the column.
  mm="$(lsblk -dn -o MAJ:MIN "$dev" 2>/dev/null | awk '{$1=$1;print}')"
  if [[ -n "$mm" ]]; then
    printf '%s' "$mm"
    return 0
  fi
  log_error "无法获取 MAJ:MIN: dev=$dev"
  return 1
}
### Resolve LV/VG by scanning `lvs` output: dm_path first, then MAJ:MIN.
# @param src_dev string Resolved mount device (e.g. /dev/mapper/vg-lv or /dev/dm-0)
# @param real_dev string Real device (after readlink -f, e.g. /dev/dm-0)
# @return 0 on success; echoes "lv_path|vg_name"; 1 if no match found
# @require lvs, awk, readlink, lsblk
lookup_lv_vg_from_lvs() {
local src_dev="$1"
local real_dev="$2"
local mm
mm="$(get_maj_min "$real_dev")"
# A) First attempt: when lvs can report lv_dm_path, match on it directly
#    (most reliable).
# > Avoids `lvs --select` on purpose: selection syntax varies across lvm2
# > versions.
if out="$(lvs --noheadings --separator '|' -o lv_path,vg_name,lv_dm_path 2>/dev/null)"; then
local hit
hit="$(
echo "$out" | awk -F'|' -v s="$src_dev" -v r="$real_dev" '
function trim(x){gsub(/^[ \t]+|[ \t]+$/,"",x); return x}
{
lv=trim($1); vg=trim($2); dm=trim($3)
if(lv=="" || vg=="") next
# 直接匹配 dm_path / src_dev / real_dev
if(dm==s || dm==r) {print lv "|" vg; exit}
}
'
)"
if [[ -n "$hit" ]]; then
printf '%s' "$hit"
return 0
fi
# > Relaxed pass: compare readlink -f(dm_path) against real_dev — handles
# > dm_path being a /dev/mapper/* symlink.
hit="$(
echo "$out" | while IFS= read -r line; do
[[ -z "$line" ]] && continue
local lv vg dm dm_real
lv="$(echo "$line" | awk -F'|' '{gsub(/^[ \t]+|[ \t]+$/,"",$1);print $1}')"
vg="$(echo "$line" | awk -F'|' '{gsub(/^[ \t]+|[ \t]+$/,"",$2);print $2}')"
dm="$(echo "$line" | awk -F'|' '{gsub(/^[ \t]+|[ \t]+$/,"",$3);print $3}')"
[[ -z "$lv" || -z "$vg" || -z "$dm" ]] && continue
dm_real="$(readlink -f "$dm" 2>/dev/null || true)"
if [[ "$dm_real" == "$real_dev" ]]; then
echo "${lv}|${vg}"
break
fi
done
)"
if [[ -n "$hit" ]]; then
printf '%s' "$hit"
return 0
fi
fi
# B) Fallback: match lv_kernel_major/minor against MAJ:MIN (most portable
#    across lvm2 versions).
# > NOTE(review): some environments may name these columns differently,
# > though most lvm2 builds support both fields — confirm on target hosts.
if out2="$(lvs --noheadings --separator '|' -o lv_path,vg_name,lv_kernel_major,lv_kernel_minor 2>/dev/null)"; then
local major minor
major="${mm%%:*}"
minor="${mm##*:}"
local hit2
hit2="$(
echo "$out2" | awk -F'|' -v mj="$major" -v mn="$minor" '
function trim(x){gsub(/^[ \t]+|[ \t]+$/,"",x); return x}
{
lv=trim($1); vg=trim($2); kmj=trim($3); kmn=trim($4)
if(lv=="" || vg=="") next
if(kmj==mj && kmn==mn) {print lv "|" vg; exit}
}
'
)"
if [[ -n "$hit2" ]]; then
printf '%s' "$hit2"
return 0
fi
fi
# C) Last resort: read the VG/LV columns straight from lsblk (provided by
#    some util-linux builds).
# > NOTE(review): `out`, `out2`, `vg` and `lvname` are assigned without
# > `local` and leak into the global scope — verify nothing else reads them.
if vg="$(lsblk -dn -o VG "$real_dev" 2>/dev/null | awk '{$1=$1;print}')" \
&& lvname="$(lsblk -dn -o LV "$real_dev" 2>/dev/null | awk '{$1=$1;print}')"; then
if [[ -n "$vg" && -n "$lvname" ]]; then
printf '/dev/%s/%s|%s' "$vg" "$lvname" "$vg"
return 0
fi
fi
return 1
}
### Translate a findmnt SOURCE value into a usable block-device path.
### Accepts /dev/xxx, UUID=..., LABEL=..., or a bare device/mapper name.
# @param source string mount source (/dev/xxx, UUID=..., LABEL=...)
# @return 0 on success (echoes /dev/xxx); non-zero on failure
# @require blkid, printf
resolve_mount_source_device() {
    local source="$1"
    local dev=""
    if [[ "$source" == /dev/* ]]; then
        dev="$source"
    elif [[ "$source" == UUID=* || "$source" == LABEL=* ]]; then
        # > Key step: map a UUID=xxxx / LABEL=xxx token onto a concrete /dev node
        dev="$(blkid -o device -t "$source" 2>/dev/null | head -n1 || true)"
    elif [[ -e "/dev/$source" ]]; then
        # Occasionally SOURCE is a bare name (e.g. a mapper target); try
        # completing it under /dev
        dev="/dev/$source"
    fi
    if [[ -z "$dev" || ! -e "$dev" ]]; then
        log_error "无法将挂载源解析为设备路径: source=$source"
        return 1
    fi
    if [[ ! -b "$dev" ]]; then
        log_error "解析后的路径不是块设备: dev=$dev (from source=$source)"
        return 1
    fi
    printf '%s' "$dev"
}
### Normalize the LV path and obtain the VG name (robust version: avoids
### the lvs --select compatibility trap). On success sets the globals
### lv_path and vg_name for later lvextend steps.
# @param source string mount source (findmnt SOURCE)
# @return 0 on success, 1 on failure
# @require readlink, lvs, awk, lsblk, blkid
normalize_lv_and_vg() {
    local source="$1"
    local src_dev real_dev out
    src_dev="$(resolve_mount_source_device "$source")"
    real_dev="$(readlink -f "$src_dev")"
    if [[ -z "$real_dev" || ! -b "$real_dev" ]]; then
        log_error "无法解析真实设备路径: src_dev=$src_dev real_dev=$real_dev"
        return 1
    fi
    log_debug "mount source resolve: source=$source src_dev=$src_dev real_dev=$real_dev"
    if ! out="$(lookup_lv_vg_from_lvs "$src_dev" "$real_dev")"; then
        log_error "无法从挂载源反查 LVM LV/VG:source=$source src_dev=$src_dev real_dev=$real_dev"
        # > Bug fix: the hint was previously wrapped in backticks inside a
        # > double-quoted string, so bash *executed* the lvs command via
        # > command substitution instead of printing it. Single quotes keep
        # > the command text literal.
        log_error '诊断建议:执行 lvs -a -o lv_path,vg_name,lv_dm_path,lv_kernel_major,lv_kernel_minor 查看映射。'
        return 1
    fi
    # Split "lv_path|vg_name" produced by lookup_lv_vg_from_lvs
    lv_path="${out%%|*}"
    vg_name="${out##*|}"
    if [[ -z "$lv_path" || -z "$vg_name" ]]; then
        log_error "解析 LV/VG 失败: out=$out"
        return 1
    fi
    # Extra sanity check: the reported lv_path should normally exist on disk.
    if [[ ! -e "$lv_path" ]]; then
        log_warn "lvs 返回的 lv_path 不存在,尝试 readlink/替代路径: lv_path=$lv_path"
        # > Not fatal here; a later lvextend will fail with a clearer error.
    fi
    log_info "已解析目标: vg=$vg_name lv=$lv_path (from source=$source)"
}
### Detect the target LVM objects from the configured mount point.
### Reads/sets globals populated elsewhere: target_mount, mount_source,
### fs_type (from get_mount_info), lv_path, vg_name (from normalize_lv_and_vg).
# @return 0 on success
# @require none
detect_target_lvm_by_mount() {
    get_mount_info "$target_mount"
    normalize_lv_and_vg "$mount_source"
    # Supplementary dependency check for the filesystem-grow tool.
    # NOTE(review): any non-ext4 filesystem is assumed to be XFS — confirm
    # no other fs types can reach this point.
    if [[ "$fs_type" == "ext4" ]]; then
        require_cmd resize2fs
    else
        require_cmd xfs_growfs
    fi
    log_info "目标挂载点: $target_mount"
    log_info "挂载源设备: $mount_source"
    log_info "文件系统类型: $fs_type"
    log_info "LV 路径: $lv_path"
    log_info "VG 名称: $vg_name"
}
#===============================================================================
# Safety-check module for the newly added raw disk
#===============================================================================
### Validate that a raw disk is safe to consume for LVM.
# @param disk string raw disk path (/dev/sdX or /dev/nvmeXnY)
# @return 0 on success
# @require lsblk, pvs, wipefs
validate_raw_disk_safe() {
    local disk="$1"
    if [[ ! -b "$disk" ]]; then
        log_error "指定磁盘不是块设备: $disk"
        return 1
    fi
    # Must be of lsblk TYPE "disk" (not a partition, LV, rom, ...)
    local dtype
    dtype="$(lsblk -dn -o TYPE "$disk" 2>/dev/null || true)"
    if [[ "$dtype" != "disk" ]]; then
        log_error "指定设备不是磁盘(disk)类型: $disk (type=$dtype)"
        return 1
    fi
    # Must not already be an LVM PV
    if pvs --noheadings -o pv_name 2>/dev/null | awk '{$1=$1;print}' | grep -Fxq "$disk"; then
        log_error "磁盘已是 LVM PV,拒绝操作: $disk"
        return 1
    fi
    # Neither the disk nor any of its partitions may be mounted
    local mps
    mps="$(lsblk -nr -o MOUNTPOINT "$disk" 2>/dev/null | awk 'NF{print}')"
    if [[ -n "$mps" ]]; then
        log_error "检测到磁盘或其分区已挂载,拒绝操作: $disk"
        log_error "挂载点: $mps"
        return 1
    fi
    # Refuse by default when child partitions or signatures exist
    # (--force relaxes this check)
    local has_children has_sig
    has_children="$(lsblk -nr "$disk" -o NAME | awk 'NR>1{print}' | wc -l | awk '{$1=$1;print}')"
    has_sig="0"
    # > Heuristic: `wipefs -n` printing more than one line is treated as
    # > "signature present". NOTE(review): wipefs output format differs
    # > across util-linux versions — confirm on the target distributions.
    if wipefs -n "$disk" | awk 'NR>1{exit 0} END{exit 1}'; then
        has_sig="1"
    fi
    if [[ "$force" != "1" ]]; then
        if [[ "$has_children" != "0" ]]; then
            log_error "磁盘存在分区/子设备(默认拒绝)。可加 --force 继续: $disk"
            return 1
        fi
        if [[ "$has_sig" == "1" ]]; then
            log_error "磁盘存在文件系统/签名(默认拒绝)。可加 --force 继续: $disk"
            return 1
        fi
    else
        # Even with --force, never touch a disk whose partitions are already PVs
        if pvs --noheadings -o pv_name 2>/dev/null | awk '{$1=$1;print}' | grep -q "^${disk}"; then
            log_error "检测到磁盘相关分区已是 PV(即使 --force 也拒绝): $disk"
            return 1
        fi
    fi
    log_info "新增裸盘安全检查通过: $disk"
}
#===============================================================================
# Partitioning and PV/VG/LV extension module
#===============================================================================
### Build the first-partition path for a disk (nvme/mmc naming aware).
# @param disk string disk path
# @return 0 on success (echoes the partition path)
# @require printf
make_partition_path() {
    local disk="$1"
    # > Devices whose name ends in a digit (nvme0n1, mmcblk0) need a "p"
    # > separator before the partition number.
    case "$disk" in
        *[0-9]) printf '%sp1' "$disk" ;;
        *)      printf '%s1' "$disk" ;;
    esac
}
### Create a GPT label and a single full-disk partition typed for LVM.
### On success the global new_part holds the created partition path.
# @param disk string raw disk
# @return 0 on success
# @require sgdisk or parted, partprobe, udevadm, wipefs
prepare_gpt_partition_for_lvm() {
    local disk="$1"
    new_part="$(make_partition_path "$disk")"
    # Rollback: best effort to restore the disk to a "no partition table /
    # no signature" state.
    # > NOTE: this rollback is destructive; it is only valid because the
    # > target is a freshly added raw disk.
    push_rollback "wipefs -a '$disk' || true"
    if command -v sgdisk >/dev/null 2>&1; then
        push_rollback "sgdisk --zap-all '$disk' || true"
    else
        # parted has no zap-all equivalent; best effort: zero the first
        # 10MiB with dd
        push_rollback "dd if=/dev/zero of='$disk' bs=1M count=10 conv=fsync || true"
    fi
    log_info "开始为裸盘创建 GPT + LVM 分区: disk=$disk part=$new_part"
    # > Key step: wipe existing signatures first, then partition
    run_cmd wipefs -a "$disk"
    if command -v sgdisk >/dev/null 2>&1; then
        # > Key step: clear the old partition table and create a fresh GPT
        run_cmd sgdisk --zap-all "$disk"
        run_cmd sgdisk -og "$disk"
        # > Key step: partition 1 spans the whole disk, type 8e00 (Linux LVM)
        run_cmd sgdisk -n 1:0:0 -t 1:8e00 -c 1:"lvm-pv" "$disk"
    else
        # parted fallback
        # > Key step: GPT label + single partition + LVM flag
        run_cmd parted -s "$disk" mklabel gpt
        run_cmd parted -s "$disk" mkpart primary 1MiB 100%
        run_cmd parted -s "$disk" set 1 lvm on
    fi
    run_cmd partprobe "$disk"
    run_cmd udevadm settle
    if [[ ! -b "$new_part" ]]; then
        log_error "分区设备未出现(可能需要稍等或内核未识别): $new_part"
        return 1
    fi
    log_info "分区创建完成: $new_part"
}
### Create a PV on the new partition and extend the target VG with it.
# @param part string new partition
# @param vg string target VG name
# @return 0 on success
# @require pvcreate, vgextend, vgreduce, pvremove
create_and_attach_pv_to_vg() {
    local part="$1"
    local vg="$2"
    # Guard against misuse: the new partition must not already be a PV
    if pvs --noheadings -o pv_name 2>/dev/null | awk '{$1=$1;print}' | grep -Fxq "$part"; then
        log_error "分区已是 PV,拒绝重复操作: $part"
        return 1
    fi
    log_info "创建 PV: $part"
    run_cmd pvcreate -y "$part"
    # > A rollback entry is pushed immediately after each committed step so
    # > failures unwind in reverse order.
    push_rollback "pvremove -ff -y '$part' || true"
    log_info "扩展 VG: vgextend $vg $part"
    run_cmd vgextend "$vg" "$part"
    push_rollback "vgreduce '$vg' '$part' || true"
}
### Extend the LV with all free extents and grow the filesystem in one go
### (lvextend -r). Sets the global `committed` flag so that automatic
### rollback is disabled once the change has hit the disk.
# @param lv string LV path
# @return 0 on success
# @require lvextend
extend_lv_and_grow_fs() {
    local lv="$1"
    log_info "扩展 LV 并自动扩容文件系统: lvextend -l +100%FREE -r $lv"
    run_cmd lvextend -l +100%FREE -r "$lv"
    # > Key step: from this point the disk change is committed, so the
    # > automatic rollback machinery must never run again.
    committed="1"
    log_info "已提交变更(LV/FS 扩容已完成),后续将禁止自动回滚。"
}
#===============================================================================
# Result verification module
#===============================================================================
### Verify the extension result (fixes a false negative in the PV->VG check).
# @param mount_path string mounted directory
# @param lv string LV path
# @param part string newly added PV partition
# @param vg string VG name
# @return 0 on success
# @require df, lvs, vgs, pvs, awk, readlink
verify_result() {
    local mount_path="$1"
    local lv="$2"
    local part="$3"
    local vg="$4"
    log_info "开始校验扩展结果..."
    # 1) Is the PV a member of the VG? (structured parsing + canonicalized
    #    paths so /dev/dm-* aliases still match)
    local part_real
    part_real="$(readlink -f "$part" 2>/dev/null || true)"
    [[ -z "$part_real" ]] && part_real="$part"
    local found="0"
    # > Process substitution keeps the loop in the current shell so `found`
    # > survives the loop.
    while IFS='|' read -r pv_name pv_vg; do
        pv_name="$(awk '{$1=$1;print}' <<<"${pv_name:-}")"
        pv_vg="$(awk '{$1=$1;print}' <<<"${pv_vg:-}")"
        [[ -z "$pv_name" || -z "$pv_vg" ]] && continue
        local pv_real
        pv_real="$(readlink -f "$pv_name" 2>/dev/null || true)"
        [[ -z "$pv_real" ]] && pv_real="$pv_name"
        if [[ "$pv_real" == "$part_real" && "$pv_vg" == "$vg" ]]; then
            found="1"
            break
        fi
    done < <(pvs --noheadings --separator '|' -o pv_name,vg_name 2>/dev/null || true)
    if [[ "$found" != "1" ]]; then
        log_error "校验失败:新增 PV 未正确加入 VG: part=$part (real=$part_real) vg=$vg"
        log_error "诊断信息:pvs -o pv_name,vg_name 输出如下(供排查):"
        if [[ "$dry_run" == "1" ]]; then
            log_info "[DRY-RUN] 跳过诊断输出"
        else
            pvs --noheadings --separator '|' -o pv_name,vg_name >&2 || true
        fi
        return 1
    fi
    # 2) Is the LV queryable via lvs?
    if ! lvs --noheadings -o lv_path 2>/dev/null | awk '{$1=$1;print}' | grep -Fxq "$lv"; then
        log_error "校验失败:无法查询到 LV: $lv"
        return 1
    fi
    # 3) Human-readable df output for the operator
    log_info "df -h:$mount_path"
    if [[ "$dry_run" == "1" ]]; then
        log_info "[DRY-RUN] 跳过 df -h"
    else
        df -h "$mount_path" >&2
    fi
    log_info "校验通过。"
}
#===============================================================================
# Main flow
#===============================================================================
### Entry point: parse args, detect the target LVM from the mount point,
### partition the new raw disk, attach it as a PV, grow LV + filesystem,
### then verify the result.
# @return 0 on success
# @require none
main() {
    parse_args "$@"
    init_traps
    require_root
    check_dependencies
    detect_target_lvm_by_mount
    validate_raw_disk_safe "$raw_disk"
    prepare_gpt_partition_for_lvm "$raw_disk"
    create_and_attach_pv_to_vg "$new_part" "$vg_name"
    extend_lv_and_grow_fs "$lv_path"
    if ! verify_result "$target_mount" "$lv_path" "$new_part" "$vg_name"; then
        # Verification failure after commit is reported but not fatal:
        # the capacity change has already been applied.
        log_warn "校验未通过,但扩容步骤已完成。请人工复核:pvs/vgs/lvs/df。"
        exit 0
    fi
    # On success clear the rollback stack (prevents the EXIT trap from
    # re-running rollbacks; rollback is only for the ERR trap)
    rollback_stack=()
    log_info "扩展成功:mount=$target_mount lv=$lv_path vg=$vg_name new_pv=$new_part"
}
main "$@"
# pvs -o pv_name,vg_name,pv_size,pv_free
# vgs datavg -o vg_name,vg_size,vg_free
# lvs datavg/lvdata -o lv_path,lv_size,seg_count
# df -hT /var/lib/docker
# 综合目的是为了支持linux环境下新增磁盘逻辑卷的拓展请实现如下的功能
# 1. 参数变量
# 1. 设置需要格式化的裸盘名称
# 2. 设置需要扩展的磁盘目录
# 2. 实际脚本
# 1. 检查脚本需要使用的依赖
# 2. 根据需要扩展的磁盘目录检测到相应的PV LV名称磁盘格式为ext4或者XFS
# 3. 需要将新格式化的裸盘进行GPT格式的修改PV创建然后将PV扩展至需要扩展的磁盘目录的LV逻辑卷中
# 4. 检查实际的扩展是否成功,如果失败,恢复新格式化的裸盘清除的功能,恢复至原本状态

View File

@@ -0,0 +1,17 @@
请以Bash Shell脚本高级开发工程师的身份严格遵循以下编程规范实现指定功能
1. 代码结构规范
- 符合POSIX标准与Bash最佳实践v5.0+
- 实现清晰的模块划分和函数封装
- 采用防御性编程策略处理异常情况
- 包含完善的错误处理机制trap、set -euo pipefail
2. 函数设计标准
- 函数声明需包含: 功能描述段(使用###注释块 参数说明:@param <变量名> <数据类型> <用途说明> 返回值说明:@return <退出码> <状态描述> 环境依赖:@require <依赖项>
- 函数参数命名采用snake_case格式体现语义化特征
3. 文档规范
- 主脚本头部包含: 元数据声明(作者、版本、许可证) 全局常量定义区 模块依赖说明
- 关键算法步骤添加行内注释(# > 开头)
- 维护完整的函数调用关系图使用ASCII流程图
4. 质量保障
- 通过ShellCheck进行静态检测
- 统一的日志函数实现详细的日志分级输出DEBUG/INFO/WARN/ERROR

View File

@@ -0,0 +1,82 @@
# Doris BE configuration.
# > Fix: be.conf must use a literal block scalar ("|"), not a folded one (">").
# > Folding joins adjacent same-indent lines with spaces, which would merge
# > separate "key = value" lines into a single line and corrupt the config.
# > The FE ConfigMap already (correctly) uses "|".
kind: ConfigMap
apiVersion: v1
metadata:
  name: doris-cluster-be-conf
  namespace: xakny
  labels:
    app.kubernetes.io/component: be
data:
  be.conf: |
    CUR_DATE=`date +%Y%m%d-%H%M%S`
    # Log dir
    LOG_DIR="${DORIS_HOME}/log/"
    # For jdk 8
    JAVA_OPTS="-Dfile.encoding=UTF-8 -Xmx2048m -DlogPath=$LOG_DIR/jni.log -Xloggc:$LOG_DIR/be.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives"
    # Set your own JAVA_HOME
    # JAVA_HOME=/path/to/jdk/
    # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
    # https://jemalloc.net/jemalloc.3.html jemalloc 内存分配器设置参数
    JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
    JEMALLOC_PROF_PRFIX=""
    # ports for admin, web, heartbeat service
    be_port = 9060
    webserver_port = 8040
    heartbeat_service_port = 9050
    brpc_port = 8060
    arrow_flight_sql_port = -1
    # HTTPS configures
    enable_https = false
    # path of certificate in PEM format.
    #ssl_certificate_path = "$DORIS_HOME/conf/cert.pem"
    # path of private key in PEM format.
    #ssl_private_key_path = "$DORIS_HOME/conf/key.pem"
    # Choose one if there are more than one ip except loopback address.
    # Note that there should at most one ip match this list.
    # If no ip match this rule, will choose one randomly.
    # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
    # Default value is empty.
    # priority_networks = 10.10.10.0/24;192.168.0.0/16
    # data root path, separate by ';'
    # You can specify the storage type for each root path, HDD (cold data) or SSD (hot data)
    # eg:
    # storage_root_path = /home/disk1/doris;/home/disk2/doris;/home/disk2/doris
    # storage_root_path = /home/disk1/doris,medium:SSD;/home/disk2/doris,medium:SSD;/home/disk2/doris,medium:HDD
    # /home/disk2/doris,medium:HDD(default)
    #
    # you also can specify the properties by setting '<property>:<value>', separate by ','
    # property 'medium' has a higher priority than the extension of path
    #
    # Default value is ${DORIS_HOME}/storage, you should create it by hand.
    # storage_root_path = ${DORIS_HOME}/storage
    # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
    # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
    # Advanced configurations
    # INFO, WARNING, ERROR, FATAL
    sys_log_level = INFO
    # sys_log_roll_mode = SIZE-MB-1024
    # sys_log_roll_num = 10
    # sys_log_verbose_modules = *
    # log_buffer_level = -1
    # aws sdk log level
    # Off = 0,
    # Fatal = 1,
    # Error = 2,
    # Warn = 3,
    # Info = 4,
    # Debug = 5,
    # Trace = 6
    # Default to turn off aws sdk log, because aws sdk errors that need to be cared will be output through Doris logs
    #aws_log_level=0
    ## If you are not running in aws cloud, you can disable EC2 metadata
    #AWS_EC2_METADATA_DISABLED=false

View File

@@ -0,0 +1,17 @@
# Headless service backing the BE StatefulSet (referenced by its serviceName)
# to give each BE pod a stable DNS identity; exposes only the FE<->BE
# heartbeat port inside the cluster.
kind: Service
apiVersion: v1
metadata:
  namespace: xakny
  name: doris-cluster-be-internal
  labels:
    app.kubernetes.io/component: doris-cluster-be-internal
spec:
  ports:
    - name: heartbeat-port
      protocol: TCP
      port: 9050
      targetPort: 9050
  selector:
    app.kubernetes.io/component: doris-cluster-be
  clusterIP: None  # headless: per-pod DNS records instead of a virtual IP
  type: ClusterIP

View File

@@ -0,0 +1,32 @@
# NodePort service exposing the Doris BE ports outside the cluster.
kind: Service
apiVersion: v1
metadata:
  name: doris-cluster-be-service
  namespace: xakny
  labels:
    app.kubernetes.io/component: doris-cluster-be
spec:
  ports:
    - name: be-port
      protocol: TCP
      port: 9060
      targetPort: 9060
      nodePort: 32189
    - name: webserver-port
      protocol: TCP
      port: 8040
      targetPort: 8040
      nodePort: 31624
    - name: heartbeat-port
      protocol: TCP
      port: 9050
      targetPort: 9050
      nodePort: 31625
    - name: brpc-port
      protocol: TCP
      port: 8060
      targetPort: 8060
      nodePort: 31627
  selector:
    app.kubernetes.io/component: doris-cluster-be
  type: NodePort

View File

@@ -0,0 +1,214 @@
# Doris BE StatefulSet.
# > Fix: the "be-log" volume previously claimed doris-fe-log-pvc (the FE log
# > PVC, which the FE pod also mounts ReadWriteOnce), while the manifest set
# > defines doris-be-log-pvc that was never used. Point BE logs at the BE PVC.
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: doris-cluster-be
  namespace: xakny
  labels:
    app.kubernetes.io/component: doris-cluster-be
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: doris-cluster-be
  template:
    metadata:
      name: doris-cluster-be
      labels:
        app.kubernetes.io/component: doris-cluster-be
    spec:
      imagePullSecrets:
        - name: harborsecret
      volumes:
        - name: podinfo
          downwardAPI:
            items:
              - path: labels
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.labels
              - path: annotations
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.annotations
            defaultMode: 420
        - name: doris-cluster-be-conf
          configMap:
            name: doris-cluster-be-conf
            defaultMode: 420
        - name: be-storage
          persistentVolumeClaim:
            claimName: doris-be-storage-pvc
        - name: be-log
          persistentVolumeClaim:
            # > was doris-fe-log-pvc: that is the FE pod's RWO log volume
            claimName: doris-be-log-pvc
      initContainers:
        # BE requires a high vm.max_map_count and disabled swap on the host
        - name: default-init
          image: '192.168.0.2:8033/cmii/alpine:1.0.0'
          command:
            - /bin/sh
          args:
            - '-c'
            - sysctl -w vm.max_map_count=2000000 && swapoff -a
          resources:
            limits:
              cpu: '1'
              memory: 1Gi
            requests:
              cpu: '0.5'
              memory: 500Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true  # needed for sysctl/swapoff on the node
      containers:
        - name: be
          image: '192.168.0.2:8033/cmii/doris.be-ubuntu:2.1.6'
          command:
            - /opt/apache-doris/be_entrypoint.sh
          args:
            - $(ENV_FE_ADDR)
          ports:
            - name: be-port
              containerPort: 9060
              protocol: TCP
            - name: webserver-port
              containerPort: 8040
              protocol: TCP
            - name: heartbeat-port
              containerPort: 9050
              protocol: TCP
            - name: brpc-port
              containerPort: 8060
              protocol: TCP
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: CONFIGMAP_MOUNT_PATH
              value: /etc/doris
            - name: USER
              value: root
            - name: DORIS_ROOT
              value: /opt/apache-doris
            - name: ENV_FE_ADDR
              value: doris-cluster-fe-service
            - name: FE_QUERY_PORT
              value: '9030'
          resources:
            limits:
              cpu: '8'
              memory: 8Gi
            requests:
              cpu: '4'
              memory: 4Gi
          volumeMounts:
            - name: podinfo
              mountPath: /etc/podinfo
            - name: be-storage
              mountPath: /opt/apache-doris/be/storage
            - name: be-log
              mountPath: /opt/apache-doris/be/log
            - name: doris-cluster-be-conf
              mountPath: /etc/doris
          livenessProbe:
            tcpSocket:
              port: 9050
            initialDelaySeconds: 80
            timeoutSeconds: 180
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /api/health
              port: 8040
              scheme: HTTP
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          startupProbe:
            tcpSocket:
              port: 9050
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 60
          lifecycle:
            preStop:
              exec:
                command:
                  - /opt/apache-doris/be_prestop.sh
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: doris.cluster
                    operator: In
                    values:
                      - "true"
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app.kubernetes.io/component
                      operator: In
                      values:
                        - doris-cluster-be
                topologyKey: kubernetes.io/hostname
      schedulerName: default-scheduler
  # volumeClaimTemplates:
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: be-storage
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: '10'
  #       storageClassName: nfs-prod-distribute
  #       volumeMode: Filesystem
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: be-log
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: '10'
  #       storageClassName: nfs-prod-distribute
  #       volumeMode: Filesystem
  serviceName: doris-cluster-be-internal
  podManagementPolicy: Parallel

View File

@@ -0,0 +1,67 @@
# Doris FE configuration. The fe.conf payload is a literal block scalar, so
# every line below it (including "#" lines) is config-file content, not YAML.
apiVersion: v1
kind: ConfigMap
metadata:
  name: doris-cluster-fe-conf
  namespace: xakny
  labels:
    app.kubernetes.io/component: fe
data:
  fe.conf: |
    #####################################################################
    ## The uppercase properties are read and exported by bin/start_fe.sh.
    ## To see all Frontend configurations,
    ## see fe/src/org/apache/doris/common/Config.java
    #####################################################################
    CUR_DATE=`date +%Y%m%d-%H%M%S`
    # Log dir
    LOG_DIR = ${DORIS_HOME}/log
    # For jdk 8
    JAVA_OPTS="-Dfile.encoding=UTF-8 -Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:$LOG_DIR/log/fe.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Dlog4j2.formatMsgNoLookups=true"
    # Set your own JAVA_HOME
    # JAVA_HOME=/path/to/jdk/
    ##
    ## the lowercase properties are read by main program.
    ##
    # store metadata, must be created before start FE.
    # Default value is ${DORIS_HOME}/doris-meta
    # meta_dir = ${DORIS_HOME}/doris-meta
    # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
    # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
    http_port = 8030
    rpc_port = 9020
    query_port = 9030
    edit_log_port = 9010
    arrow_flight_sql_port = -1
    # Choose one if there are more than one ip except loopback address.
    # Note that there should at most one ip match this list.
    # If no ip match this rule, will choose one randomly.
    # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
    # Default value is empty.
    # priority_networks = 10.10.10.0/24;192.168.0.0/16
    # Advanced configurations
    # log_roll_size_mb = 1024
    # INFO, WARN, ERROR, FATAL
    sys_log_level = INFO
    # NORMAL, BRIEF, ASYNC,FE 日志的输出模式,其中 NORMAL 为默认的输出模式,日志同步输出且包含位置信息。ASYNC 默认是日志异步输出且包含位置信息。 BRIEF 模式是日志异步输出但不包含位置信息。三种日志输出模式的性能依次递增
    sys_log_mode = ASYNC
    # sys_log_roll_num = 10
    # sys_log_verbose_modules = org.apache.doris
    # audit_log_dir = $LOG_DIR
    # audit_log_modules = slow_query, query
    # audit_log_roll_num = 10
    # meta_delay_toleration_second = 10
    # qe_max_connection = 1024
    # qe_query_timeout_second = 300
    # qe_slow_log_ms = 5000
    #Fully Qualified Domain Name完全限定域名,开启后各节点之间通信基于FQDN
    enable_fqdn_mode = true

View File

@@ -0,0 +1,17 @@
# Headless service backing the FE StatefulSet (referenced by its serviceName)
# for stable per-pod DNS; exposes the MySQL-protocol query port in-cluster.
kind: Service
apiVersion: v1
metadata:
  name: doris-cluster-fe-internal
  namespace: xakny
  labels:
    app.kubernetes.io/component: doris-cluster-fe
spec:
  ports:
    - name: query-port
      protocol: TCP
      port: 9030
      targetPort: 9030
  selector:
    app.kubernetes.io/component: doris-cluster-fe
  clusterIP: None  # headless: per-pod DNS records instead of a virtual IP
  type: ClusterIP

View File

@@ -0,0 +1,32 @@
# NodePort service exposing the Doris FE ports outside the cluster.
# This name is also what BE pods use as ENV_FE_ADDR to find the FE.
kind: Service
apiVersion: v1
metadata:
  name: doris-cluster-fe-service
  namespace: xakny
  labels:
    app.kubernetes.io/component: doris-cluster-fe
spec:
  ports:
    - name: http-port
      protocol: TCP
      port: 8030
      targetPort: 8030
      nodePort: 31620
    - name: rpc-port
      protocol: TCP
      port: 9020
      targetPort: 9020
      nodePort: 31621
    - name: query-port
      protocol: TCP
      port: 9030
      targetPort: 9030
      nodePort: 31622
    - name: edit-log-port
      protocol: TCP
      port: 9010
      targetPort: 9010
      nodePort: 31623
  selector:
    app.kubernetes.io/component: doris-cluster-fe
  type: NodePort

View File

@@ -0,0 +1,198 @@
# Doris FE StatefulSet: single FE replica with meta and log on dedicated PVCs,
# scheduled only to nodes labelled doris.cluster=true.
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: doris-cluster-fe
  namespace: xakny
  labels:
    app.kubernetes.io/component: doris-cluster-fe
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: doris-cluster-fe
  template:
    metadata:
      name: doris-cluster-fe
      labels:
        app.kubernetes.io/component: doris-cluster-fe
    spec:
      imagePullSecrets:
        - name: harborsecret
      volumes:
        - name: meta
          persistentVolumeClaim:
            # claimName: meta
            claimName: doris-fe-meta-pvc
        - name: log
          persistentVolumeClaim:
            # claimName: meta
            claimName: doris-fe-log-pvc
        - name: podinfo
          downwardAPI:
            items:
              - path: labels
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.labels
              - path: annotations
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.annotations
            defaultMode: 420
        - name: doris-cluster-fe-conf
          configMap:
            name: doris-cluster-fe-conf
            defaultMode: 420
      containers:
        - name: doris-cluster-fe
          image: '192.168.0.2:8033/cmii/doris.fe-ubuntu:2.1.6'
          command:
            - /opt/apache-doris/fe_entrypoint.sh
          args:
            - $(ENV_FE_ADDR)
          ports:
            - name: http-port
              containerPort: 8030
              protocol: TCP
            - name: rpc-port
              containerPort: 9020
              protocol: TCP
            - name: query-port
              containerPort: 9030
              protocol: TCP
            - name: edit-log-port
              containerPort: 9010
              protocol: TCP
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: CONFIGMAP_MOUNT_PATH
              value: /etc/doris
            - name: USER
              value: root
            - name: DORIS_ROOT
              value: /opt/apache-doris
            - name: ENV_FE_ADDR
              value: doris-cluster-fe-service
            - name: FE_QUERY_PORT
              value: '9030'
            # NOTE(review): ELECT_NUMBER=3 with replicas=1 — confirm intended
            - name: ELECT_NUMBER
              value: '3'
          resources:
            limits:
              cpu: '4'
              memory: 8Gi
            requests:
              cpu: '2'
              memory: 4Gi
          volumeMounts:
            - name: podinfo
              mountPath: /etc/podinfo
            - name: log
              mountPath: /opt/apache-doris/fe/log
            - name: meta
              mountPath: /opt/apache-doris/fe/doris-meta
            - name: doris-cluster-fe-conf
              mountPath: /etc/doris
          livenessProbe:
            tcpSocket:
              port: 9030
            initialDelaySeconds: 80
            timeoutSeconds: 180
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /api/health
              port: 8030
              scheme: HTTP
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          startupProbe:
            tcpSocket:
              port: 9030
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 60
          lifecycle:
            preStop:
              exec:
                command:
                  - /opt/apache-doris/fe_prestop.sh
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: doris.cluster
                    operator: In
                    values:
                      - "true"
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app.kubernetes.io/component
                      operator: In
                      values:
                        - doris-cluster-fe
                topologyKey: kubernetes.io/hostname
      schedulerName: default-scheduler
  # volumeClaimTemplates:
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: meta
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: 10G
  #       storageClassName: hcms-efs-class
  #       volumeMode: Filesystem
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: log
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: '10'
  #       storageClassName: hcms-efs-class
  #       volumeMode: Filesystem
  serviceName: doris-cluster-fe-internal
  podManagementPolicy: Parallel

View File

@@ -0,0 +1,60 @@
---
# pvc.yaml — persistent volume claims for the Doris FE/BE pods.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-fe-meta-pvc  # FE metadata (doris-meta)
  namespace: xakny
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-fe-log-pvc  # FE logs
  namespace: xakny
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-be-storage-pvc  # BE data storage
  namespace: xakny
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Gi # 根据实际存储需求调整
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # NOTE(review): intended for BE logs, but the BE StatefulSet as written
  # mounts doris-fe-log-pvc for its log volume — confirm and align.
  name: doris-be-log-pvc
  namespace: xakny
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
View File

@@ -0,0 +1,5 @@
修改PVC文件
修改全部的NAMESPACE
修改statefulset里面的IMAGE