This commit is contained in:
zeaslity
2025-05-15 10:32:14 +08:00
parent 56c79a02a8
commit ce4165e36b
93 changed files with 18779 additions and 481 deletions

.idea/workspace.xml generated
View File

@@ -5,9 +5,100 @@
</component>
<component name="ChangeListManager">
<list default="true" id="a078e6aa-c7c7-487c-ab23-90fee7ad88b2" name="Changes" comment="common update">
<change afterPath="$PROJECT_DIR$/54-202501-深圳规自-ARM/部署yaml/ts2mp4.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/64-202504-云南二级监管/cmii-uavms-pyfusion.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/64-202504-云南二级监管/doris部署/doris-docker.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/64-202504-云南二级监管/doris部署/doris_table_init.sql" afterDir="false" />
<change afterPath="$PROJECT_DIR$/64-202504-云南二级监管/数据库初始化/all_tables_jianguan_已有行业应用.sql" afterDir="false" />
<change afterPath="$PROJECT_DIR$/64-202504-云南二级监管/数据库初始化/config_info.sql" afterDir="false" />
<change afterPath="$PROJECT_DIR$/64-202504-云南二级监管/数据库初始化/uav_lifecycle.sql" afterDir="false" />
<change afterPath="$PROJECT_DIR$/64-202504-云南二级监管/数据库初始化/uav_notice.sql" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-be-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-be-internal-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-be-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-be-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-fe-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-fe-internal-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-fe-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-fe-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-simple-k8s-失败/doris-deployment-dev.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-simple-k8s-失败/doris-deployment.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-simple-k8s-失败/doris-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris部署-operator/doris-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris部署-operator/doris-deployment-pv.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris部署-operator/doris-deplyment.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris部署-operator/doris-kind.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris部署-operator/doris-operator.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/hunan-cluster.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/cmii-uav-watchdog-test.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/cmii-uavms-pyfusion.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/harbor-secret.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/helm-minio.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-backend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-dashboard.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-emqx.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-frontend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-ingress.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-mongo.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-mysql.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-nacos.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-nfs-test.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-nfs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-rabbitmq.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-redis.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/yaml/k8s-srs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/数据库脚本下载.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/下载镜像.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/研发环境相关DEMO/安装golang.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/23-浙江交投/1-base-env-shell/进度说明-10日版本.txt" beforeDir="false" afterPath="$PROJECT_DIR$/23-浙江交投/1-base-env-shell/进度说明-10日版本.txt" afterDir="false" />
<change beforePath="$PROJECT_DIR$/19-上海雷视/下载文件.txt" beforeDir="false" afterPath="$PROJECT_DIR$/19-上海雷视/下载文件.txt" afterDir="false" />
<change beforePath="$PROJECT_DIR$/26-开发高可用集群/DEV批量命令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/26-开发高可用集群/DEV批量命令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/26-开发高可用集群/批量命令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/26-开发高可用集群/批量命令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/26-开发高可用集群/时间同步结果.txt" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/63-202504-CICD新/1-jenkins安装.txt" beforeDir="false" afterPath="$PROJECT_DIR$/63-202504-CICD新/1-jenkins安装.txt" afterDir="false" />
<change beforePath="$PROJECT_DIR$/63-202504-CICD新/1.1-自定义编译jenkins.dockerfile" beforeDir="false" afterPath="$PROJECT_DIR$/63-202504-CICD新/1.1-自定义编译jenkins.dockerfile" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/AI安装/install_nvidia_docker.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/z-AI安装/install_nvidia_docker.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/AI安装/nvdia-docker-install-废弃.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/z-AI安装/nvdia-docker-install-废弃.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-Agent-WDD运行/a-AgentWdd-基础依赖.ps1" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/a-AgentWdd-基础依赖.ps1" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-Agent-WDD运行/a-AgentWdd-基础依赖.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/a-AgentWdd-基础依赖.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-Agent-WDD运行/octopus-agent-run.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/octopus-agent-run.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-Agent-WDD运行/z-同步资料-废弃.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/z-同步资料-废弃.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-Agent-WDD运行/啊-批量命令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/deploy-nfs-server.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/deploy-nfs-server.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/install_minio.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/install_minio.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/nginx暴露/在线安装nginx-centos.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/在线安装nginx-centos.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/nginx暴露/在线安装nginx.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/在线安装nginx.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/nginx暴露/真实nginx-offline-map.conf" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-offline-map.conf" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/nginx暴露/真实的nginx配置.conf" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/真实的nginx配置.conf" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/nginx暴露/纯离线部署nginx-docker-compose.yaml" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/纯离线部署nginx-docker-compose.yaml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/nginx暴露/设置ingress-nginx.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/设置ingress-nginx.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/z_执行apply命令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/z_执行apply命令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/为node打标签.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/为node打标签.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/开机启动的脚本.txt" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/开机启动的脚本.txt" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/手动创建harbor仓库.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/手动创建harbor仓库.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/清理rke集群的安装.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/清理rke集群的安装.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/啊-部署脚本/编辑calico状态.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/编辑calico状态.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/备份脚本/备份命名空间.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/备份脚本/备份命名空间.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/故障恢复脚本/删除状态不为Running的Pod.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/故障恢复脚本/删除状态不为Running的Pod.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/0-挂载磁盘.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/0-挂载磁盘.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/镜像同步/2-imageDownSync-ARM64.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/2-imageDownSync-ARM64.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/镜像同步/2-imageDownSync.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/2-imageDownSync.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/镜像同步/ImageSyncDLTU.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/ImageSyncDLTU.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/镜像同步/image-clean.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/image-clean.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/镜像同步/image-sync.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/image-sync.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/镜像同步/imageSyncDLTU.ps1" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/imageSyncDLTU.ps1" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/镜像同步/imageSyncDownload.ps1" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/imageSyncDownload.ps1" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/镜像同步/kubernetes-images-2.5.7-1.20.4.txt" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/kubernetes-images-2.5.7-1.20.4.txt" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/镜像同步/middle-image-arm64-250218.txt" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/middle-image-arm64-250218.txt" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/镜像同步/清除镜像.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/清除镜像.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/镜像同步/离线更新tag脚本.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/离线更新tag脚本.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/999-数据库脚本/6.2.0/z.user_base.sql" beforeDir="false" afterPath="$PROJECT_DIR$/999-数据库脚本/6.2.0/z.user_base.sql" afterDir="false" />
<change beforePath="$PROJECT_DIR$/999-数据库脚本/z_database_execute.sh" beforeDir="false" afterPath="$PROJECT_DIR$/999-数据库脚本/z_database_execute.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/999-部署模板/关键参数说明.txt" beforeDir="false" afterPath="$PROJECT_DIR$/999-部署模板/关键参数说明.txt" afterDir="false" />
</list>
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
@@ -17,8 +108,23 @@
<component name="Git.Settings">
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
</component>
<component name="HighlightingSettingsPerFile">
<setting file="file://$PROJECT_DIR$/65-202504-湖南二级监管/hunan-cluster.yaml" root0="FORCE_HIGHLIGHTING" />
</component>
<component name="KubernetesApiPersistence">{}</component>
<component name="KubernetesApiProvider">{
&quot;configuredContexts&quot;: [
{
&quot;name&quot;: &quot;hunan-kcs&quot;,
&quot;kubeConfigUrl&quot;: &quot;file://C:/Users/wdd/.kube/config&quot;,
&quot;currentNamespace&quot;: &quot;Kubernetes.All.Namespaces.Label&quot;
},
{
&quot;name&quot;: &quot;cmii-dev-cluster&quot;,
&quot;kubeConfigUrl&quot;: &quot;file://C:/Users/wdd/.kube/config&quot;,
&quot;currentNamespace&quot;: &quot;uavcloud-devflight&quot;
}
],
&quot;isMigrated&quot;: true
}</component>
<component name="ProjectColorInfo">{
@@ -42,23 +148,27 @@
&quot;SHARE_PROJECT_CONFIGURATION_FILES&quot;: &quot;true&quot;,
&quot;git-widget-placeholder&quot;: &quot;main&quot;,
&quot;go.import.settings.migrated&quot;: &quot;true&quot;,
&quot;last_opened_file_path&quot;: &quot;C:/Users/wdd/Documents/IdeaProjects/CmiiDeploy/62-202504-云南甘肃漏洞&quot;,
&quot;last_opened_file_path&quot;: &quot;C:/Users/wdd/Documents/IdeaProjects/CmiiDeploy/54-202501-深圳规自-ARM/部署yaml&quot;,
&quot;node.js.detected.package.eslint&quot;: &quot;true&quot;,
&quot;node.js.detected.package.tslint&quot;: &quot;true&quot;,
&quot;node.js.selected.package.eslint&quot;: &quot;(autodetect)&quot;,
&quot;node.js.selected.package.tslint&quot;: &quot;(autodetect)&quot;,
&quot;nodejs_package_manager_path&quot;: &quot;npm&quot;,
&quot;settings.editor.selected.configurable&quot;: &quot;editor.preferences.tabs&quot;,
&quot;settings.editor.selected.configurable&quot;: &quot;com.intellij.kubernetes.view.ui.settings.KubernetesViewConfigurable&quot;,
&quot;vue.rearranger.settings.migration&quot;: &quot;true&quot;
}
}</component>
<component name="RecentsManager">
<key name="CopyFile.RECENT_KEYS">
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\62-202504-云南甘肃漏洞" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\61-202504-厦门升级" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\57-202503-鄂尔多斯机场\doris多主机部署" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\54-202501-深圳规自-ARM\部署yaml" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\65-202504-湖南二级监管\yaml" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\64-202504-云南二级监管" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\65-202504-湖南二级监管\doris-deploy" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\65-202504-湖南二级监管" />
</key>
<key name="MoveFile.RECENT_KEYS">
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\65-202504-湖南二级监管\doris-simple-k8s-失败" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\64-202504-云南二级监管\doris部署" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\999-部署模板" />
@@ -89,7 +199,21 @@
<workItem from="1743989590460" duration="6809000" />
<workItem from="1744092938322" duration="6438000" />
<workItem from="1744272013796" duration="6427000" />
<workItem from="1744768739183" duration="16694000" />
<workItem from="1744768739183" duration="25427000" />
<workItem from="1745205882820" duration="2130000" />
<workItem from="1745222576370" duration="702000" />
<workItem from="1745289703609" duration="14767000" />
<workItem from="1745562918278" duration="5765000" />
<workItem from="1745717940572" duration="8365000" />
<workItem from="1745802927208" duration="14955000" />
<workItem from="1745909669919" duration="1949000" />
<workItem from="1745911652344" duration="11632000" />
<workItem from="1745993177704" duration="2845000" />
<workItem from="1746670196398" duration="6037000" />
<workItem from="1746755608411" duration="5580000" />
<workItem from="1746770911238" duration="2282000" />
<workItem from="1747012932022" duration="4082000" />
<workItem from="1747204385370" duration="2904000" />
</task>
<task id="LOCAL-00001" summary="common update">
<option name="closed" value="true" />
@@ -99,7 +223,23 @@
<option name="project" value="LOCAL" />
<updated>1742174375760</updated>
</task>
<option name="localTasksCounter" value="2" />
<task id="LOCAL-00002" summary="大量更新">
<option name="closed" value="true" />
<created>1744873984891</created>
<option name="number" value="00002" />
<option name="presentableId" value="LOCAL-00002" />
<option name="project" value="LOCAL" />
<updated>1744873984891</updated>
</task>
<task id="LOCAL-00003" summary="大量更新">
<option name="closed" value="true" />
<created>1744873998562</created>
<option name="number" value="00003" />
<option name="presentableId" value="LOCAL-00003" />
<option name="project" value="LOCAL" />
<updated>1744873998562</updated>
</task>
<option name="localTasksCounter" value="4" />
<servers />
</component>
<component name="TypeScriptGeneratedFilesManager">
@@ -117,21 +257,16 @@
</option>
</component>
<component name="VcsManagerConfiguration">
<option name="CHECK_CODE_SMELLS_BEFORE_PROJECT_COMMIT" value="false" />
<MESSAGE value="common update" />
<option name="LAST_COMMIT_MESSAGE" value="common update" />
<MESSAGE value="大量更新" />
<option name="LAST_COMMIT_MESSAGE" value="大量更新" />
</component>
<component name="VgoProject">
<settings-migrated>true</settings-migrated>
</component>
<component name="XDebuggerManager">
<breakpoint-manager>
<breakpoints>
<line-breakpoint enabled="true" type="bashpro.debug.shLine">
<url>file://$PROJECT_DIR$/999-数据库脚本/z_database_execute.sh</url>
<line>8</line>
<option name="timeStamp" value="1" />
</line-breakpoint>
</breakpoints>
</breakpoint-manager>
<component name="XSLT-Support.FileAssociations.UIState">
<expand />
<select />
</component>
</project>

View File

@@ -11,4 +11,13 @@ curl -x socks5h://103.0.180.82:9997 https://oss.demo.uavcmlc.com/cmlc-installati
curl -x socks5h://103.0.180.82:9997 https://oss.demo.uavcmlc.com/cmlc-installation/shls/middleware-images.tar.gz -o middleware-images.tar.gz
curl http://103.0.180.82:9000/octopus/mysql-8.0.27-linux-glibc2.17-x86_64-minimal.zip
curl http://103.0.180.82:9000/octopus/mysql-8.0.27-linux-glibc2.17-x86_64-minimal.zip
wget 42.192.52.227:9000/octopus/doris/chengdu.csv
wget 42.192.52.227:9000/octopus/doris/cmii_dwd_reg_grid_mowork_signal_detail_dd.csv
wget 42.192.52.227:9000/octopus/doris/cmii_integration_tmpnal_micro_meteorology.csv
wget 42.192.52.227:9000/octopus/doris/doris-v2-v21.sql

View File

@@ -9,9 +9,14 @@ all_server_list=(192.168.35.105 192.168.35.114 192.168.35.115 192.168.35.55 192.
all_server_list=(dev-worker-01 dev-worker-02 dev-worker-03 dev-worker-05 dev-worker-06 dev-worker-07 dev-worker-08 dev-worker-09)
all_server_list=(192.168.35.105 192.168.35.114 192.168.35.115 192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104 192.168.40.53 192.168.40.54 192.168.40.55)
for server in "${all_server_list[@]}";do
echo "server is ${server}"
ssh root@"${server}" "df -TH"
for server in "${all_server_list[@]}"; do
result=$(ssh root@"$server" "df -TH | awk '\$6+0 > 60' | grep -v nfs")
if [ -n "$result" ]; then
echo "server is $server"
echo "$result"
echo "-----------------------------"
fi
done
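For reference, df -TH prints Use% as the sixth column, so the awk program $6+0 > 60 coerces "61%" to 61 and keeps rows above the threshold. A single-host sketch of the same check (the hostname is an illustrative placeholder):

ssh root@"example-host" "df -TH | awk '\$6+0 > 60' | grep -v nfs"   # mounts above 60% use, nfs excluded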

View File

@@ -1,159 +0,0 @@
server is 192.168.35.105
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:29 CST
Universal time: Wed 2025-03-12 07:31:29 UTC
RTC time: Wed 2025-03-12 06:48:28
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.114
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:30 CST
Universal time: Wed 2025-03-12 07:31:30 UTC
RTC time: Wed 2025-03-12 06:48:29
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.115
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:30 CST
Universal time: Wed 2025-03-12 07:31:30 UTC
RTC time: Wed 2025-03-12 06:48:29
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.55
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:30 CST
Universal time: Wed 2025-03-12 07:31:30 UTC
RTC time: Wed 2025-03-12 06:48:29
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.86
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:31 CST
Universal time: Wed 2025-03-12 07:31:31 UTC
RTC time: Wed 2025-03-12 06:48:29
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.89
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:31 CST
Universal time: Wed 2025-03-12 07:31:31 UTC
RTC time: Wed 2025-03-12 06:48:31
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.93
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:32 CST
Universal time: Wed 2025-03-12 07:31:32 UTC
RTC time: Wed 2025-03-12 06:48:31
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.95
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:32 CST
Universal time: Wed 2025-03-12 07:31:32 UTC
RTC time: Wed 2025-03-12 06:48:31
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.96
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:33 CST
Universal time: Wed 2025-03-12 07:31:33 UTC
RTC time: Wed 2025-03-12 06:48:32
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.101
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:33 CST
Universal time: Wed 2025-03-12 07:31:33 UTC
RTC time: Wed 2025-03-12 06:48:32
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.103
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:34 CST
Universal time: Wed 2025-03-12 07:31:34 UTC
RTC time: Wed 2025-03-12 06:48:32
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.35.104
Authorized users only. All activities may be monitored and reported.
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:34 CST
Universal time: Wed 2025-03-12 07:31:34 UTC
RTC time: Wed 2025-03-12 06:48:32
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.40.53
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:36 CST
Universal time: Wed 2025-03-12 07:31:36 UTC
RTC time: Wed 2025-03-12 07:31:36
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.40.54
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:39 CST
Universal time: Wed 2025-03-12 07:31:39 UTC
RTC time: Wed 2025-03-12 07:31:39
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no
server is 192.168.40.55
/etc/profile.d/system-info.sh: line 26: bc: command not found
Local time: Wed 2025-03-12 15:31:43 CST
Universal time: Wed 2025-03-12 07:31:43 UTC
RTC time: Wed 2025-03-12 07:31:43
Time zone: Asia/Shanghai (CST, +0800)
System clock synchronized: yes
NTP service: active
RTC in local TZ: no

View File

@@ -0,0 +1,167 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: cmii-uav-tsmpf-cm
namespace: szgz
data:
server_config_docker.ini: |
#minio=mc alias set minIO https://oss.demo.uavcmlc.com:18000/ cmii B#923fC7mk
minio=mc alias set minIO http://172.31.2.7:9000/ cmii B#923fC7mk
http_port=8192
docker_logs_path=/workspace/logs
docker_temp_folder=/workspace/temp_folder
mp4_file_size=500
upload_url=not used.
query_file_url=not used.
download_url=not_useful_now.
send_complete_msg_url=not used.
logs_path=/workspace/logs
temp_folder=/workspace/temp_folder
#merge tasks to exclude (these cannot succeed because their ts files are missing)
exclude_task_codes=
#whether to run playback tasks; 0: false, 1: true
merge_playback=1
#how to download ts files: http or mc
download_type=mc
#only merge files for specific companies (useful to prioritize one company's files); comma-separated ids
only_download_company_ids=
#whether to write logs to disk; 0: false, 1: true
log_to_disk=1
#(integer) log level; 1: error, 2: warn, 3: info, 4: debug
log_level=2
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: cmii-uav-tsmpf
namespace: szgz
labels:
app.kubernetes.io/app-version: 6.2.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uav-tsmpf
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
replicas: 1
selector:
matchLabels:
cmii.app: cmii-uav-tsmpf
cmii.type: backend
template:
metadata:
creationTimestamp: null
labels:
cmii.app: cmii-uav-tsmpf
cmii.type: backend
spec:
volumes:
- name: nfs-backend-log-volume
emptyDir: {}
- name: server-config-docker
configMap:
name: cmii-uav-tsmpf-cm
items:
- key: server_config_docker.ini
path: server_config_docker.ini
containers:
- name: cmii-uav-tsmpf
image: '172.31.2.7:8033/admin/cmii/ts2mp4_docker_image_arm64:v1.1.0'
command:
- /workspace/ts_file_merge
- /workspace/server_config_docker.ini
ports:
- name: pod-port
containerPort: 8192
protocol: TCP
env:
- name: K8S_NAMESPACE
value: szgz
- name: APPLICATION_NAME
value: cmii-uav-tsmpf
resources:
requests:
cpu: 500m
memory: 2Gi
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
subPath: szgz/cmii-uav-tsmpf
- name: server-config-docker
mountPath: /workspace/server_config_docker.ini
subPath: server_config_docker.ini
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- szgz
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: cmii-uav-tsmpf
namespace: szgz
labels:
app.kubernetes.io/app-version: 6.2.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uav-tsmpf
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
ports:
- name: backend-tcp
protocol: TCP
port: 8192
targetPort: 8192
nodePort: 38192
selector:
cmii.app: cmii-uav-tsmpf
cmii.type: backend
type: NodePort
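A minimal apply-and-verify sequence for the manifest above (a sketch, assuming a kubeconfig with access to the szgz namespace):

kubectl apply -f ts2mp4.yaml
kubectl -n szgz rollout status deployment/cmii-uav-tsmpf   # wait for the replica to become ready
kubectl -n szgz get svc cmii-uav-tsmpf                     # NodePort 38192 -> container port 8192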

View File

@@ -14,4 +14,29 @@ docker run --name jenkins-docker --rm --detach \
# build jenkins
cd /root/wdd/jenkins-build
docker build -t harbor.cdcyy.com.cn/cmii/jenkins-wdd-blueocean:2.492.3-lts-jdk21 .
docker build -t harbor.cdcyy.com.cn/cmii/jenkins-wdd-blueocean:2.492.3-lts-jdk21 .
# image built successfully
harbor.cdcyy.com.cn/cmii/jenkins-wdd-blueocean:2.492.3-lts-jdk21
docker run \
--name jenkins-server \
--restart=on-failure \
--detach \
--network jenkins \
--env DOCKER_HOST=tcp://docker:2376 \
--env DOCKER_CERT_PATH=/certs/client \
--env DOCKER_TLS_VERIFY=1 \
--volume jenkins-data:/var/jenkins_home \
--volume jenkins-docker-certs:/certs/client:ro \
--publish 8080:8080 \
--publish 50000:50000 \
harbor.cdcyy.com.cn/cmii/jenkins-wdd-blueocean:2.492.3-lts-jdk21
docker exec -it jenkins-server /bin/bash
cat /var/jenkins_home/secrets/initialAdminPassword
c99b712eac254e89a90f83d0f280e35a
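A quick sanity check before logging in with the password above (a sketch; the startup line quoted is standard Jenkins log output):

docker logs jenkins-server --tail 20                 # look for "Jenkins is fully up and running"
curl -sI http://localhost:8080/login | head -n 1     # expect an HTTP 200 once the controller is up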

View File

@@ -9,13 +9,13 @@ RUN rm -rf /etc/apt/sources.list.d/* \
&& echo "deb http://mirrors.aliyun.com/debian/ bookworm-updates main non-free contrib" >> /etc/apt/sources.list
RUN apt-get update \
&& apt-get install -y lsb-release ca-certificates curl && \
&& apt-get install -y lsb-release ca-certificates curl apt-utils && \
install -m 0755 -d /etc/apt/keyrings && \
curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc && \
chmod a+r /etc/apt/keyrings/docker.asc && \
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] \
https://mirrors.tuna.tsinghua.edu.cn $(. /etc/os-release && echo \"$VERSION_CODENAME\") stable" \
| tee /etc/apt/sources.list.d/docker.list > /dev/null && \
curl -fsSL https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg && \
chmod a+r /etc/apt/keyrings/docker.gpg && \
echo "docker key add => $(cat /etc/apt/keyrings/docker.gpg)" && \
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/debian "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null && \
echo "docker apt source add => $(cat /etc/apt/sources.list.d/docker.list) \n\n" && \
apt-get update && apt-get install -y docker-ce-cli && \
apt-get clean && rm -rf /var/lib/apt/lists/*
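One way to sanity-check the rebuilt image is to confirm the Docker CLI made it in; the tag comes from the build step above, and the entrypoint override exists only for this check:

docker run --rm --entrypoint docker \
  harbor.cdcyy.com.cn/cmii/jenkins-wdd-blueocean:2.492.3-lts-jdk21 --version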

View File

@@ -0,0 +1,138 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: pyfusion-configmap
namespace: ynydapp
data:
config.yaml: |-
mqtt:
broker: "helm-emqxs"
port: 1883
username: "cmlc"
password: "odD8#Ve7.B"
topics:
mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: cmii-uavms-pyfusion
namespace: ynydapp
labels:
app.kubernetes.io/app-version: 6.2.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
replicas: 1
selector:
matchLabels:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
template:
metadata:
creationTimestamp: null
labels:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
spec:
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
- name: pyfusion-conf
configMap:
name: pyfusion-configmap
items:
- key: config.yaml
path: config.yaml
containers:
- name: cmii-uavms-pyfusion
image: 'harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:6.3.1'
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
env:
- name: VERSION
value: 6.2.0
- name: NACOS_SYSTEM_CONFIG_NAME
value: cmii-backend-system
- name: NACOS_SERVICE_CONFIG_NAME
value: cmii-uavms-pyfusion
- name: NACOS_SERVER_ADDRESS
value: 'helm-nacos:8848'
- name: K8S_NAMESPACE
value: ynydapp
- name: APPLICATION_NAME
value: cmii-uavms-pyfusion
- name: NACOS_DISCOVERY_PORT
value: '8080'
- name: BIZ_CONFIG_GROUP
value: 6.2.0
- name: SYS_CONFIG_GROUP
value: 6.2.0
- name: IMAGE_VERSION
value: 6.2.0
resources:
limits:
cpu: '2'
memory: 3Gi
requests:
cpu: 200m
memory: 500Mi
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
subPath: ynydapp/cmii-uavms-pyfusion
- name: pyfusion-conf
mountPath: /app/config.yaml
subPath: config.yaml
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: cmii-uavms-pyfusion
namespace: ynydapp
labels:
app.kubernetes.io/app-version: 6.2.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
ports:
- name: backend-tcp
protocol: TCP
port: 8080
targetPort: 8080
selector:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
type: ClusterIP
sessionAffinity: None
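To verify the broker credentials and topic pattern from the ConfigMap, a subscribe sketch from any pod or host that can reach helm-emqxs (mosquitto-clients itself is an assumption, not part of the image):

mosquitto_sub -h helm-emqxs -p 1883 -u cmlc -P 'odD8#Ve7.B' \
  -t 'bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+' -v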

View File

@@ -0,0 +1,48 @@
version: "3"
services:
fe:
image: 192.168.186.11:8033/cmii/doris.fe-amd64:2.1.6
hostname: fe
logging:
driver: "json-file"
options:
max-size: "100m"
max-file: "2"
environment:
- FE_SERVERS=fe1:172.21.80.2:9010
- FE_ID=1
ports:
- 8030:8030
- 9030:9030
volumes:
- /var/lib/docker/doris/doris-meta:/opt/apache-doris/fe/doris-meta
- /var/lib/docker/doris/fe-log:/opt/apache-doris/fe/log
networks:
doris_net:
ipv4_address: 172.21.80.2
be:
image: 192.168.186.11:8033/cmii/doris.be-amd64:2.1.6
hostname: be
logging:
driver: "json-file"
options:
max-size: "100m"
max-file: "2"
environment:
- FE_SERVERS=fe1:172.21.80.2:9010
- BE_ADDR=172.21.80.3:9050
ports:
- 8040:8040
volumes:
- /var/lib/docker/doris/storage:/opt/apache-doris/be/storage
- /var/lib/docker/doris/be-log:/opt/apache-doris/be/log
depends_on:
- fe
networks:
doris_net:
ipv4_address: 172.21.80.3
networks:
doris_net:
ipam:
config:
- subnet: 172.21.80.0/24
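After bringing the pair up, the FE speaks the MySQL protocol on the published 9030 port; a liveness sketch, assuming a mysql client on the host and Doris's default empty root password:

docker compose up -d
mysql -h 127.0.0.1 -P 9030 -u root -e 'SHOW FRONTENDS; SHOW BACKENDS;'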

View File

@@ -0,0 +1,573 @@
CREATE DATABASE IF NOT EXISTS cmii;
USE cmii;
CREATE TABLE IF NOT EXISTS `dwd_reg_airspace_grid_detail_dd` (
`geoCode` varchar(32) NOT NULL COMMENT '网格码',
`areaCode` varchar(32) NOT NULL COMMENT '空域标识码',
`areaCategory` tinyint NOT NULL COMMENT '空域类型1飞行活动空域 2划设空域',
`geoType` varchar(32) NULL COMMENT ' 网格类型2二维网格3三维网格'
) ENGINE=OLAP
UNIQUE KEY(`geoCode`, `areaCode`)
COMMENT '空域网格明细表'
DISTRIBUTED BY HASH(`areaCode`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `dwd_reg_flight_act_grid_detail_dd` (
`report_date` date NOT NULL COMMENT '活动日期',
`geo_num4` varchar(32) NOT NULL COMMENT '二维网格码',
`flight_code` varchar(32) NOT NULL COMMENT '飞行活动标识码'
) ENGINE=OLAP
UNIQUE KEY(`report_date`, `geo_num4`, `flight_code`)
COMMENT '飞行活动网格明细表'
AUTO PARTITION BY RANGE (date_trunc(`report_date`, 'month'))
()
DISTRIBUTED BY HASH(`report_date`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `dwd_reg_grid_2dcode_dd` (
`geo2DCode` varchar(32) NOT NULL COMMENT '二维网格码',
`maxLat` double NULL COMMENT '最大纬度',
`maxLng` double NULL COMMENT '最大经度',
`minLat` double NULL COMMENT '最小纬度',
`minLng` double NULL COMMENT '最小经度',
`geo2DCodeInteger` bigint NULL COMMENT '二维网格十进制整型',
`min2DCodeInteger` bigint NULL COMMENT '子网格十进制整型',
`max2DCodeInteger` bigint NULL COMMENT '子网格十进制整型'
) ENGINE=OLAP
UNIQUE KEY(`geo2DCode`)
COMMENT '二维网格位置明细表'
DISTRIBUTED BY HASH(`geo2DCode`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `dwd_reg_grid_3dcode_dd` (
`geo3DCode` varchar(32) NOT NULL COMMENT '三维网格码',
`maxLat` double NOT NULL COMMENT '最大纬度',
`maxLng` double NULL COMMENT '最大经度',
`minLat` double NULL COMMENT '最小纬度',
`minLng` double NULL COMMENT '最小经度',
`minHeight` double NULL COMMENT '最小高度',
`maxHeight` double NULL COMMENT '最大高度',
`min3DCodeOx` varchar(32) NULL COMMENT '最小网格码八进制',
`max3DCodeOx` varchar(32) NULL COMMENT '最大网格码八进制'
) ENGINE=OLAP
UNIQUE KEY(`geo3DCode`)
COMMENT '三维网格明细表'
DISTRIBUTED BY HASH(`geo3DCode`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `dwd_reg_grid_network_signal_detail_dd` (
`geo_3d_code` varchar(32) NOT NULL COMMENT '三维网格码',
`snr` double NULL COMMENT '信噪比',
`rsrp` double NULL COMMENT '参考信号接收功率,单位dBm',
`rsrq` double NULL COMMENT '参考信号接收质量',
`pci` varchar(320) NULL COMMENT '网络小区编号',
`latest_lat84` double NULL COMMENT '最近一次测量点纬度',
`latest_lng84` double NULL COMMENT '最近一次测量点经度',
`latest_height` double NULL COMMENT '最近一次测量点相对高度',
`latest_time` datetime NULL COMMENT '最近一次测量时间'
) ENGINE=OLAP
UNIQUE KEY(`geo_3d_code`)
COMMENT '网格信号强度明细表'
DISTRIBUTED BY HASH(`geo_3d_code`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `dwd_reg_grid_risk_detail_dd` (
`geo_2d_code` varchar(32) NOT NULL COMMENT '二维网格码',
`height` smallint NOT NULL COMMENT '相对高度',
`risk_level` tinyint NULL COMMENT '风险等级,-1:缺数据无法计算,1:低风险,2:中风险,3:高风险',
`calculation_time` datetime NULL COMMENT '计算时间'
) ENGINE=OLAP
UNIQUE KEY(`geo_2d_code`, `height`)
COMMENT '网格风险等级明细表'
DISTRIBUTED BY HASH(`geo_2d_code`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `dwd_reg_model_grid_detail_dd` (
`model_id` varchar(32) NOT NULL COMMENT '模型ID',
`geo_3d_code` varchar(32) NOT NULL COMMENT '二维网格码',
`min_lng` double NOT NULL COMMENT '最小经度',
`min_lat` double NULL COMMENT '最小纬度',
`max_lng` double NULL COMMENT '最大经度',
`max_lat` double NULL COMMENT '最大纬度',
`min_height` double NULL COMMENT '最小高度',
`max_height` double NULL COMMENT '最大高度'
) ENGINE=OLAP
UNIQUE KEY(`model_id`, `geo_3d_code`)
COMMENT '模型网格明细表'
DISTRIBUTED BY HASH(`model_id`, `geo_3d_code`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `dws_reg_flight_act_grid_1d` (
`report_date` date NOT NULL COMMENT '活动日期',
`geo_num4` varchar(32) NOT NULL COMMENT '二维网格码',
`flight_total` bigint NOT NULL COMMENT '飞行活动统计数量'
) ENGINE=OLAP
UNIQUE KEY(`report_date`, `geo_num4`)
COMMENT '飞行活动网格流量聚合表'
AUTO PARTITION BY RANGE (date_trunc(`report_date`, 'month'))
()
DISTRIBUTED BY HASH(`report_date`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `dws_reg_grid_population_1d` (
`geo_2d_code` varchar(32) NOT NULL COMMENT '(二维网格码17级)',
`time` datetime NOT NULL COMMENT '统计时间',
`population` int NULL COMMENT '人口数',
`population_density` double NULL COMMENT '人口密度'
) ENGINE=OLAP
UNIQUE KEY(`geo_2d_code`, `time`)
COMMENT '网格人口统计日表'
AUTO PARTITION BY RANGE (date_trunc(`time`, 'month'))
()
DISTRIBUTED BY HASH(`geo_2d_code`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `ods_bridge_object_track_sense` (
`object_id` varchar(64) NOT NULL COMMENT '目标唯一编号',
`provider_code` varchar(32) NOT NULL COMMENT '设备提供方编码',
`device_type` int NOT NULL COMMENT '设备类型',
`device_id` varchar(128) NOT NULL COMMENT '设备id',
`pt_time` bigint NOT NULL COMMENT '上报时间戳',
`msg_cnt` bigint NULL COMMENT '消息编号',
`longitude` double NOT NULL COMMENT '经度',
`latitude` double NOT NULL COMMENT '纬度',
`altitude` float NULL,
`height` float NULL COMMENT '距地高度',
`speed` float NOT NULL COMMENT '目标速度',
`time` datetime(3) NOT NULL COMMENT '探测时间',
`extension` variant NULL COMMENT '扩展信息'
) ENGINE=OLAP
DUPLICATE KEY(`object_id`)
COMMENT '设备目标探测数据表'
AUTO PARTITION BY RANGE (date_trunc(`time`, 'month'))()
DISTRIBUTED BY HASH(`object_id`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728"
);
CREATE TABLE IF NOT EXISTS `ods_bridge_uas_track_report` (
`uas_id` varchar(64) NOT NULL COMMENT '无人机识别码',
`third_code` varchar(32) NOT NULL COMMENT '三方平台编码',
`angle` float NOT NULL COMMENT '航迹角',
`speed` float NOT NULL COMMENT '地速',
`latitude` double NOT NULL COMMENT '纬度',
`longitude` double NOT NULL COMMENT '经度',
`altitude` double NOT NULL COMMENT '海拔高度',
`height` float NULL COMMENT '距地高度',
`time` datetime(3) NOT NULL COMMENT '上报时间',
`extension` variant NULL COMMENT '扩展信息'
) ENGINE=OLAP
DUPLICATE KEY(`uas_id`)
COMMENT '合作无人机上报数据表'
AUTO PARTITION BY RANGE (date_trunc(`time`, 'month'))()
DISTRIBUTED BY HASH(`uas_id`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728"
);
CREATE TABLE IF NOT EXISTS `ods_reg_warn_item` (
`uas_id` varchar(256) NOT NULL COMMENT '无人机识别码或探测目标id',
`warn_code` varchar(32) NOT NULL COMMENT '告警唯一标识',
`id` bigint NOT NULL COMMENT 'mysql自增ID',
`id_type` tinyint NULL COMMENT '无人机的id类型,1、三方平台 2、监管无人机 3、RemoteID 4.探测轨迹',
`cooperate` tinyint NOT NULL COMMENT '是否为合作类 1.合作; 0.非合作',
`provider_code` varchar(64) NULL COMMENT '设备来源厂家编号',
`device_id` varchar(64) NULL COMMENT '数据来源设备id设备方定义id)',
`device_type` int NULL COMMENT '来源设备类型(0、5G-A基站5ga 1、雷达设备(radar)2、频谱设备 (spec)3、光电设备(oe)4、反制设备,cm5、诱骗设备(dec)6、干扰设备(ifr) 7、指挥车(cv)8、察打一体 (isrs)999、其他设备(other))',
`event` tinyint NOT NULL COMMENT '告警类型(1 偏航预警2 闯入管制区3 闯入临时空域4.非合飞行)',
`warn_key` varchar(32) NULL COMMENT '告警特征值(uasId、event、refcode等产生',
`warning_content` varchar(256) NULL COMMENT '告警信息,主要是存放各种告警说明',
`start_time` datetime NOT NULL COMMENT '开始时间',
`end_time` datetime NULL COMMENT '结束时间',
`status` tinyint NOT NULL COMMENT '告警状态0告警中1:告警结束',
`longitude84` double NULL COMMENT '产生告警时经度(WGS-84)',
`latitude84` double NULL COMMENT '产生告警时纬度(WGS-84)',
`height` double NULL COMMENT '产生告警时高度',
`altitude` double NULL COMMENT '拔高度,单位米',
`speed` double NULL COMMENT '产生告警时的地速度',
`end_type` tinyint NULL COMMENT '结束类型0自动结束1手动结束',
`ref_area_code` varchar(32) NULL COMMENT 'event为23时首次产生告警时关联空域编号 event为3时飞行活动编号',
`ref_area_name` varchar(40) NULL COMMENT 'event为23时首次产生告警时关联空域名称.只存第一个空域名称',
`uav_update_time` datetime NULL COMMENT '无人机信息更新时间(以下为告警最新对应无人机信息)',
`uav_lng` double NULL COMMENT '最近1次告警时无人机经度',
`uav_lat` double NULL COMMENT '最近1次告警时无人机纬度',
`uav_height` double NULL COMMENT '最近1次告警时无人机高度',
`uav_altitude` double NULL COMMENT '最近1次告警时无人机的拔高度单位米',
`uav_speed` double NULL COMMENT '最近1次告警时无人机地速度',
`is_deal` tinyint NULL COMMENT '是否已经处置。 0.未处置 1.已处置',
`update_at` datetime NULL COMMENT '修改时间',
`create_at` datetime NULL COMMENT '创建时间'
) ENGINE=OLAP
DUPLICATE KEY(`uas_id`)
COMMENT '设备目标探测数据表'
AUTO PARTITION BY RANGE (date_trunc(`start_time`, 'month'))()
DISTRIBUTED BY HASH(`uas_id`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728"
);
CREATE TABLE IF NOT EXISTS `ods_reg_grid_network_signal_loss` (
`geo_2d_code` varchar(32) NOT NULL COMMENT '二维网格编码',
`flight_height` smallint NOT NULL COMMENT '相对空域高度,单位:m',
`path_loss` double NOT NULL COMMENT '预估信号损耗,负值,数值越大表示信号质量越好',
`path_loss_normalized` double NOT NULL COMMENT '预估信号损耗标准化计算值'
) ENGINE=OLAP
DUPLICATE KEY(`geo_2d_code`)
COMMENT '二维网格网络信号损耗表'
DISTRIBUTED BY HASH(`geo_2d_code`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728"
);
CREATE TABLE IF NOT EXISTS `ods_reg_grid_poi_density` (
`geo_2d_code` varchar(32) NOT NULL COMMENT '二维网格编码',
`poi_density` double NOT NULL COMMENT 'poi密度',
`poi_density_normalized` double NOT NULL COMMENT 'poi密度标准化计算值'
) ENGINE=OLAP
DUPLICATE KEY(`geo_2d_code`)
COMMENT '二维网格poi密度表'
DISTRIBUTED BY HASH(`geo_2d_code`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728"
);
CREATE TABLE IF NOT EXISTS `ods_reg_grid_pop_exposure_risk` (
`geo_2d_code` varchar(32) NOT NULL COMMENT '二维网格编码',
`pop_exposure_risk` double NOT NULL COMMENT '人口暴露风险',
`pop_exposure_risk_normalized` double NOT NULL COMMENT '人口暴露风险标准化计算值'
) ENGINE=OLAP
DUPLICATE KEY(`geo_2d_code`)
COMMENT '二维网格人口暴露风险'
DISTRIBUTED BY HASH(`geo_2d_code`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728"
);
CREATE TABLE IF NOT EXISTS `dwd_reg_tmp_airline_grid_detail_dd` (
`airLineId` varchar(128) NOT NULL COMMENT '航线ID',
`geo3DCode` varchar(32) NOT NULL COMMENT '三维网格码',
`maxLat` double NOT NULL COMMENT '最大纬度',
`maxLng` double NULL COMMENT '最大经度',
`minLat` double NULL COMMENT '最小纬度',
`minLng` double NULL COMMENT '最小经度',
`minHeight` double NULL COMMENT '最小高度',
`maxHeight` double NULL COMMENT '最大高度'
) ENGINE=OLAP
UNIQUE KEY(`airLineId`, `geo3DCode`)
COMMENT '三维网格明细表'
DISTRIBUTED BY HASH(`airLineId`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `dwd_reg_grid_mock_network_signal_detail_dd` (
`geo_3d_code` varchar(32) NOT NULL COMMENT '三维网格码',
`sampling_time` date NOT NULL COMMENT '采样时间',
`rsrp` double NULL COMMENT '参考信号接收功率,单位dBm'
) ENGINE=OLAP
UNIQUE KEY(`geo_3d_code`, `sampling_time`)
COMMENT 'radio map模拟网格信号强度明细表'
DISTRIBUTED BY HASH(`sampling_time`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `ods_external_attack_track_report` (
`source` varchar(255) NULL COMMENT '数据来源',
`type` varchar(255) NULL COMMENT '数据来源类型',
`attack_id` varchar(255) NULL COMMENT '告警 id',
`src_ip` varchar(255) NULL COMMENT '源 IP',
`src_port` int NULL COMMENT '源端口',
`timestamp` bigint NULL COMMENT '时间戳',
`dest_ip` varchar(255) NULL COMMENT '目的地ip',
`dest_port` int NULL COMMENT '目的端口',
`attack_type` varchar(255) NULL COMMENT '攻击类型',
`attack_name` varchar(255) NULL COMMENT '攻击名称',
`ext` variant NULL COMMENT '扩展字段'
) ENGINE=OLAP
DUPLICATE KEY(`source`, `type`)
COMMENT '外部攻击数据'
DISTRIBUTED BY HASH(`source`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728"
);
CREATE TABLE IF NOT EXISTS `ods_external_data_track_report` (
`source` varchar(20) NOT NULL COMMENT '数据来源',
`type` varchar(20) NOT NULL COMMENT '数据类型',
`id` bigint NOT NULL AUTO_INCREMENT(1) COMMENT 'ID',
`def_date` datetime(3) NOT NULL COMMENT '创建时间',
`param` variant NOT NULL COMMENT '数据实体'
) ENGINE=OLAP
DUPLICATE KEY(`source`, `type`)
COMMENT '设备目标探测数据表'
AUTO PARTITION BY RANGE (date_trunc(`def_date`, 'month'))()
DISTRIBUTED BY HASH(`source`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728"
);
CREATE TABLE IF NOT EXISTS `ods_external_micro_meteorology` (
`longitude` decimal(9,0) NOT NULL COMMENT '网格在投影坐标系下的X坐标',
`latitude` decimal(9,0) NOT NULL COMMENT '网格在投影坐标系下的Y坐标',
`altitude` decimal(9,0) NOT NULL COMMENT '网格在投影坐标系下的Z坐标',
`size` tinyint NOT NULL COMMENT '网格大小(m)',
`horizontal_wind_speed` double NULL COMMENT '水平风速(m/s)',
`horizontal_direction` double NULL COMMENT '水平风向',
`vertical_wind_speed` double NULL COMMENT '垂直风速(m/s)',
`time` datetime NOT NULL COMMENT '时间'
) ENGINE=OLAP
DUPLICATE KEY(`longitude`, `latitude`, `altitude`)
COMMENT '微气象数据表'
AUTO PARTITION BY RANGE (date_trunc(`time`, 'day'))()
DISTRIBUTED BY HASH(`altitude`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728"
);
ALTER USER root@'%' IDENTIFIED BY "Qtdq5@!Lw5FgM8p#";
CREATE USER IF NOT EXISTS cmii@'%' IDENTIFIED BY 'Gy8F5jpaFG@G';
GRANT SELECT_PRIV,LOAD_PRIV ON cmii.* TO 'cmii'@'%';
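The script is plain MySQL-protocol SQL, so it can be piped to the FE with a stock mysql client (the FE host is a placeholder; note the trailing ALTER USER changes the root password as it runs):

mysql -h <fe-host> -P 9030 -u root -p < doris_table_init.sql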

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,82 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: doris-cluster-be-conf
namespace: zyly
labels:
app.kubernetes.io/component: be
data:
be.conf: |
CUR_DATE=`date +%Y%m%d-%H%M%S`
# Log dir
LOG_DIR="${DORIS_HOME}/log/"
# For jdk 8
JAVA_OPTS="-Dfile.encoding=UTF-8 -Xmx2048m -DlogPath=$LOG_DIR/jni.log -Xloggc:$LOG_DIR/be.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives"
# Set your own JAVA_HOME
# JAVA_HOME=/path/to/jdk/
# https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
# https://jemalloc.net/jemalloc.3.html  jemalloc memory allocator tuning parameters
JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
JEMALLOC_PROF_PRFIX=""
# ports for admin, web, heartbeat service
be_port = 9060
webserver_port = 8040
heartbeat_service_port = 9050
brpc_port = 8060
arrow_flight_sql_port = -1
# HTTPS configures
enable_https = false
# path of certificate in PEM format.
#ssl_certificate_path = "$DORIS_HOME/conf/cert.pem"
# path of private key in PEM format.
#ssl_private_key_path = "$DORIS_HOME/conf/key.pem"
# Choose one if there is more than one IP besides the loopback address.
# Note that at most one IP should match this list.
# If no IP matches this rule, one will be chosen randomly.
# use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
# Default value is empty.
# priority_networks = 10.10.10.0/24;192.168.0.0/16
# data root path, separate by ';'
# You can specify the storage type for each root path, HDD (cold data) or SSD (hot data)
# eg:
# storage_root_path = /home/disk1/doris;/home/disk2/doris;/home/disk2/doris
# storage_root_path = /home/disk1/doris,medium:SSD;/home/disk2/doris,medium:SSD;/home/disk2/doris,medium:HDD
# /home/disk2/doris,medium:HDD(default)
#
# you also can specify the properties by setting '<property>:<value>', separate by ','
# property 'medium' has a higher priority than the extension of path
#
# Default value is ${DORIS_HOME}/storage, you should create it by hand.
# storage_root_path = ${DORIS_HOME}/storage
# Default dirs to put jdbc drivers; default value is ${DORIS_HOME}/jdbc_drivers
# jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
# Advanced configurations
# INFO, WARNING, ERROR, FATAL
sys_log_level = INFO
# sys_log_roll_mode = SIZE-MB-1024
# sys_log_roll_num = 10
# sys_log_verbose_modules = *
# log_buffer_level = -1
# aws sdk log level
# Off = 0,
# Fatal = 1,
# Error = 2,
# Warn = 3,
# Info = 4,
# Debug = 5,
# Trace = 6
# AWS SDK logging is off by default, because AWS SDK errors that need attention are surfaced through Doris logs
#aws_log_level=0
## If you are not running in aws cloud, you can disable EC2 metadata
#AWS_EC2_METADATA_DISABLED=false


@@ -0,0 +1,17 @@
kind: Service
apiVersion: v1
metadata:
namespace: zyly
name: doris-cluster-be-internal
labels:
app.kubernetes.io/component: doris-cluster-be-internal
spec:
ports:
- name: heartbeat-port
protocol: TCP
port: 9050
targetPort: 9050
selector:
app.kubernetes.io/component: doris-cluster-be
clusterIP: None
type: ClusterIP


@@ -0,0 +1,32 @@
kind: Service
apiVersion: v1
metadata:
name: doris-cluster-be-service
namespace: zyly
labels:
app.kubernetes.io/component: doris-cluster-be
spec:
ports:
- name: be-port
protocol: TCP
port: 9060
targetPort: 9060
nodePort: 32189
- name: webserver-port
protocol: TCP
port: 8040
targetPort: 8040
nodePort: 31624
- name: heartbeat-port
protocol: TCP
port: 9050
targetPort: 9050
nodePort: 31625
- name: brpc-port
protocol: TCP
port: 8060
targetPort: 8060
nodePort: 31627
selector:
app.kubernetes.io/component: doris-cluster-be
type: NodePort


@@ -0,0 +1,208 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: doris-cluster-be
namespace: zyly
labels:
app.kubernetes.io/component: doris-cluster-be
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: doris-cluster-be
template:
metadata:
name: doris-cluster-be
labels:
app.kubernetes.io/component: doris-cluster-be
spec:
imagePullSecrets:
- name: harborsecret
volumes:
- name: podinfo
downwardAPI:
items:
- path: labels
fieldRef:
apiVersion: v1
fieldPath: metadata.labels
- path: annotations
fieldRef:
apiVersion: v1
fieldPath: metadata.annotations
defaultMode: 420
- name: doris-cluster-be-conf
configMap:
name: doris-cluster-be-conf
defaultMode: 420
- name: be-storage
persistentVolumeClaim:
# claimName: meta
claimName: doris-be-storage-pvc
- name: be-log
persistentVolumeClaim:
# claimName: meta
claimName: doris-be-log-pvc
initContainers:
- name: default-init
image: 'hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/alpine:1.0.0'
command:
- /bin/sh
args:
- '-c'
- sysctl -w vm.max_map_count=2000000 && swapoff -a
resources:
limits:
cpu: '2'
memory: 2Gi
requests:
cpu: '1'
memory: 1Gi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
containers:
- name: be
image: 'hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/doris.be-amd64:2.1.6'
command:
- /opt/apache-doris/be_entrypoint.sh
args:
- $(ENV_FE_ADDR)
ports:
- name: be-port
containerPort: 9060
protocol: TCP
- name: webserver-port
containerPort: 8040
protocol: TCP
- name: heartbeat-port
containerPort: 9050
protocol: TCP
- name: brpc-port
containerPort: 8060
protocol: TCP
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CONFIGMAP_MOUNT_PATH
value: /etc/doris
- name: USER
value: root
- name: DORIS_ROOT
value: /opt/apache-doris
- name: ENV_FE_ADDR
value: doris-cluster-fe-service
- name: FE_QUERY_PORT
value: '9030'
resources:
limits:
cpu: '2'
memory: 2Gi
requests:
cpu: '1'
memory: 1Gi
volumeMounts:
- name: podinfo
mountPath: /etc/podinfo
- name: be-storage
mountPath: /opt/apache-doris/be/storage
- name: be-log
mountPath: /opt/apache-doris/be/log
- name: doris-cluster-be-conf
mountPath: /etc/doris
livenessProbe:
tcpSocket:
port: 9050
initialDelaySeconds: 80
timeoutSeconds: 180
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /api/health
port: 8040
scheme: HTTP
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
startupProbe:
tcpSocket:
port: 9050
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 60
lifecycle:
preStop:
exec:
command:
- /opt/apache-doris/be_prestop.sh
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- doris-cluster-be
topologyKey: kubernetes.io/hostname
schedulerName: default-scheduler
# volumeClaimTemplates:
# - kind: PersistentVolumeClaim
# apiVersion: v1
# metadata:
# name: be-storage
# spec:
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: '10'
# storageClassName: nfs-prod-distribute
# volumeMode: Filesystem
# - kind: PersistentVolumeClaim
# apiVersion: v1
# metadata:
# name: be-log
# spec:
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: '10'
# storageClassName: nfs-prod-distribute
# volumeMode: Filesystem
serviceName: doris-cluster-be-internal
podManagementPolicy: Parallel


@@ -0,0 +1,67 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: doris-cluster-fe-conf
namespace: zyly
labels:
app.kubernetes.io/component: fe
data:
fe.conf: |
#####################################################################
## The uppercase properties are read and exported by bin/start_fe.sh.
## To see all Frontend configurations,
## see fe/src/org/apache/doris/common/Config.java
#####################################################################
CUR_DATE=`date +%Y%m%d-%H%M%S`
# Log dir
LOG_DIR = ${DORIS_HOME}/log
# For jdk 8
JAVA_OPTS="-Dfile.encoding=UTF-8 -Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:$LOG_DIR/log/fe.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Dlog4j2.formatMsgNoLookups=true"
# Set your own JAVA_HOME
# JAVA_HOME=/path/to/jdk/
##
## the lowercase properties are read by main program.
##
# store metadata, must be created before start FE.
# Default value is ${DORIS_HOME}/doris-meta
# meta_dir = ${DORIS_HOME}/doris-meta
# Default dirs to put jdbc drivers; default value is ${DORIS_HOME}/jdbc_drivers
# jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
http_port = 8030
rpc_port = 9020
query_port = 9030
edit_log_port = 9010
arrow_flight_sql_port = -1
# Choose one if there is more than one IP besides the loopback address.
# Note that at most one IP should match this list.
# If no IP matches this rule, one will be chosen randomly.
# use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
# Default value is empty.
# priority_networks = 10.10.10.0/24;192.168.0.0/16
# Advanced configurations
# log_roll_size_mb = 1024
# INFO, WARN, ERROR, FATAL
sys_log_level = INFO
# NORMAL, BRIEF, ASYNC. FE log output modes: NORMAL (the default) writes logs synchronously and includes location info; ASYNC writes logs asynchronously and includes location info; BRIEF writes logs asynchronously without location info. Performance improves in that order.
sys_log_mode = ASYNC
# sys_log_roll_num = 10
# sys_log_verbose_modules = org.apache.doris
# audit_log_dir = $LOG_DIR
# audit_log_modules = slow_query, query
# audit_log_roll_num = 10
# meta_delay_toleration_second = 10
# qe_max_connection = 1024
# qe_query_timeout_second = 300
# qe_slow_log_ms = 5000
# Fully Qualified Domain Name; when enabled, inter-node communication is based on FQDNs
enable_fqdn_mode = true


@@ -0,0 +1,17 @@
kind: Service
apiVersion: v1
metadata:
name: doris-cluster-fe-internal
namespace: zyly
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
ports:
- name: query-port
protocol: TCP
port: 9030
targetPort: 9030
selector:
app.kubernetes.io/component: doris-cluster-fe
clusterIP: None
type: ClusterIP


@@ -0,0 +1,32 @@
kind: Service
apiVersion: v1
metadata:
name: doris-cluster-fe-service
namespace: zyly
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
ports:
- name: http-port
protocol: TCP
port: 8030
targetPort: 8030
nodePort: 31620
- name: rpc-port
protocol: TCP
port: 9020
targetPort: 9020
nodePort: 31621
- name: query-port
protocol: TCP
port: 9030
targetPort: 9030
nodePort: 31622
- name: edit-log-port
protocol: TCP
port: 9010
targetPort: 9010
nodePort: 31623
selector:
app.kubernetes.io/component: doris-cluster-fe
type: NodePort


@@ -0,0 +1,190 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: doris-cluster-fe
namespace: zyly
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: doris-cluster-fe
template:
metadata:
name: doris-cluster-fe
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
imagePullSecrets:
- name: harborsecret
volumes:
- name: meta
persistentVolumeClaim:
# claimName: meta
claimName: doris-fe-meta-pvc
- name: log
persistentVolumeClaim:
# claimName: meta
claimName: doris-fe-log-pvc
- name: podinfo
downwardAPI:
items:
- path: labels
fieldRef:
apiVersion: v1
fieldPath: metadata.labels
- path: annotations
fieldRef:
apiVersion: v1
fieldPath: metadata.annotations
defaultMode: 420
- name: doris-cluster-fe-conf
configMap:
name: doris-cluster-fe-conf
defaultMode: 420
containers:
- name: doris-cluster-fe
image: 'hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/doris.fe-amd64:2.1.6'
command:
- /opt/apache-doris/fe_entrypoint.sh
args:
- $(ENV_FE_ADDR)
ports:
- name: http-port
containerPort: 8030
protocol: TCP
- name: rpc-port
containerPort: 9020
protocol: TCP
- name: query-port
containerPort: 9030
protocol: TCP
- name: edit-log-port
containerPort: 9010
protocol: TCP
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CONFIGMAP_MOUNT_PATH
value: /etc/doris
- name: USER
value: root
- name: DORIS_ROOT
value: /opt/apache-doris
- name: ENV_FE_ADDR
value: doris-cluster-fe-service
- name: FE_QUERY_PORT
value: '9030'
- name: ELECT_NUMBER
value: '3'
resources:
limits:
cpu: '2'
memory: 2Gi
requests:
cpu: '1'
memory: 1Gi
volumeMounts:
- name: podinfo
mountPath: /etc/podinfo
- name: log
mountPath: /opt/apache-doris/fe/log
- name: meta
mountPath: /opt/apache-doris/fe/doris-meta
- name: doris-cluster-fe-conf
mountPath: /etc/doris
livenessProbe:
tcpSocket:
port: 9030
initialDelaySeconds: 80
timeoutSeconds: 180
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /api/health
port: 8030
scheme: HTTP
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
startupProbe:
tcpSocket:
port: 9030
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 60
lifecycle:
preStop:
exec:
command:
- /opt/apache-doris/fe_prestop.sh
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- doris-cluster-fe
topologyKey: kubernetes.io/hostname
schedulerName: default-scheduler
# volumeClaimTemplates:
# - kind: PersistentVolumeClaim
# apiVersion: v1
# metadata:
# name: meta
# spec:
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: 10G
# storageClassName: hcms-efs-class
# volumeMode: Filesystem
# - kind: PersistentVolumeClaim
# apiVersion: v1
# metadata:
# name: log
# spec:
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: '10'
# storageClassName: hcms-efs-class
# volumeMode: Filesystem
serviceName: doris-cluster-fe-internal
podManagementPolicy: Parallel


@@ -0,0 +1,60 @@
---
# pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-fe-meta-pvc
namespace: zyly
spec:
storageClassName: hcms-efs-class
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-fe-log-pvc
namespace: zyly
spec:
storageClassName: hcms-efs-class
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-be-storage-pvc
namespace: zyly
spec:
storageClassName: hcms-efs-class
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 180Gi # adjust to actual storage needs
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-be-log-pvc
namespace: zyly
spec:
storageClassName: hcms-efs-class
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi


@@ -0,0 +1,151 @@
# doris-fe-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-fe-app
namespace: uavcloud-devflight
spec:
replicas: 1
selector:
matchLabels:
app: doris-fe-app
template:
metadata:
labels:
app: doris-fe-app
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: doris-fe-app
image: harbor.cdcyy.com.cn/cmii/doris.fe-amd64:2.1.6
env:
- name: FE_SERVERS
value: "fe1:doris-fe-service:9010" # 使用Service名称进行服务发现
- name: FE_ID
value: "1"
ports:
- containerPort: 8030
- containerPort: 9030
- containerPort: 9010 # 添加内部通信端口
volumeMounts:
- name: fe-meta
mountPath: /opt/apache-doris/fe/doris-meta
- name: fe-log
mountPath: /opt/apache-doris/fe/log
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 2Gi
cpu: "1"
volumes:
- name: fe-meta
persistentVolumeClaim:
claimName: doris-fe-meta-pvc
- name: fe-log
persistentVolumeClaim:
claimName: doris-fe-log-pvc
---
# doris-fe-service.yaml
apiVersion: v1
kind: Service
metadata:
name: doris-fe-service
namespace: uavcloud-devflight
spec:
selector:
app: doris-fe-app
ports:
- name: http
port: 8030
targetPort: 8030
- name: query
port: 9030
targetPort: 9030
- name: edit
port: 9010 # expose the FE internal communication port
targetPort: 9010
---
# doris-be-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-be-app
namespace: uavcloud-devflight
spec:
replicas: 1
selector:
matchLabels:
app: doris-be-app
template:
metadata:
labels:
app: doris-be-app
spec:
imagePullSecrets:
- name: harborsecret
# initContainers:
# - name: wait-for-fe
# image: harbor.cdcyy.com.cn/cmii/busybox:latest
# command: ['sh', '-c', 'until nc -z doris-fe-service 9010; do echo waiting for fe; sleep 2; done;']
# resources:
# limits:
# memory: 2Gi
# cpu: "2"
# requests:
# memory: 200Mi
# cpu: 200m
containers:
- name: doris-be-app
image: harbor.cdcyy.com.cn/cmii/doris.be-amd64:2.1.6
env:
- name: FE_SERVERS
value: "fe1:doris-fe-service:9010"
- name: BE_ADDR
value: "doris-be-service:9050" # 使用Service名称
ports:
- containerPort: 8040
- containerPort: 9050 # 添加BE通信端口
volumeMounts:
- name: doris-be-storage
mountPath: /opt/apache-doris/be/storage
- name: doris-be-log
mountPath: /opt/apache-doris/be/log
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 2Gi
cpu: "1"
volumes:
- name: doris-be-storage
persistentVolumeClaim:
claimName: doris-be-storage-pvc
- name: doris-be-log
persistentVolumeClaim:
claimName: doris-be-log-pvc
---
# doris-be-service.yaml
apiVersion: v1
kind: Service
metadata:
name: doris-be-service
namespace: uavcloud-devflight
spec:
selector:
app: doris-be-app
ports:
- name: http
port: 8040
targetPort: 8040
- name: be-port
port: 9050 # expose the BE communication port
targetPort: 9050


@@ -0,0 +1,151 @@
# doris-fe-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-fe-app
namespace: zyly
spec:
replicas: 1
selector:
matchLabels:
app: doris-fe-app
template:
metadata:
labels:
app: doris-fe-app
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: doris-fe-app
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/doris.fe-amd64:2.1.6
env:
- name: FE_SERVERS
value: "doris-fe-service:9010" # 使用Service名称进行服务发现
- name: FE_ID
value: "1"
ports:
- containerPort: 8030
- containerPort: 9030
- containerPort: 9010 # 添加内部通信端口
volumeMounts:
- name: fe-meta
mountPath: /opt/apache-doris/fe/doris-meta
- name: fe-log
mountPath: /opt/apache-doris/fe/log
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 2Gi
cpu: "1"
volumes:
- name: fe-meta
persistentVolumeClaim:
claimName: doris-fe-meta-pvc
- name: fe-log
persistentVolumeClaim:
claimName: doris-fe-log-pvc
---
# doris-fe-service.yaml
apiVersion: v1
kind: Service
metadata:
name: doris-fe-service
namespace: zyly
spec:
selector:
app: doris-fe-app
ports:
- name: http
port: 8030
targetPort: 8030
- name: query
port: 9030
targetPort: 9030
- name: edit
port: 9010 # expose the FE internal communication port
targetPort: 9010
---
# doris-be-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-be-app
namespace: zyly
spec:
replicas: 1
selector:
matchLabels:
app: doris-be-app
template:
metadata:
labels:
app: doris-be-app
spec:
imagePullSecrets:
- name: harborsecret
# initContainers:
# - name: wait-for-fe
# image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/busybox:latest
# command: ['sh', '-c', 'until nc -z doris-fe-service 9010; do echo waiting for fe; sleep 2; done;']
# resources:
# limits:
# memory: 2Gi
# cpu: "2"
# requests:
# memory: 200Mi
# cpu: 200m
containers:
- name: doris-be-app
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/doris.be-amd64:2.1.6
env:
- name: FE_SERVERS
value: "doris-fe-service:9010"
- name: BE_ADDR
value: "doris-be-service:9050" # 使用Service名称
ports:
- containerPort: 8040
- containerPort: 9050 # 添加BE通信端口
volumeMounts:
- name: doris-be-storage
mountPath: /opt/apache-doris/be/storage
- name: doris-be-log
mountPath: /opt/apache-doris/be/log
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 2Gi
cpu: "1"
volumes:
- name: doris-be-storage
persistentVolumeClaim:
claimName: doris-be-storage-pvc
- name: doris-be-log
persistentVolumeClaim:
claimName: doris-be-log-pvc
---
# doris-be-service.yaml
apiVersion: v1
kind: Service
metadata:
name: doris-be-service
namespace: zyly
spec:
selector:
app: doris-be-app
ports:
- name: http
port: 8040
targetPort: 8040
- name: be-port
port: 9050 # expose the BE communication port
targetPort: 9050


@@ -0,0 +1,60 @@
---
# pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-fe-meta-pvc
namespace: uavcloud-devflight
spec:
storageClassName: nfs-prod-distribute
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-fe-log-pvc
namespace: uavcloud-devflight
spec:
storageClassName: nfs-prod-distribute
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-be-storage-pvc
namespace: uavcloud-devflight
spec:
storageClassName: nfs-prod-distribute
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 180Gi # adjust to actual storage needs
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-be-log-pvc
namespace: uavcloud-devflight
spec:
storageClassName: nfs-prod-distribute
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi


@@ -0,0 +1,71 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fe-configmap
namespace: zyly
labels:
app.kubernetes.io/component: fe
data:
fe.conf: |
CUR_DATE=`date +%Y%m%d-%H%M%S`
# the output dir of stderr and stdout
LOG_DIR = ${DORIS_HOME}/log
JAVA_OPTS="-Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UseMembar -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xloggc:$DORIS_HOME/log/fe.gc.log.$CUR_DATE"
# For jdk 9+, this JAVA_OPTS will be used as default JVM options
JAVA_OPTS_FOR_JDK_9="-Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xlog:gc*:$DORIS_HOME/log/fe.gc.log.$CUR_DATE:time"
# INFO, WARN, ERROR, FATAL
sys_log_level = INFO
# NORMAL, BRIEF, ASYNC
sys_log_mode = NORMAL
# Default dirs to put jdbc drivers; default value is ${DORIS_HOME}/jdbc_drivers
# jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
http_port = 8030
arrow_flight_sql_port = 9090
rpc_port = 9020
query_port = 9030
edit_log_port = 9010
enable_fqdn_mode = true
---
apiVersion: v1
kind: ConfigMap
metadata:
name: be-configmap
namespace: zyly
labels:
app.kubernetes.io/component: be
data:
be.conf: |
CUR_DATE=`date +%Y%m%d-%H%M%S`
PPROF_TMPDIR="$DORIS_HOME/log/"
JAVA_OPTS="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xloggc:$DORIS_HOME/log/be.gc.log.$CUR_DATE -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -DJDBC_MIN_POOL=1 -DJDBC_MAX_POOL=100 -DJDBC_MAX_IDLE_TIME=300000 -DJDBC_MAX_WAIT_TIME=5000"
# For jdk 9+, this JAVA_OPTS will be used as default JVM options
JAVA_OPTS_FOR_JDK_9="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xlog:gc:$DORIS_HOME/log/be.gc.log.$CUR_DATE -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -DJDBC_MIN_POOL=1 -DJDBC_MAX_POOL=100 -DJDBC_MAX_IDLE_TIME=300000 -DJDBC_MAX_WAIT_TIME=5000"
# since 1.2, JAVA_HOME needs to be set to run the BE process.
# JAVA_HOME=/path/to/jdk/
# https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
# https://jemalloc.net/jemalloc.3.html
JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,lg_tcache_max:20,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
JEMALLOC_PROF_PRFIX=""
# INFO, WARNING, ERROR, FATAL
sys_log_level = INFO
# ports for admin, web, heartbeat service
be_port = 9060
webserver_port = 8040
heartbeat_service_port = 9050
arrow_flight_sql_port = 39091
brpc_port = 8060


@@ -0,0 +1,94 @@
apiVersion: doris.selectdb.com/v1
kind: DorisCluster
metadata:
labels:
app.kubernetes.io/name: doriscluster
name: doriscluster-helm
namespace: zyly
spec:
feSpec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- doris
replicas: 3
image: 172.16.100.55:8033/cmii/doris.fe-ubuntu:2.1.6
limits:
cpu: 8
memory: 16Gi
requests:
cpu: 2
memory: 6Gi
configMapInfo:
# use kubectl create configmap fe-configmap --from-file=fe.conf
configMapName: fe-configmap
resolveKey: fe.conf
persistentVolumes:
- mountPath: /opt/apache-doris/fe/doris-meta
name: doris-fe-2000g
persistentVolumeClaimSpec:
# when using a specific storage class, storageClassName should be reconfigured; see the annotation example.
storageClassName: doris-nfs-sc-56
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2000Gi
- mountPath: /opt/apache-doris/fe/jdbc_drivers
name: doriscluster-storage-fe-jdbc-drivers
persistentVolumeClaimSpec:
# when using a specific storage class, storageClassName should be reconfigured; see the annotation example.
storageClassName: doris-nfs-sc-58
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
beSpec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- doris
replicas: 3
image: 172.16.100.55:8033/cmii/doris.be-ubuntu:2.1.6
limits:
cpu: 8
memory: 24Gi
requests:
cpu: 2
memory: 6Gi
configMapInfo:
# use kubectl create configmap be-configmap --from-file=be.conf
configMapName: be-configmap
resolveKey: be.conf
persistentVolumes:
- mountPath: /opt/apache-doris/be/storage
name: doris-1-9000g-pvc
persistentVolumeClaimSpec:
# when using a specific storage class, storageClassName should be reconfigured; see the annotation example.
storageClassName: doris-nfs-sc-57
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9000Gi
- mountPath: /opt/apache-doris/be/jdbc_drivers
name: doriscluster-storage-be-jdbc-drivers
persistentVolumeClaimSpec:
# when using a specific storage class, storageClassName should be reconfigured; see the annotation example.
storageClassName: doris-nfs-sc-58
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1000Gi


@@ -0,0 +1,119 @@
apiVersion: doris.selectdb.com/v1
kind: DorisCluster
metadata:
labels:
app.kubernetes.io/name: doriscluster
name: doriscluster-helm
namespace: zyly
spec:
feSpec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: doris-deploy
operator: In
values:
- "true"
replicas: 1
image: 172.16.100.55:8033/cmii/doris.fe-ubuntu:2.1.6
limits:
cpu: 8
memory: 16Gi
requests:
cpu: 2
memory: 6Gi
configMapInfo:
# use kubectl create configmap fe-configmap --from-file=fe.conf
configMapName: fe-configmap
resolveKey: fe.conf
nodeSelector:
uavcloud.env: demo
persistentVolumes:
- mountPath: /opt/apache-doris/fe/doris-meta
name: doriscluster-storage0
persistentVolumeClaimSpec:
# when using a specific storage class, storageClassName should be reconfigured; see the annotation example.
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
# notice: if the storage size is less than 5G, FE will not start normally.
requests:
storage: 500Gi
- mountPath: /opt/apache-doris/fe/jdbc_drivers
name: doriscluster-storage-fe-jdbc-drivers
persistentVolumeClaimSpec:
# when using a specific storage class, storageClassName should be reconfigured; see the annotation example.
storageClassName: cmlc-nfs-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
beSpec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: doris-deploy
operator: In
values:
- "true"
replicas: 3
image: 172.16.100.55:8033/cmii/doris.be-ubuntu:2.1.6
limits:
cpu: 8
memory: 24Gi
requests:
cpu: 2
memory: 6Gi
configMapInfo:
# use kubectl create configmap be-configmap --from-file=be.conf
configMapName: be-configmap
resolveKey: be.conf
nodeSelector:
uavcloud.env: demo
persistentVolumes:
- mountPath: /opt/apache-doris/be/storage
name: doriscluster-storage1
persistentVolumeClaimSpec:
# when using a specific storage class, storageClassName should be reconfigured; see the annotation example.
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9000Gi
- mountPath: /opt/apache-doris/be/storage2
name: doriscluster-storage2
persistentVolumeClaimSpec:
# when using a specific storage class, storageClassName should be reconfigured; see the annotation example.
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9000Gi
- mountPath: /opt/apache-doris/be/log
name: doriscluster-storage3
persistentVolumeClaimSpec:
# when using a specific storage class, storageClassName should be reconfigured; see the annotation example.
storageClassName: doris-static-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 9000Gi
- mountPath: /opt/apache-doris/be/jdbc_drivers
name: doriscluster-storage-be-jdbc-drivers
persistentVolumeClaimSpec:
# when using a specific storage class, storageClassName should be reconfigured; see the annotation example.
storageClassName: cmlc-nfs-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi

File diff suppressed because it is too large


@@ -0,0 +1,340 @@
# Source: doris-operator/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: serviceaccount
app.kubernetes.io/instance: controller-doris-operator-sa
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: doris-operator
namespace: zyly
---
# Source: doris-operator/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: doris-operator
rules:
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets/status
verbs:
- get
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- doris.selectdb.com
resources:
- dorisclusters
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- doris.selectdb.com
resources:
- dorisclusters/finalizers
verbs:
- update
- apiGroups:
- doris.selectdb.com
resources:
- dorisclusters/status
verbs:
- get
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
---
# Source: doris-operator/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: clusterrolebinding
app.kubernetes.io/instance: doris-operator-rolebinding
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: doris-operator-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: doris-operator
subjects:
- kind: ServiceAccount
name: doris-operator
namespace: zyly
---
# Source: doris-operator/templates/leader-election-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/name: role
app.kubernetes.io/instance: leader-election-role
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: leader-election-role
namespace: zyly
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: doris-operator/templates/leader-election-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: rolebinding
app.kubernetes.io/instance: leader-election-rolebinding
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: leader-election-rolebinding
namespace: zyly
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: doris-operator
namespace: zyly
---
# Source: doris-operator/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-operator
namespace: zyly
labels:
control-plane: doris-operator
app.kubernetes.io/name: deployment
app.kubernetes.io/instance: doris-operator
app.kubernetes.io/component: doris-operator
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
spec:
selector:
matchLabels:
control-plane: doris-operator
replicas: 1
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: doris-operator
labels:
control-plane: doris-operator
spec:
# TODO(user): Uncomment the following code to configure the nodeAffinity expression
# according to the platforms which are supported by your solution.
# It is considered best practice to support multiple architectures. You can
# build your manager image using the makefile target docker-buildx.
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: doris-deploy
operator: In
values:
- "true"
securityContext:
runAsNonRoot: true
# TODO(user): For common cases that do not require escalating privileges
# it is recommended to ensure that all your Pods/Containers are restrictive.
# More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
# Please uncomment the following code if your project does NOT have to work on old Kubernetes
# versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
# seccompProfile:
# type: RuntimeDefault
containers:
- command:
- /dorisoperator
args:
- --leader-elect
image: 172.16.100.55:8033/cmii/doris.k8s-operator:1.3.1
name: dorisoperator
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
# TODO(user): Configure the resources accordingly based on the project requirements.
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources:
requests:
cpu: 2
memory: 4Gi
limits:
cpu: 2
memory: 4Gi
serviceAccountName: doris-operator
terminationGracePeriodSeconds: 10


@@ -0,0 +1,28 @@
apiVersion: v1
clusters:
- name: clusterIpByhncm
cluster:
server: https://36.137.146.187:6443
insecure-skip-tls-verify: true
- name: clusterDNSByhncm
cluster:
server: https://apiserver.cluster.local:6443
certificate-authority-data: >-
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM3RENDQWRTZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQ0FYRFRJME1EY3lOREUyTXpJeE0xb1lEekl4TWpRd056QXhNVFl6TWpFeldqQVZNUk13RVFZRApWUVFERXdwcmRXSmxjbTVsZEdWek1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCjA0a3gyZFRpTHpCQWZPU2FjVkV5dmlIOE1GNTZBYmlzZnY1cXFOUVVlUk84ekFrVWx1NENqRHkzRDhTVVJiVnoKTmFKUXMwc3RNU00zd09mNDFFbGVZREF0amR3ZXpNZDZDdXlaYkhPQnRteE9heGdTdWlid2hqSGcwTTZzRndiTQplNzJVYy8rcGY2R3dqaDdXc0N0bk02ZG1hZ0FmVzhicGJOMWNIRERIblRJbzlIbEhBNURFUE1GYkk3VGkyQ0hZCjl6UE01MlpZazR1M2RuWDRPQWI3K0hXUjdidEFLYzFIdWxreEhqOHZnNXN1bmtGSjhia1BGYmRQY3BmSmhPREYKSjljcVllUXpYWGpXSmZvUFBGZFNlMDAvc2NXYlNSYjlRa1Z3QXl5VTRKcVl3UU9CUDIxNm9Sd3FUTUJjTkk4WQpQRHgwY0paVjliVXpEazNtdVN4eFF3SURBUUFCbzBVd1F6QU9CZ05WSFE4QkFmOEVCQU1DQXFRd0VnWURWUjBUCkFRSC9CQWd3QmdFQi93SUJBREFkQmdOVkhRNEVGZ1FVcFAyalRRQU1JanIrQ1QxTkwxZXkxT2J1UWtZd0RRWUoKS29aSWh2Y05BUUVMQlFBRGdnRUJBSGhRM3NkSzV6WitSV0YxWWgvQklxMHE3NXpMcm1BdEFoNWxxL01sTFgvcApuTjhZYm9QZk4wdG84ekxtbnNCcHFDby9nOXozUENyS3lFbkkwNVBwYkkveHk4MW0xUXdrcEVMUWJPTVFXbk9JCnpRZmJhTktXVFJiR2pYWGtPMWVkMVZmV0YyN2p4amhlSW1kWWZQQjQxYkVjMGh4bnRNazB0NXdxZ3M2ZFVwdUMKQk9vVUhqOUxVUDIwZ0VqUmhEbnZPMGNERmU5SGd4Z1E2QTJnUDcraW8rbUx6M0xlWDR4a2ZRcXM1YWtEQVVocwpvTXBKUUlmZnRjZzRiWjR6dWZRT0hVaDM2QVBWSVI1NTkreUp5V1FhdlRyVUFQTjFPNG1kaTlOVEN2bXp3K3RnCmZaMVNtc3B2SFVDU1ZXM1BxQ3ZQaXNXeWFYd3dMUjRORThDZjIycXRVZXM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
users:
- name: userByhncm
user:
client-certificate-data: >-
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJekNDQWd1Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJMU1EUXlOVEEyTURnek9Gb1hEVE13TURReU5EQTJNRGd6T0Zvd1N6RVRNQkVHQTFVRQpDaE1LYzNsemRHVnRPbXRqY3pFME1ESUdBMVVFQXhNcllXUmhaVE0xWkdRME56WXhOREpsTlRrelpETTBaRE5rCk1tTmhOR0ZoTkRJdE1qSXhOREl3TWpBMk16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0MKZ2dFQkFONFRzRFBBTWJnNk4wY2l4Q002YjhmeHpUeFBWaG9LL2REemxVeXlqZEpnQ2hLNFpLVzhqR3paa3p1SQo0d0NxWmx1cVBBeGNoY0xnS1hjVnJZSFF6OWhoT2NRL2hHK3c4Y2Znck16MFlGckpDRlhXU2NTN3lJbTJGNHk2CkFWenlTbVAzMktTdVRzL0xGZlJYam5sclNVSjJyeWFaWlhBUENRMXJaT3BQRnZiQTA5VzY1WTJLMHJFZ25tYUQKVng4SUFGUXVtQS82NUpqQ2NmK0pLS0p4cTlnU2l4RHFZTXF5MGxianU3cVNVeFhUaFR3MGsvYjdmM1d4U0phTgp3RnU0ckE1czhPYTRhTjd3STAxWmllNmMvS2dxbUVkVDIxclFvMTFRUCs3OFdmcGc4bDIvOFd1WVZiY0pWTmZwClEzMlplWmJwZFUrZ2pnVC92YkY2bWpmdEJNRUNBd0VBQWFOSU1FWXdEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CTUcKQTFVZEpRUU1NQW9HQ0NzR0FRVUZCd01DTUI4R0ExVWRJd1FZTUJhQUZLVDlvMDBBRENJNi9nazlUUzlYc3RUbQo3a0pHTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFCdFcrOStNeWQ0b0FIdDBYK203ckdlR01paXVhbXhoM3RlCjc2YUtnemRDUDU0cS8yd3hmZHZlRmhyT1Roamw4bkpSZzJMUGRFSjR2ek5WWW9lMDc1L3hjbDRFQ3Y5dUUrM2UKSkFzSjBjSlEzMmlmRGZvRmovNE52djZSN2J1bXpwOGlKK0UxOXBmZ3ZXeDFzcHNqYXgzOVBqTUtseWhqSGp6YwpIOXpLeXV1QXkyMG10c0c1RW5mcnlSb2pqS0NGd0xNK0dnT21rYlRVRGczZk9jSXFQT3pYdVd4OHFzaTB0dk1oCkJyUXhRMVlWVkN4UlBLOW4rYzNHVmdCZE5ZdTRJSVFURmdBNURhUTZ4WXZzamo4eXpIbTdOYTJwMTZ2ODFnVlEKOEJxdHBOQWQwZlBQUDdjZ0xucUNaNGExalFxQld1UlExclhyNFgzK1R4NVp1azNsNEVlRgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
client-key-data: >-
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBM2hPd004QXh1RG8zUnlMRUl6cHZ4L0hOUEU5V0dncjkwUE9WVExLTjBtQUtFcmhrCnBieU1iTm1UTzRqakFLcG1XNm84REZ5Rnd1QXBkeFd0Z2REUDJHRTV4RCtFYjdEeHgrQ3N6UFJnV3NrSVZkWkoKeEx2SWliWVhqTG9CWFBKS1kvZllwSzVPejhzVjlGZU9lV3RKUW5hdkpwbGxjQThKRFd0azZrOFc5c0RUMWJybApqWXJTc1NDZVpvTlhId2dBVkM2WUQvcmttTUp4LzRrb29uR3IyQktMRU9wZ3lyTFNWdU83dXBKVEZkT0ZQRFNUCjl2dC9kYkZJbG8zQVc3aXNEbXp3NXJobzN2QWpUVm1KN3B6OHFDcVlSMVBiV3RDalhWQS83dnhaK21EeVhiL3gKYTVoVnR3bFUxK2xEZlpsNWx1bDFUNkNPQlArOXNYcWFOKzBFd1FJREFRQUJBb0lCQUdWQkF0REZxNlc4M1dGUQp4NjdzUVBVZ0daeUs2ODRaL3RvYmhaSWZ3eXdOWCtwaktuaXF2RGdSQitGWUszQXRRVmhBQ1ByMTRJTVc0N0VKCk1FQUZMZzhSWFY1T081c1ZTVmdCNTZmWE1HdVVSM21qcDZUTW5jVzBRWTZIYklHdGN4K2JiS3JCVXV2SlEreFIKVTRPbTJCTTJ1K3RVZTN5WlRsNVEvZ1ViSkhSQ1l6a0JtUVZUd29JeGRTb0VaV1RUNHhBa2E4anBMajgxV2pZdQpXdTIyWW9qNEZLYmtpNExsNzhEdzRYVHg5S0RLN1ArcGZINk0xQ1ZBanRuTWI3Qmo0NkJML3dhREFHWCs4YU5ECnNOcExGSDQ2SEQ3ZGR6L3g5TDc0ODdYVG4yMC9kVm9pMkh5SUlaZWt6ZTZUZzlLNk42SEpIejVMQ1kxUmRHOXEKa3pxU2w4RUNnWUVBL2RFSVZlWWtTVXU1YVlaSitTQmQ5bkphWGFKSGc5UTIwQ09nYlNyRG5pdHJkbDFzMG5XcApUL2UvWmhzRmpBV1ZiT3RTeWI4K0hEdVA2NlZ0UkVSUXBMZjUvVDFDUDF5S2Z6a0I2KzlBM3lJRW5URWdEZU9HClJuRzF6a1poUXZ4Z29ESldVS1FTVEtheXFEVkpFTXllOUlRY0d6SitmRElaYjVuSnk2a2V2UmtDZ1lFQTMvekIKMCsraUtxaTc1bVVxdXcwU2NOeHRiZnZabEF4aXhXaW9IZHpRZTd4WHU4NlB2YTRRbXlIYXZ4SSs0TzRxWkZaWQpBa3lRYmM2WWpQbk5tYTgyMmlTOGNyUE5WZHFUMXN2VFh2NWJwNDdZKzNiQ0xGelZOU0ppbzZJNFpwVnJ4cFBDCnhqSzgvbUhlemRqMHM5NEVRZjNIc29ReURJc1duQlQ1aDMvT1Vla0NnWUFrWHNLeUJHTm91VmdkUGVmMXNlU2sKL3VmaUNQSDBld0ZMdS84dUJYVzRnWUpKbU1PU09OZ0ZKSHdodG5lS0EvYlFmN3dDT3N3OG9YQlRGUVplUk9mRgpZck9JelRGa0FPKzdvVUNjUFZGdlorRi9ZTjIxLzhkcTZycGhqNFk2WWcyNmh0d0ZmQzAvSEpmM01JT0N5Nzl2ClRFcjJ2cnFQTTJLZnNrclBTekdqYVFLQmdRRFNWV0lXaVFMNW1EWEtwdG0zaVJEMkZxaFVEWnBGNGxPbUdSODEKMjdPVFhrdnN1bzVVaW01aG55cnhIa3Nab2MwQkordkJUUENiak5QSUhwUXBhMVhSQjRwSEUvMFFVQTlnTjBzbgoreWRLYzJQaXo2U0xVQ21PbWRtUzI3U204RTJpemdLclo5Mzk2eDMwbzNVMnFwZkkrRzFjZjdNWUFFeWRDVDFCCk5vMGljUUtCZ1FDY2dueEtzYVowZzF2VHYyOXRyZHBUcS9UVWVKek1jSlU3WEp0Z0p2OXJMamFqN2lhb01UeTIKY2xSVkRjVzN5OUhualN0Rk9GN0gxVlBLZEFvaUlWdWs5TjlLalk3VkRUWmdPY0QwU3NKd1lMOUl2SEFIT3ArZAozUVpZV3VBQnBlaFlqK2svQm0xWjVyVUg5S05uQ24zRVJ0RHNCTGJYRUEvemlONnFEZnpSSnc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
contexts:
- name: cxtIPByhncm
context:
cluster: clusterIpByhncm
user: userByhncm
- name: cxtDNSByhncm
context:
cluster: clusterDNSByhncm
user: userByhncm
current-context: cxtIPByhncm


@@ -0,0 +1,174 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-watchdog
namespace: zyly
labels:
cmii.type: backend
cmii.app: cmii-uav-watchdog
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 6.2.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: backend
cmii.app: cmii-uav-watchdog
template:
metadata:
labels:
cmii.type: backend
cmii.app: cmii-uav-watchdog
spec:
affinity: {}
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-watchdog
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/cmii-uav-watchdog:2025-04-10-15-24-03
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: zyly
- name: APPLICATION_NAME
value: cmii-uav-watchdog
- name: CUST_JAVA_OPTS
value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
- name: NACOS_REGISTRY
value: "helm-nacos:8848"
- name: NACOS_DISCOVERY_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NACOS_DISCOVERY_PORT
value: "8080"
- name: BIZ_CONFIG_GROUP
value: 6.2.0
- name: SYS_CONFIG_GROUP
value: 6.2.0
- name: IMAGE_VERSION
value: 6.2.0
- name: NACOS_USERNAME
value: "developer"
- name: NACOS_PASSWORD
value: "Deve@9128201"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NODE_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: LIMIT_CPU
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog
resource: limits.cpu
- name: LIMIT_MEMORY
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog
resource: limits.memory
- name: REQUEST_CPU
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog
resource: requests.cpu
- name: REQUEST_MEMORY
valueFrom:
resourceFieldRef:
containerName: cmii-uav-watchdog
resource: requests.memory
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/cmii-uav-watchdog/
readOnly: false
subPath: zyly/cmii-uav-watchdog
- name: mysql-data
mountPath: /bitnami/mysql
- name: cmii-uav-watchdog-conf
mountPath: /cmii/cmii-uav-watchdog/config.yaml
subPath: config.yaml
volumes:
- name: mysql-data
persistentVolumeClaim:
claimName: helm-mysql
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
- name: cmii-uav-watchdog-conf
configMap:
name: cmii-uav-watchdog-configmap
items:
- key: config.yaml
path: config.yaml
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-watchdog
namespace: zyly
labels:
cmii.type: backend
cmii.app: cmii-uav-watchdog
octopus/control: backend-app-1.0.0
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/app-version: 6.2.0
spec:
type: NodePort
selector:
cmii.type: backend
cmii.app: cmii-uav-watchdog
ports:
- name: backend-tcp
port: 8080
protocol: TCP
targetPort: 8080
---
kind: ConfigMap
apiVersion: v1
metadata:
name: cmii-uav-watchdog-configmap
namespace: zyly
data:
config.yaml: |-
server:
port: "8080" # 服务器端口
tier_one_auth:
tier_one_secret: "NK537TIWSUOFIS7SYCUJ6A7FPOGFVM3UH67TJRX3IYQAHKZXK2X7SBAA6JOXZVSV3U6K5YZUX7Q6TWOPK6YCRU6MIML33ZJFBN55I2Q" # TOTP密钥
time_offset_allowed: 30 # 允许的时间偏移(秒)
watchdog_center:
url: "https://watchdog-center.example.com" # 一级授权中心地址
project:
project_namespace: "zyly" # 项目命名空间
tier_two_auth:
tier_two_secret: "your_tier_two_secret_here" # 二级授权密钥


@@ -0,0 +1,138 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: pyfusion-configmap
namespace: zyly
data:
config.yaml: |-
mqtt:
broker: "helm-emqxs"
port: 1883
username: "cmlc"
password: "odD8#Ve7.B"
topics:
mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: cmii-uavms-pyfusion
namespace: zyly
labels:
app.kubernetes.io/app-version: 6.2.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
replicas: 1
selector:
matchLabels:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
template:
metadata:
creationTimestamp: null
labels:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
spec:
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
- name: pyfusion-conf
configMap:
name: pyfusion-configmap
items:
- key: config.yaml
path: config.yaml
containers:
- name: cmii-uavms-pyfusion
image: 'hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/cmii-uavms-pyfusion:6.3.3'
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
env:
- name: VERSION
value: 6.2.0
- name: NACOS_SYSTEM_CONFIG_NAME
value: cmii-backend-system
- name: NACOS_SERVICE_CONFIG_NAME
value: cmii-uavms-pyfusion
- name: NACOS_SERVER_ADDRESS
value: 'helm-nacos:8848'
- name: K8S_NAMESPACE
value: zyly
- name: APPLICATION_NAME
value: cmii-uavms-pyfusion
- name: NACOS_DISCOVERY_PORT
value: '8080'
- name: BIZ_CONFIG_GROUP
value: 6.2.0
- name: SYS_CONFIG_GROUP
value: 6.2.0
- name: IMAGE_VERSION
value: 6.2.0
resources:
limits:
cpu: '2'
memory: 3Gi
requests:
cpu: 200m
memory: 500Mi
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
subPath: zyly/cmii-uavms-pyfusion
- name: pyfusion-conf
mountPath: /app/config.yaml
subPath: config.yaml
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: cmii-uavms-pyfusion
namespace: zyly
labels:
app.kubernetes.io/app-version: 6.2.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
ports:
- name: backend-tcp
protocol: TCP
port: 8080
targetPort: 8080
selector:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
type: ClusterIP
sessionAffinity: None


@@ -0,0 +1,9 @@
kind: Secret
apiVersion: v1
metadata:
name: harborsecret
namespace: zyly
data:
.dockerconfigjson: >-
ewogICAgICAgICJhdXRocyI6IHsKICAgICAgICAgICAgICAgICJobmNtLWZjNzY2Zjg0LmVjaXMuY2hhbmdzaGEtMi5jbWVjbG91ZC5jbiI6IHsKICAgICAgICAgICAgICAgICAgICAgICAgImF1dGgiOiAiZW5sc2VUcFdNbko1VTNSeVFHNW5VSE56IgogICAgICAgICAgICAgICAgfQogICAgICAgIH0KfQ==
type: kubernetes.io/dockerconfigjson


@@ -0,0 +1,66 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
namespace: zyly
name: helm-minio
spec:
serviceName: helm-minio-service
replicas: 1
selector:
matchLabels:
app: helm-minio
template:
metadata:
labels:
app: helm-minio
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: minio
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/minio:RELEASE.2023-06-02T23-17-26Z
command: ["/bin/sh", "-c"]
args:
- minio server /data --console-address ":9001"
ports:
- containerPort: 9000
name: api
- containerPort: 9001
name: console
env:
- name: MINIO_ACCESS_KEY
value: "cmii"
- name: MINIO_SECRET_KEY
value: "B#923fC7mk"
volumeMounts:
- name: data
mountPath: /data
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
volumes:
- name: data
persistentVolumeClaim:
claimName: helm-minio
---
apiVersion: v1
kind: Service
metadata:
name: helm-minio-service
namespace: zyly
spec:
selector:
app: helm-minio
ports:
- name: api
port: 9000
targetPort: 9000
- name: console
port: 9001
targetPort: 9001
clusterIP: None

File diff suppressed because it is too large


@@ -0,0 +1,644 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-awareness
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "awareness",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-flight-control
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "flight-control",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-iot
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "iot",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: zyly
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "zyly.hncmict.com",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
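# Pattern note: every frontend app gets a tenant-prefix-<app> ConfigMap whose ingress-config.js is
# mounted over the app's dist directory (see the platform Deployments below); AppClientId "empty"
# presumably means no auth client is bound for that app.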


@@ -0,0 +1,309 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 39999
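      # NOTE: 39999 lies outside the default NodePort range (30000-32767); this assumes the
      # apiserver's --service-node-port-range was widened on this cluster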
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
          image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
          image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
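# To obtain a dashboard login token on clusters older than v1.24 (assumed here, where ServiceAccount
# secrets are still auto-created):
#   kubectl -n kube-system describe secret $(kubectl -n kube-system get sa admin-user -o jsonpath='{.secrets[0].name}')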


@@ -0,0 +1,286 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: zyly
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: zyly
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
  EMQX_CLUSTER__K8S__NAMESPACE: "zyly"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
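  # anonymous access is disabled and unmatched ACL rules are denied, so every client must
  # authenticate against the mnesia entries defined in helm-emqxs-cm below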
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: zyly
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.0
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
    # clientid authentication entries
auth.client.1.clientid = admin
auth.client.1.password = odD8#Ve7.B
auth.client.2.clientid = cmlc
auth.client.2.password = odD8#Ve7.B
    ## username authentication entries
auth.user.1.username = admin
auth.user.1.password = odD8#Ve7.B
auth.user.2.username = cmlc
auth.user.2.password = odD8#Ve7.B
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
    {emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: zyly
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.0
spec:
affinity: {}
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
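        # NOTE: loaded_plugins and emqx_auth_mnesia.conf are EMQX 4.x mechanisms; the emqx:5.5.1
        # image tag above uses a different config/auth model and may ignore these files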
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: zyly
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: zyly
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: zyly
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: zyly
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 31085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 31083
- port: 8883
name: mqtt-ssl
targetPort: 8883
nodePort: 31183
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: zyly
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370


@@ -0,0 +1,203 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-cm
namespace: zyly
labels:
cmii.type: frontend
data:
nginx.conf: |
server {
listen 9528;
server_name localhost;
gzip on;
location / {
root /home/cmii-platform/dist;
index index.html index.htm;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-uasms
namespace: zyly
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: uas-2.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-uasms
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/cmii-uav-platform-uasms:master-2.0-pro-250428
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: zyly
- name: APPLICATION_NAME
value: cmii-uav-platform-uasms
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-uasms
items:
- key: ingress-config.js
path: ingress-config.js
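      # the subPath mounts overlay single files (nginx.conf, ingress-config.js) without hiding
      # the rest of the image's dist directory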
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-uasms
namespace: zyly
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
octopus.control: frontend-app-wdd
app.kubernetes.io/version: uas-2.0
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-uas
namespace: zyly
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: uas-2.0
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-uas
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/cmii-uav-platform-uas:master-2.0-pro-250428-uas
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: zyly
- name: APPLICATION_NAME
value: cmii-uav-platform-uas
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-uas
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-uas
namespace: zyly
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
octopus.control: frontend-app-wdd
app.kubernetes.io/version: uas-2.0
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528


@@ -0,0 +1,350 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: zyly
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
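    # rewrite-target /$1 strips the app prefix: each "/app/?(.*)" path below forwards only the
    # captured remainder to its backend service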
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/awareness)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/blockchain)$ $1/ redirect;
rewrite ^(/classification)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dikongzhixingh5)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/eventsh5)$ $1/ redirect;
rewrite ^(/flight-control)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/iot)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/mianyangbackend)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/pilot2cloud)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/renyike)$ $1/ redirect;
rewrite ^(/scanner)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/smauth)$ $1/ redirect;
rewrite ^(/smsecret)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
rewrite ^(/uavmsmanager)$ $1/ redirect;
rewrite ^(/secenter)$ $1/ redirect;
spec:
rules:
- host: zyly.hncmict.com
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /awareness/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-awareness
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /blockchain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-blockchain
servicePort: 9528
- path: /classification/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-classification
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dikongzhixingh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dikongzhixingh5
servicePort: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-eventsh5
servicePort: 9528
- path: /flight-control/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-flight-control
servicePort: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /iot/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-iot-manager
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mianyangbackend
servicePort: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-pilot2-to-cloud
servicePort: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /renyike/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-renyike
servicePort: 9528
- path: /scanner/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-scanner
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smauth
servicePort: 9528
- path: /smsecret/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smsecret
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uas
servicePort: 9528
- path: /uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasms
servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /uavmsmanager/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-manager
servicePort: 9528
- path: /secenter/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-security-center
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: zyly
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection "upgrade";
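      # forwarding the Upgrade/Connection headers lets WebSocket connections pass through to the gateways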
spec:
rules:
- host: zyly.hncmict.com
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- path: /uas/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- path: /converge/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-sky-converge
servicePort: 8080


@@ -0,0 +1,84 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: zyly
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.0
spec:
type: ClusterIP
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: zyly
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: helm-mongo
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/mongo:5.0
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---


@@ -0,0 +1,418 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: zyly
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
annotations: {}
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: zyly
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: zyly
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
    innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
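    # 1296000 seconds = 15 days of binlog retention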
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: zyly
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
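    # NOTE: scripts in /docker-entrypoint-initdb.d run only when the data directory is first initialized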
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: zyly
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: zyly
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: zyly
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: zyly
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: zyly
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: zyly
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: zyly
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: zyly
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: zyly
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: {}
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
containers:
- name: mysql
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits:
memory: 4Gi
cpu: "4"
requests:
memory: 4Gi
cpu: "2"
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
persistentVolumeClaim:
claimName: helm-mysql


@@ -0,0 +1,136 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: zyly
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: uas-2.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: zyly
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: uas-2.0
spec:
type: ClusterIP
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: zyly
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: uas-2.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: uas-2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 2Gi
cpu: "1"
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
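        # MODE=standalone runs a single Nacos node; the NACOS_REPLICAS and cluster discovery
        # settings above are effectively unused in this mode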
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---


@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmiibusybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
        claimName: test-claim # must match the PVC name above
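# if test-pod completes and NFS-CREATE-SUCCESS appears under the NFS export, dynamic provisioning works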


@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # set to the namespace where the provisioner is deployed; same for the manifests below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
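# NOTE: this binds the ServiceAccount to full cluster-admin; the scoped
# nfs-client-provisioner-runner ClusterRole defined above would normally suffice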
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # must match the namespace used in the RBAC manifests above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
        image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: hcms-efs-class
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: hcms-efs-class
path: /var/lib/docker/nfs_data


@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: zyly
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.0
spec:
storageClassName: hcms-efs-class
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: zyly
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.0
spec:
storageClassName: hcms-efs-class
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: zyly
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.0
spec:
storageClassName: hcms-efs-class
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: zyly
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.0
spec:
storageClassName: hcms-efs-class
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mysql
namespace: zyly
labels:
cmii.type: middleware-base
cmii.app: helm-mysql
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.0
spec:
storageClassName: hcms-efs-class
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 50Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-minio
namespace: zyly
labels:
cmii.type: middleware-base
    cmii.app: helm-minio
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.0
spec:
storageClassName: hcms-efs-class
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 500Gi
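# NOTE: all PVCs above request storageClassName hcms-efs-class rather than the nfs-prod-distribute
# class defined earlier; presumably that class already exists in the target cluster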


@@ -0,0 +1,334 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: zyly
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: zyly
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: zyly
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: zyly
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: zyly
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: zyly
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
targetPort: stats
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: zyly
publishNotReadyAddresses: true
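  # pod DNS names resolve before readiness so rabbit_peer_discovery_k8s can form the cluster during startup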
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: zyly
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: rabbitmq
spec:
type: ClusterIP
ports:
- name: amqp
port: 5672
targetPort: amqp
- name: dashboard
port: 15672
targetPort: dashboard
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: zyly
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: zyly
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: zyly
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq


@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: zyly
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: zyly
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: zyly
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: zyly
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: zyly
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: zyly
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: zyly
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: zyly
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: zyly
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: zyly
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: zyly
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: zyly
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: zyly
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: {}
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: zyly
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 0
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: zyly
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zyly
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.zyly.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}
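Note that /data sits on emptyDir in both statefulsets, so Redis state does not survive pod rescheduling. A minimal post-deploy check, reading the password from the secret defined above:
# ping the master through redis-cli, authenticating with the chart secret
REDIS_PASSWORD=$(kubectl -n zyly get secret helm-redis -o jsonpath='{.data.redis-password}' | base64 -d)
kubectl -n zyly exec helm-redis-master-0 -- redis-cli -a "$REDIS_PASSWORD" ping   # expect PONG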

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: zyly
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://36.137.146.187;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: zyly
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30080
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: zyly
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: zyly
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: zyly
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 31935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: dkjg.hncmict.com
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: zyly/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: zyly/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
env:
- name: OSS_ENDPOINT
value: 'http://helm-minio:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: zyly/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: zyly
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: zyly
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: zyly
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: zyly
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: uas-2.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: uas-2.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: uas-2.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://36.137.146.187:31935'
rtsp: 'rtsp://36.137.146.187:30554'
srt: 'srt://36.137.146.187:30556'
flv: 'http://36.137.146.187:30500'
hls: 'http://36.137.146.187:30500'
rtc: 'webrtc://36.137.146.187:30080'
replay: 'https://36.137.146.187:30333'
minio:
endpoint: http://helm-minio:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
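A hedged smoke test for the SRS NodePorts above; NODE_IP is a placeholder for any cluster node and /live/test is an arbitrary stream name:
# publish a synthetic stream over the RTMP NodePort
ffmpeg -re -f lavfi -i testsrc=size=640x480:rate=25 -c:v libx264 -f flv rtmp://NODE_IP:31935/live/test
# the stream should then appear in the SRS HTTP API (1985 exposed as 30080)
curl http://NODE_IP:30080/api/v1/streams/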

@@ -0,0 +1,24 @@
wget --no-check-certificate https://oss.demo.uavcmlc.com/cmlc-installation/tmp/all_tables_jianguan.sql
wget --no-check-certificate https://oss.demo.uavcmlc.com/cmlc-installation/tmp/sense_adapter.sql
wget --no-check-certificate https://oss.demo.uavcmlc.com/cmlc-installation/tmp/uav_lifecycle.sql
wget --no-check-certificate https://oss.demo.uavcmlc.com/cmlc-installation/tmp/uav_notice.sql
wget --no-check-certificate https://oss.demo.uavcmlc.com/cmlc-installation/tmp/z_cmii_nacos_config.sql
wget --no-check-certificate https://oss.demo.uavcmlc.com/cmlc-installation/tmp/doris_table_init.sql
wget --no-check-certificate https://oss.demo.uavcmlc.com/cmlc-installation/tmp/uav_lifecycle_0514.sql
mc rm demo/cmlc-installation/tmp/all_tables_jianguan.sql
mc rm demo/cmlc-installation/tmp/sense_adapter.sql
mc rm demo/cmlc-installation/tmp/uav_lifecycle.sql
mc rm demo/cmlc-installation/tmp/uav_notice.sql
mc rm demo/cmlc-installation/tmp/z_nacos_config_info.sql
mc rm demo/cmlc-installation/tmp/uav_lifecycle_0514.sql
mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P3306 < uav_lifecycle_0514.sql
mysql -hdoris-cluster-fe-service -P9030 -uroot < doris_table_init.sql
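A sanity check after the two imports, reusing the endpoints and credentials from the commands above:
# verify the MySQL-side schema
mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P3306 -e "SHOW TABLES FROM uav_lifecycle;"
# verify the Doris-side schema
mysql -hdoris-cluster-fe-service -P9030 -uroot -e "SHOW DATABASES;"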

@@ -1,32 +1,32 @@
server {
    listen 8889;
    server_name localhost;
    # Domains allowed for cross-origin requests; * means all
    add_header 'Access-Control-Allow-Origin' *;
    # Allow requests to carry cookies
    add_header 'Access-Control-Allow-Credentials' 'true';
    # Allowed request methods, e.g. GET/POST/PUT/DELETE
    add_header 'Access-Control-Allow-Methods' *;
    # Allowed request headers
    add_header 'Access-Control-Allow-Headers' *;
    location /electronic {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }
    location /satellite {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }
    # http://192.168.6.6:8889/electronic/{z}/{x}/{y}.png
    # http://192.168.6.6:8889/satellite/{z}/{x}/{y}.png
    # /root/offline_map/satellite /root/offline_map/electronic
}
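With the tiles unpacked under /root/offline_map, a single fetch against the URL patterns in the comments verifies the mapping; the tile coordinates here are placeholders:
# expect HTTP 200 with an image/png body
curl -I http://192.168.6.6:8889/electronic/3/6/3.png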

@@ -1,50 +1,50 @@
upstream proxy_server {
    ip_hash;
    server 172.16.100.55:30500;
    server 172.16.100.59:30500;
    server 172.16.100.60:30500;
}
server {
    listen 8088;
    server_name localhost;
    location / {
        proxy_pass http://proxy_server;
        client_max_body_size 5120m;
        client_body_buffer_size 5120m;
        client_body_timeout 6000s;
        proxy_send_timeout 10000s;
        proxy_read_timeout 10000s;
        proxy_connect_timeout 600s;
        proxy_max_temp_file_size 5120m;
        proxy_request_buffering on;
        proxy_buffering off;
        proxy_buffer_size 4k;
        proxy_buffers 4 12k;
        proxy_set_header Host fake-domain.eedsjc-uavms.io;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
    location /_AMapService/v4/map/styles {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://webapi.amap.com/v4/ap/styles;
    }
    location /_AMapService/ {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://restapi.amap.com/;
    }
    location /rtc/v1/ {
        add_header Access-Control-Allow-Headers X-Requested-With;
        add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
        proxy_pass http://127.0.0.1:30985/rtc/v1/;
    }
    location ~ ^/\w*/actuator/ {
        return 403;
    }
}
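A hedged check that the 8088 proxy balances to the upstreams and that the actuator block works; PROXY_HOST is a placeholder for whichever node runs this nginx:
# a plain request should come back from one of the 30500 upstreams
curl -I http://PROXY_HOST:8088/
# actuator endpoints must be rejected by the location rule above
curl -o /dev/null -s -w '%{http_code}\n' http://PROXY_HOST:8088/app/actuator/health   # expect 403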

@@ -1,44 +1,44 @@
user root;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
    use epoll;
    worker_connections 65535;
}
http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    server_tokens off;
    sendfile on;
    send_timeout 1200;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 600;
    types_hash_max_size 2048;
    client_max_body_size 2048m;
    client_body_buffer_size 2048m;
    underscores_in_headers on;
    proxy_send_timeout 600;
    proxy_read_timeout 600;
    proxy_connect_timeout 600;
    proxy_buffer_size 128k;
    proxy_buffers 8 256k;
    include /etc/nginx/conf.d/*.conf;
}
stream {
    include /etc/nginx/conf.d/stream/*.conf;
}
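Before restarting anything, the rendered file can be validated in place; the container name is an assumption based on the compose service below:
# syntax-check and hot-reload inside the running container
docker exec cmii-nginx nginx -t
docker exec cmii-nginx nginx -s reload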

@@ -1,17 +1,17 @@
version: '3'
services:
  cmii-nginx:
    image: 172.16.100.55:8033/cmii/nginx:1.27.0
    volumes:
      - /etc/nginx/conf.d:/etc/nginx/conf.d
      - /etc/nginx/nginx.conf:/etc/nginx/nginx.conf
      - /root/offline_map:/root/offline_map
    ports:
      - "8088:8088"
      - "8089:8089"
    restart: always
# mkdir -p /etc/nginx/conf.d
# touch /etc/nginx/nginx.conf
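A minimal bring-up sequence following the two prerequisite comments, assuming this file is saved as docker-compose.yml on the host:
mkdir -p /etc/nginx/conf.d /root/offline_map
touch /etc/nginx/nginx.conf
docker-compose up -d
docker-compose ps   # cmii-nginx should report Up with 8088/8089 published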

@@ -1,46 +1,46 @@
vim /etc/systemd/system/cmii-startup.service
[Unit]
Description=Cmii Start Up Script
[Service]
ExecStart=/bin/bash /cmii/start-up.sh
User=root
Group=root
[Install]
WantedBy=multi-user.target
vim /cmii/start-up.sh
docker-compose -f /cmii/harbor/docker-compose.yml up -d
sleep 10
docker-compose -f /cmii/0-minio-dockercompose.yml up -d
rm -rf /nfsdata/zhbf-helm-emqxs-pvc-fdb605a0-5120-481a-bdd5-7ef1213c2363/
sleep 5
kubectl delete -n zhbf pod helm-nacos-0 --force
kubectl delete -n zhbf pod helm-emqxs-0 --force
kubectl delete -n zhbf pod helm-redis-master-0 --force
kubectl delete -n zhbf pod helm-redis-replicas-0 --force
sleep 30
for kindof in pods
do
kubectl -n zhbf delete $kindof $(kubectl -n zhbf get $kindof | grep "cmii"| awk '{print$1}')
done
chmod +x /cmii/start-up.sh
systemctl daemon-reload
sudo systemctl enable cmii-startup.service
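A quick verification of the unit after enabling it, using the names defined above:
systemctl start cmii-startup.service
systemctl status cmii-startup.service --no-pager
journalctl -u cmii-startup.service -n 20 --no-pager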

@@ -5,7 +5,7 @@ gzip_image_list_txt="all-gzip-image-list.txt" # usually does not need changing
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus-image"
DockerRegisterDomain="192.168.0.8:8033" # adjust to the actual environment
DockerRegisterDomain="172.16.100.55:8033" # adjust to the actual environment
HarborAdminPass=V2ryStr@ngPss # must match the password used in the first script
print_green() {
@@ -38,9 +38,9 @@ Download_Load_Tag_Upload() {
shift # past argument
;;
cmii)
local_gzip_path="$local_gzip_path/cmii"
local_gzip_path="$local_gzip_path/uavms-2.0"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/6.2.0-from-demo/"
oss_prefix_url="$oss_prefix_url/uavms-2.0/"
dltu
shift # past argument
;;
@@ -116,9 +116,9 @@ Load_Tag_Upload(){
shift # past argument
;;
cmii)
local_gzip_path="$local_gzip_path/6.1.1"
local_gzip_path="$local_gzip_path/uavms-2.0"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/6.1.1/"
oss_prefix_url="$oss_prefix_url/uavms-2.0/"
ltu
shift # past argument
;;
@@ -163,6 +163,6 @@ test(){
}
# test
Download_Load_Tag_Upload "cmii"
#Download_Load_Tag_Upload "cmii"
# Load_Tag_Upload "cmii"
Load_Tag_Upload "cmii"

@@ -1,63 +1,63 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.17.2
rancher/calico-ctl:v3.17.2
rancher/calico-kube-controllers:v3.17.2
rancher/calico-pod2daemon-flexvol:v3.17.2
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/coredns-coredns:1.8.0
rancher/coreos-etcd:v3.4.14-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/coreos-flannel:v0.13.0-rancher1
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.20.4-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.13
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.20.4
rancher/kubernetes-external-dns:v0.7.3
rancher/cluster-proportional-autoscaler:1.8.1
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.4.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.2
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2

@@ -1,19 +1,19 @@
bitnami/redis:6.2.14-debian-11-r1
bitnami/mysql:8.1.0-debian-11-r42
simonrupf/chronyd:0.4.3
bitnami/bitnami-shell:11-debian-11-r136
bitnami/rabbitmq:3.11.26-debian-11-r2
ossrs/srs:v5.0.195
emqx/emqx:4.4.19
emqx/emqx:5.5.1
nacos/nacos-server:v2.1.2-slim
mongo:5.0
bitnami/minio:2023.5.4
kubernetesui/dashboard:v2.0.1
kubernetesui/metrics-scraper:v1.0.4
nginx:1.24.0
redis:6.0.20-alpine
dyrnq/nfs-subdir-external-provisioner:v4.0.2
jerrychina2020/rke-tools:v0.175-linux
jerrychina2020/rke-tools:v0.175
busybox:latest

@@ -0,0 +1,23 @@
#!/bin/bash
gzip_image_list_txt="all-gzip-image-list.txt" # usually does not need changing
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus-image"
local_gzip_path="$local_gzip_path/6.1.1"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/6.1.1/"
cd $local_gzip_path || exit
wget "$oss_prefix_url$gzip_image_list_txt"
echo ""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "download gzip file =>: $oss_prefix_url${i}"
if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then
echo "download ok !"
echo ""
fi
done <"${gzip_image_list_txt}"
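Once the archives are local, loading them into Docker is the usual next step; a sketch reusing the variables defined above (docker load accepts gzipped tarballs directly):
cd "$local_gzip_path" || exit
while IFS= read -r i; do
[ -z "${i}" ] && continue
docker load -i "${i}"
done <"${gzip_image_list_txt}"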

@@ -1,6 +1,6 @@
#!/bin/bash
namespace=szgz
namespace=eedsjc-uavms
install_yq() {
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq

@@ -6,7 +6,7 @@ wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/jq-linux-am
chmod +x /usr/local/bin/jq
export name_space=cqejpt
export name_space=eedsjc-uavms
kubectl delete pods -n $name_space --field-selector status.phase!=Running --force

@@ -0,0 +1,174 @@
#!/bin/bash
# Description: install Golang and configure a domestic (CN) module proxy
# Target environment: Linux, supporting mainstream distributions such as Ubuntu and CentOS
# Go version: 1.24.2
# Download source: Aliyun mirror
# Variable definitions
GO_VERSION="go1.24.2"
GO_TAR="${GO_VERSION}.linux-amd64.tar.gz"
DOWNLOAD_URL="https://mirrors.aliyun.com/golang/${GO_TAR}"
INSTALL_DIR="/usr/local"
GO_DIR="${INSTALL_DIR}/go"
TMP_DIR="/tmp"
GOPROXY_URL="https://goproxy.cn,direct"
# Colored output helpers
print_info() {
echo -e "\033[32m[INFO]\033[0m $1"
}
print_error() {
echo -e "\033[31m[ERROR]\033[0m $1"
}
print_warning() {
echo -e "\033[33m[WARNING]\033[0m $1"
}
# Check for root or sudo privileges
check_privileges() {
if [ "$EUID" -ne 0 ]; then
if ! command -v sudo &> /dev/null; then
print_error "Root privileges or the sudo command are required; run as root or install sudo."
exit 1
fi
print_warning "Not running as root; sudo will be used for some commands."
fi
}
# Check dependency tools
check_dependencies() {
local deps=("wget" "tar")
for dep in "${deps[@]}"; do
if ! command -v "$dep" &> /dev/null; then
print_info "Installing dependency $dep..."
if [ "$EUID" -eq 0 ]; then
apt update && apt install -y "$dep" || yum install -y "$dep" || dnf install -y "$dep"
else
sudo apt update && sudo apt install -y "$dep" || sudo yum install -y "$dep" || sudo dnf install -y "$dep"
fi
if [ $? -ne 0 ]; then
print_error "Failed to install $dep; install it manually and re-run this script."
exit 1
fi
fi
done
}
# Download the Go tarball
download_go() {
print_info "Downloading the Golang tarball..."
if [ -f "${TMP_DIR}/${GO_TAR}" ]; then
print_info "Tarball already exists, skipping download."
return 0
fi
if ! wget -O "${TMP_DIR}/${GO_TAR}" "$DOWNLOAD_URL"; then
print_error "Failed to download Golang; check the network connection and the URL."
exit 1
fi
print_info "Download finished: ${TMP_DIR}/${GO_TAR}"
}
# Install Go
install_go() {
print_info "Installing Golang into ${INSTALL_DIR}..."
if [ -d "$GO_DIR" ]; then
print_warning "Directory ${GO_DIR} already exists and will be overwritten."
if [ "$EUID" -eq 0 ]; then
rm -rf "$GO_DIR"
else
sudo rm -rf "$GO_DIR"
fi
fi
if [ "$EUID" -eq 0 ]; then
tar -C "$INSTALL_DIR" -xzf "${TMP_DIR}/${GO_TAR}"
else
sudo tar -C "$INSTALL_DIR" -xzf "${TMP_DIR}/${GO_TAR}"
fi
if [ $? -ne 0 ]; then
print_error "Failed to extract the tarball; check the file integrity."
exit 1
fi
print_info "Golang installation finished."
}
# Configure environment variables
setup_env() {
print_info "Configuring environment variables..."
local profile_file=""
if [ -f "$HOME/.bashrc" ]; then
profile_file="$HOME/.bashrc"
elif [ -f "$HOME/.bash_profile" ]; then
profile_file="$HOME/.bash_profile"
elif [ -f "$HOME/.zshrc" ]; then
profile_file="$HOME/.zshrc"
else
print_warning "No common shell profile found; /etc/profile will be used (requires root)."
profile_file="/etc/profile"
fi
# Skip if already configured, to avoid duplicate entries
if ! grep -q "/usr/local/go/bin" "$profile_file"; then
cat >> "$profile_file" << 'EOF'
# Go environment variables
export PATH=$PATH:/usr/local/go/bin
export GOPATH=$HOME/go
export PATH=$PATH:$GOPATH/bin
EOF
print_info "Environment variables written to ${profile_file}"
else
print_info "Environment variables already present, skipping."
fi
# Apply immediately (current session only)
export PATH=$PATH:/usr/local/go/bin
export GOPATH=$HOME/go
export PATH=$PATH:$GOPATH/bin
}
# Configure the Go module proxy
setup_proxy() {
print_info "Configuring the domestic Go proxy..."
if command -v go &> /dev/null; then
go env -w GOPROXY="$GOPROXY_URL"
if [ $? -eq 0 ]; then
print_info "GOPROXY set to: $GOPROXY_URL"
else
print_error "Failed to set the proxy; run 'go env -w GOPROXY=$GOPROXY_URL' manually."
fi
else
print_error "The go command was not found, so the proxy could not be set. Check whether the installation succeeded."
fi
}
# Verify the installation
verify_installation() {
print_info "Verifying the Golang installation..."
if command -v go &> /dev/null; then
go_version=$(go version)
print_info "Golang version: $go_version"
goproxy=$(go env GOPROXY)
print_info "GOPROXY setting: $goproxy"
else
print_error "Golang was not installed correctly; check the error messages above."
exit 1
fi
}
# Main
main() {
check_privileges
check_dependencies
download_go
install_go
setup_env
setup_proxy
verify_installation
print_info "Golang installation and configuration finished. Run 'source ~/.bashrc' or log in again to apply the environment variables."
}
# Run main
main
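Typical invocation, assuming the script is saved as install-go.sh:
chmod +x install-go.sh
./install-go.sh
source ~/.bashrc
go version && go env GOPROXY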

@@ -1,9 +1,9 @@
#! /bin/bash
# Disable swap
swapoff -a
cp -f /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak | grep -v swap >/etc/fstab
#swapoff -a
#cp -f /etc/fstab /etc/fstab_bak
#cat /etc/fstab_bak | grep -v swap >/etc/fstab
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
@@ -50,20 +50,20 @@ t
8e
w
" | fdisk /dev/sdc
" | fdisk /dev/vdb
partprobe
# If the volume group already exists, just extend it
# vgextend rootvg /dev/sdc1
vgcreate ${VG_NAME} /dev/sdb1
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to the actual environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
#mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkfs.ext4 /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
#mkfs.ext4 /dev/mapper/${VG_NAME}-lvdata
mkdir -p /home/app-plus
#mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /home/app-plus xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
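A post-run check that the new logical volume is mounted where the edited fstab entry points; names follow the ${VG_NAME} convention used above:
lsblk /dev/vdb
df -hT /home/app-plus
grep app-plus /etc/fstab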

@@ -1,7 +1,7 @@
insert into admin_user.`sys_user`(`id`,`is_system_admin`,`is_company_admin`,`name`,`account`,`job_no`,`telephone`,`email`,`salt`,`password`,`avatar`,`org_id`,`company_id`,`is_del`,`is_frozen`,`create_at`,`update_at`,`create_by`,`update_by`) values
(1,1,'\0','管理员','admin',NULL,'15108281409','136533479@qq.com','ho7r','6c98365cbab7d8bc1d9fe75e3d22b1b5','uav/img/1594951885747_微信图片_20200702105139.png',NULL,NULL,'\0','\0','2020-04-30 09:39:06','2021-05-27 19:54:24',NULL,NULL);
update admin_user.sys_user set password = MD5(CONCAT(MD5('cvi!LRtUWd'),salt)), avatar = null where id = 1;
INSERT INTO `uav_user`.`sys_user`(`id`, `is_system_admin`, `is_company_admin`, `name`, `account`, `telephone`, `email`, `avatar`, `org_id`, `company_id`, `is_del`, `is_frozen`, `create_at`, `update_at`, `create_by`) VALUES (1, b'1', b'0', '凌云超管', 'admin', '18008002781', 'liuchunrong@cmii.chinamobile.com', 'personnelInfo/image/timg (2).jpg', NULL, NULL, b'0', b'0', '2020-04-30 09:39:06', '2021-07-15 11:03:58', NULL);

@@ -3,12 +3,15 @@
# Upload the SQL files to the MySQL directory /var/lib/docker/mysql-pv
# Change the directory ownership:
chown 1001:1001 /var/lib/docker/mysql-pv/6.2.0/
export sql_file_folder_name=uavms
chown 1001:1001 /var/lib/docker/mysql-pv/ynydapp/${sql_file_folder_name}/
# Then run the import inside the MySQL pod
INSERT INTO `uav_lifecycle`.`regulator` (`id`, `name`, `is_system_admin`, `telephone`, `avatar_url`, `authentication_status`, `authentication_time`, `password`, `password_modify_time`, `is_frozen`, `is_del`, `create_at`, `create_by`, `update_at`, `update_by`) VALUES (1, '超级管理员', b'1', LOWER(HEX(AES_ENCRYPT('13800000000','TELEPHONE'))), NULL, 0, NULL, '$2a$10$zaAxaqvNzx8HdERMTrOF6u.InuKLSSi2VGQDBmYuEIG56ZqV6TwBu', NOW(), b'0', b'0', NOW(), 'r_1', NOW(), 'r_1');
export sql_file_folder_name=6.2.0
export sql_file_folder_name=uas-2.0
export local_mysql_host_path="/var/lib/docker/mysql-pv/$sql_file_folder_name"
export sql_import_file_path="/bitnami/mysql/${sql_file_folder_name}"
for sql_file in $(ls "$sql_import_file_path" | sort -n -k1.1,1.2); do
@@ -40,7 +43,6 @@ cp ${local_mysql_host_path}/cmii_nacos_config_wdd.sql /root/wdd/install/cmii_nac
# /root/wdd/mysql/bin/mysql -uroot -pGwubc6CxRM -h192.168.35.178 -P33307 <"$sql_import_file_path/${sql_file}"
# devoperation
# /root/wdd/mysql/bin/mysql -uroot -pGwubc6CxRM -h192.168.35.178 -P33308 <"$sql_import_file_path/${sql_file}"
# chongqing san hua
# /root/wdd/mysql/bin/mysql -uroot -pQzfXQhd3bQ -h36.133.115.164 -P53309 <"$sql_import_file_path/${sql_file}"
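One hedged alternative is to run the import loop inside the MySQL pod itself; pod name and namespace are assumptions, and $MYSQL_ROOT_PASSWORD is the standard Bitnami container variable:
kubectl -n <namespace> exec -it helm-mysql-0 -- bash -c 'for f in /bitnami/mysql/uas-2.0/*.sql; do mysql -uroot -p"$MYSQL_ROOT_PASSWORD" < "$f"; done'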

@@ -91,4 +91,8 @@ cvi!LRtUWd
cvi!LRtUZsj
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
uasms
13800000000
MENF!BnG&U4k