Add the 雄安空能院 (Xiong'an) project

zeaslity
2025-08-12 09:59:32 +08:00
parent ce4165e36b
commit 4b274a02c8
79 changed files with 16048 additions and 211 deletions

.idea/workspace.xml (generated; 279 lines changed, IDE state diff not shown)

65-202504-湖南二级监管/doris-deploy/doris-be-statusfulset.yaml

@@ -37,11 +37,9 @@ spec:
         defaultMode: 420
       - name: be-storage
         persistentVolumeClaim:
-          # claimName: meta
           claimName: doris-be-storage-pvc
       - name: be-log
         persistentVolumeClaim:
-          # claimName: meta
           claimName: doris-fe-log-pvc
       initContainers:
         - name: default-init
@@ -116,11 +114,11 @@
               value: '9030'
           resources:
             limits:
-              cpu: '2'
-              memory: 2Gi
+              cpu: '16'
+              memory: 32Gi
             requests:
-              cpu: '1'
-              memory: 1Gi
+              cpu: '8'
+              memory: 32Gi
           volumeMounts:
             - name: podinfo
               mountPath: /etc/podinfo
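The bump takes the BE container from 2 CPU / 2Gi to 16 CPU / 32Gi, with memory requests equal to limits so the scheduler reserves the full 32Gi up front; CPU still bursts, so the pod ends up Burstable rather than Guaranteed QoS. A quick post-rollout check, as a sketch in which <namespace> is a placeholder and the StatefulSet is assumed to keep the doris-cluster-be name used elsewhere in this commit:

# print the QoS class Kubernetes assigned to the first BE pod
kubectl -n <namespace> get pod doris-cluster-be-0 -o jsonpath='{.status.qosClass}'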

65-202504-湖南二级监管/doris部署-operator/doris-deplyment.yaml

@@ -1,3 +1,4 @@
+---
 apiVersion: doris.selectdb.com/v1
 kind: DorisCluster
 metadata:

66-202505-浙江二级监管/0-批量脚本.sh

@@ -0,0 +1,54 @@
#!/bin/bash
host_ip_list=(192.168.10.20 192.168.10.21 192.168.10.22 192.168.10.23 192.168.10.16 192.168.10.17 192.168.10.18 192.168.10.19)
for server in "${host_ip_list[@]}";do
echo "server is ${server}"
# ssh -p 2202 root@"$server" "mkdir /root/.ssh && echo \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIgzVwaG6h4al71GhrM2zRmJ8hg7ySelDM0GXUz3SZiF wdd@cmii.com\" >> /root/.ssh/authorized_keys"
ssh -p 2202 root@"$server" "echo yes !"
# ssh -p 2202 root@"$server" "systemctl start nfs-client & systemctl start nfs-client & systemctl start nfs-common & systemctl enable nfs-common"
# ssh -p 2202 root@"$server" "yum install -y chrony"
# ssh -p 2202 root@"$server" "sed -i \"s/server 10.211.174.206 iburst/server 192.168.10.3 iburst/g\" /etc/chrony.conf"
# ssh -p 2202 root@"$server" "systemctl restart chronyd && systemctl enable chronyd"
# ssh -p 2202 root@"$server" "timedatectl && echo "" && chronyc sources"
# ssh -p 2202 root@"$server" "cp /etc/ssh/sshd_config /etc/ssh/sshd_config_back_wdd"
# ssh -p 2202 root@"$server" "rm /etc/ssh/sshd_config"
# scp -P 2202 /etc/ssh/sshd_config root@"$server":/etc/ssh/sshd_config
# ssh -p 2202 root@"$server" "systemctl restart sshd"
# scp -P 2202 /root/yanko/files/docker-19.03.15.tgz root@"$server":/data/
# ssh -p 2202 root@"$server" "sudo tar -xzvf /data/docker-19.03.15.tgz -C /usr/bin --strip-components=1"
# ssh -p 2202 root@"$server" "systemctl restart docker && sleep 3 && docker info"
# scp -P 2202 /root/agent-wdd_linux_amd64 root@"$server":/usr/local/bin/agent-wdd
# ssh -p 2202 root@"$server" "chmod +x /usr/local/bin/agent-wdd"
# ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base swap"
# ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base firewall"
# ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base selinux"
# ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base sysconfig"
# ssh -p 2202 root@"$server" "docker stop \$(docker ps -aq)"
# ssh -p 2202 root@"$server" "docker container rm \$(docker ps -aq)"
ssh -p 2202 root@"$server" "reboot"
done
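The only live lines in the loop are the echo probe and the final reboot; everything else is kept as a commented menu of one-off steps. Since the last command reboots every host, a follow-up loop in the same style (a sketch reusing the script's own host list and SSH port) confirms the nodes come back:

#!/bin/bash
host_ip_list=(192.168.10.20 192.168.10.21 192.168.10.22 192.168.10.23 192.168.10.16 192.168.10.17 192.168.10.18 192.168.10.19)
for server in "${host_ip_list[@]}"; do
  # poll until sshd answers again, then print uptime as proof of life
  until ssh -p 2202 -o ConnectTimeout=5 root@"$server" "uptime"; do
    echo "waiting for ${server} ..."
    sleep 10
  done
done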

66-202505-浙江二级监管/ai-config.yaml

@@ -0,0 +1,182 @@
app:
env: default
port: 2333
log:
level: DEBUG
node:
cluster:
enable: false
capacity:
cpu: 8
id: "auto"
ip: "auto"
redis:
host: 192.168.10.3
port: 36379
database: 6
password: Mcache@4522
rabbitmq:
host: 192.168.10.3
port: 35672
username: admin
password: nYcRN91r._hj
mqtt:
host: 192.168.10.3
port: 32883
username: cmlc
password: odD8#Ve7.B
ai_models:
# Then remember to synchronously update the configuration here
# to ModelStore core:tasking:store:ModelStore
local:
drone:
enable: true
type: yolov8
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/drone-20241223-t4.rt"
classes: "drone, bird"
sea:
enable: true
type: yolov8
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/sea.engine"
classes: "person, boat"
people_vehicle:
enable: true
type: yolov8
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/people_vehicle-t4-20240410.rt"
classes: "others, people, crowd, motor, car, truck, bus, non-motor vehicle"
vehicle:
enable: true
type: yolov8
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/vehicle-20240328-t4.rt"
classes: "others, people, crowd, motor, car, truck, bus, non-motor vehicle"
inf_person:
enable: true
type: yolov8
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/inf_person-20241129-t4.rt"
classes: "person"
ship:
enable: true
type: yolov8
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/ship-20240306-t4.rt"
classes: "ship"
ship_with_flag:
enable: true
type: yolov8
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/ship-20240306-t4.rt"
classes: "ship, flag"
drowning:
enable: true
type: yolov8
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/drowning-20240222-t4.rt"
classes: "drowner"
dino:
enable: false
type: dino
path: "/cmii/cmlc-project-ai-streaming-engine/models/dino/ground.engine"
tokenizer: "bert-base-uncased"
fake:
# Do nothing. For tasks that do not need AI processing, such as adding text.
enable: true
type: fake
task:
plain:
usage:
cpu: 2
laad:
usage:
cpu: 2
mq:
detail:
topic: "event.ai.photoelectricity.warn.detail"
exchange: "event.ai.photoelectricity.warn.detail"
briefly:
topic: "event.ai.photoelectricity.warn.briefly"
exchange: "event.ai.photoelectricity.warn.briefly"
count:
usage:
cpu: 2
mq:
topic: "aiVideo"
exchange: "aiVideo"
accumulation:
usage:
cpu: 2
mq:
topic: "aiVideo"
exchange: "aiVideo"
text:
usage:
cpu: 2
module:
shm:
ring_size: 20
max_w: 2600
max_h: 1500
max_dets: 256
smot:
alive: 1
tolerance: 256
drop: 192
hits: 2
ffio:
gpu:
enable: true
track:
type: bytetrack
bytetrack:
fps: 30
draw:
colors:
default: [ 0, 255, 0 ]
drone: [ 229, 57, 57 ]
bird: [ 97, 237, 38 ]
motor: [ 92, 184, 255 ]
car: [ 67, 144, 219 ]
truck: [ 41, 115, 204 ]
bus: [ 36, 93, 179 ]
person: [ 255, 200, 51 ]
people: [ 255, 200, 51 ]
drowner: [ 0, 127, 245 ]
ship: [ 102, 236, 204 ]
region: [60, 110, 156]
crossline: [60, 110, 156]
text:
padding: 4
skip_threshold: 20
cmlc:
mapper:
"111":
task: count
model: vehicle
"114":
task: count
model: vehicle
"115":
task: accumulation
model: vehicle
"112":
task: count
model: inf_person
"113":
task: count
model: drowning
"121":
task: laad
model: drone
"122":
task: count
model: drone
"131":
task: count
model: ship
"201":
task: text
model: fake
debug:
enable: true
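The cmlc.mapper block is the dispatch table from numeric task codes to (task, model) pairs, and ai_models.local lists which engines get loaded. Two spot checks, as a sketch assuming the mikefarah yq v4 CLI is available:

# code "121" should resolve to task: laad, model: drone
yq '.cmlc.mapper."121"' ai-config.yaml
# list every locally enabled model
yq '.ai_models.local | to_entries | map(select(.value.enable)) | .[].key' ai-config.yaml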

66-202505-浙江二级监管/cluster.yaml

@@ -0,0 +1,324 @@
nodes:
- address: 192.168.10.3
user: root
port: 2202
role:
- controlplane
- etcd
- worker
internal_address: 192.168.10.3
labels:
ingress-deploy: true
- address: 192.168.10.4
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.4
labels:
ingress-deploy: true
mysql-deploy: true
uavcloud.env: zjyd
- address: 192.168.10.5
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.5
labels:
ingress-deploy: true
uavcloud.env: zjyd
- address: 192.168.10.6
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.6
labels:
ingress-deploy: true
uavcloud.env: zjyd
- address: 192.168.10.2
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.2
labels:
mongo.node: master
- address: 192.168.10.8
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.8
labels:
uavcloud.env: zjyd
- address: 192.168.10.9
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.9
labels:
redis.node: master
- address: 192.168.10.20
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.20
labels:
uavcloud.env: zjyd
- address: 192.168.10.21
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.21
labels:
uavcloud.env: zjyd
- address: 192.168.10.22
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.22
labels:
uavcloud.env: zjyd
- address: 192.168.10.23
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.23
labels:
uavcloud.env: zjyd
- address: 192.168.10.16
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.16
labels:
doris.cluster: "true"
- address: 192.168.10.17
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.17
labels:
doris.cluster: "true"
- address: 192.168.10.18
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.18
labels:
doris.cluster: "true"
- address: 192.168.10.19
user: root
port: 2202
role:
- worker
internal_address: 192.168.10.19
labels:
doris.cluster: "true"
authentication:
strategy: x509
sans:
- "192.168.10.3"
private_registries:
- url: 192.168.10.3:8033 # private registry address
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# Defaults to false; if set to true, RKE will not error out when it detects an unsupported Docker version
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 172.29.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 0
# Using the EventRateLimit admission control enforces a limit on the number of events
# that the API Server will accept in a given time period
# Available as of v1.0.0
event_rate_limit:
enabled: false
configuration:
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
kind: Configuration
limits:
- type: Server
qps: 6000
burst: 30000
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 172.28.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 172.29.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 172.29.0.10
# Fail if swap is on
fail_swap_on: false
extra_binds:
# Optionally define additional volume binds into the kubelet service
- "/data/minio-pv:/hostStorage" # do not modify; bind mount added for the MinIO PV
extra_args:
# Set max pods to 162 instead of the default 110
max-pods: 162
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 0
authorization:
mode: rbac
addon_job_timeout: 30
network:
options:
flannel_backend_type: host-gw
flannel_iface: ens192
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
plugin: flannel
# Specify network plugin-in (canal, calico, flannel, weave, or none)
# network:
# mtu: 1440
# options:
# flannel_backend_type: vxlan
# plugin: calico
# tolerations:
# - key: "node.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationseconds: 300
# - key: "node.kubernetes.io/not-ready"
# operator: "Exists"
# effect: "NoExecute"
# tolerationseconds: 300
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal:
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 0
https_port: 0
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: true
options:
use-forwarded-headers: "true"
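To materialize this config, a sketch assuming the RKE v1.x CLI matching kubernetes_version v1.20.4-rancher1-1:

# bring the cluster up from the directory holding this file
rke up --config cluster.yaml
# RKE names the generated kubeconfig after the config file
export KUBECONFIG=$PWD/kube_config_cluster.yaml
kubectl get nodes -o wide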

66-202505-浙江二级监管/doris-部署/0-节点lable.sh

@@ -0,0 +1,26 @@
# 1. Label the Doris BE nodes
kubectl label nodes 192.168.10.17 192.168.10.18 192.168.10.19 doris-be-node=true
# 2. Create the storage directory on every BE node
for node in 192.168.10.17 192.168.10.18 192.168.10.19; do
ssh -p 2202 root@"$node" "sudo mkdir -p /data/doris-be/storage && sudo chmod 777 /data/doris-be"
ssh -p 2202 root@"$node" "ls /data/doris-be/"
done
# 3. Label the Doris FE node and create its storage directory
kubectl label nodes 192.168.10.16 doris-fe-node=true
for node in 192.168.10.16; do
ssh -p 2202 root@"$node" "sudo mkdir -p /data/doris-fe/storage && sudo chmod 777 /data/doris-fe"
ssh -p 2202 root@"$node" "ls /data/doris-fe/"
done
# UAS workloads: pin them to these nodes only, to prevent duplicate deployment
kubectl label nodes 192.168.10.20 192.168.10.21 192.168.10.22 192.168.10.23 uavcloud.env=zjejpt-uas
# RabbitMQ/EMQX/Redis must be pinned to specific nodes
kubectl label nodes 192.168.10.8 rabbitmq.node=master
kubectl label nodes 192.168.10.8 emqx.node=master
kubectl label nodes 192.168.10.9 redis.node=master
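A quick verification that every label landed where the Doris and middleware manifests expect it, as a sketch:

kubectl get nodes -l doris-be-node=true
kubectl get nodes -l doris-fe-node=true
kubectl get nodes -L uavcloud.env,rabbitmq.node,emqx.node,redis.node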

66-202505-浙江二级监管/doris-部署/doris-all-service.yaml

@@ -0,0 +1,102 @@
kind: Service
apiVersion: v1
metadata:
namespace: zjejpt-uas
name: doris-cluster-be-internal
labels:
app.kubernetes.io/component: doris-cluster-be-internal
spec:
ports:
- name: heartbeat-port
protocol: TCP
port: 9050
targetPort: 9050
selector:
app.kubernetes.io/component: doris-cluster-be
clusterIP: None
type: ClusterIP
---
kind: Service
apiVersion: v1
metadata:
name: doris-cluster-be-service
namespace: zjejpt-uas
labels:
app.kubernetes.io/component: doris-cluster-be
spec:
ports:
- name: be-port
protocol: TCP
port: 9060
targetPort: 9060
nodePort: 32189
- name: webserver-port
protocol: TCP
port: 8040
targetPort: 8040
nodePort: 31624
- name: heartbeat-port
protocol: TCP
port: 9050
targetPort: 9050
nodePort: 31625
- name: brpc-port
protocol: TCP
port: 8060
targetPort: 8060
nodePort: 31627
selector:
app.kubernetes.io/component: doris-cluster-be
type: NodePort
---
kind: Service
apiVersion: v1
metadata:
name: doris-cluster-fe-internal
namespace: zjejpt-uas
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
ports:
- name: query-port
protocol: TCP
port: 9030
targetPort: 9030
selector:
app.kubernetes.io/component: doris-cluster-fe
clusterIP: None
type: ClusterIP
---
kind: Service
apiVersion: v1
metadata:
name: doris-cluster-fe-service
namespace: zjejpt-uas
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
ports:
- name: http-port
protocol: TCP
port: 8030
targetPort: 8030
nodePort: 31620
- name: rpc-port
protocol: TCP
port: 9020
targetPort: 9020
nodePort: 31621
- name: query-port
protocol: TCP
port: 9030
targetPort: 9030
nodePort: 31622
- name: edit-log-port
protocol: TCP
port: 9010
targetPort: 9010
nodePort: 31623
selector:
app.kubernetes.io/component: doris-cluster-fe
type: NodePort
---
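With these NodePorts, the FE query port and the BE health endpoint are reachable from outside the cluster; a smoke test, as a sketch assuming any reachable node IP and a mysql client on the workstation:

# Doris FE speaks the MySQL protocol on the query port (NodePort 31622)
mysql -h <node-ip> -P 31622 -uroot
# BE webserver health endpoint (NodePort 31624), same path the probes use
curl http://<node-ip>:31624/api/health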

66-202505-浙江二级监管/doris-部署/doris-be-configmap.yaml

@@ -0,0 +1,82 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: doris-cluster-be-conf
namespace: zjejpt-uas
labels:
app.kubernetes.io/component: be
data:
be.conf: >
CUR_DATE=`date +%Y%m%d-%H%M%S`
# Log dir
LOG_DIR="${DORIS_HOME}/log/"
# For jdk 8
JAVA_OPTS="-Dfile.encoding=UTF-8 -Xmx2048m -DlogPath=$LOG_DIR/jni.log -Xloggc:$LOG_DIR/be.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives"
# Set your own JAVA_HOME
# JAVA_HOME=/path/to/jdk/
# https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
# https://jemalloc.net/jemalloc.3.html jemalloc memory allocator tuning parameters
JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
JEMALLOC_PROF_PRFIX=""
# ports for admin, web, heartbeat service
be_port = 9060
webserver_port = 8040
heartbeat_service_port = 9050
brpc_port = 8060
arrow_flight_sql_port = -1
# HTTPS configures
enable_https = false
# path of certificate in PEM format.
#ssl_certificate_path = "$DORIS_HOME/conf/cert.pem"
# path of private key in PEM format.
#ssl_private_key_path = "$DORIS_HOME/conf/key.pem"
# Choose one if there are more than one ip except loopback address.
# Note that there should be at most one ip matching this list.
# If no ip match this rule, will choose one randomly.
# use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
# Default value is empty.
# priority_networks = 10.10.10.0/24;192.168.0.0/16
# data root path, separate by ';'
# You can specify the storage type for each root path, HDD (cold data) or SSD (hot data)
# eg:
# storage_root_path = /home/disk1/doris;/home/disk2/doris;/home/disk2/doris
# storage_root_path = /home/disk1/doris,medium:SSD;/home/disk2/doris,medium:SSD;/home/disk2/doris,medium:HDD
# /home/disk2/doris,medium:HDD(default)
#
# you also can specify the properties by setting '<property>:<value>', separate by ','
# property 'medium' has a higher priority than the extension of path
#
# Default value is ${DORIS_HOME}/storage, you should create it by hand.
# storage_root_path = ${DORIS_HOME}/storage
# Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
# jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
# Advanced configurations
# INFO, WARNING, ERROR, FATAL
sys_log_level = INFO
# sys_log_roll_mode = SIZE-MB-1024
# sys_log_roll_num = 10
# sys_log_verbose_modules = *
# log_buffer_level = -1
# aws sdk log level
# Off = 0,
# Fatal = 1,
# Error = 2,
# Warn = 3,
# Info = 4,
# Debug = 5,
# Trace = 6
# Defaults to turning off the aws sdk log, because aws sdk errors that need attention are surfaced through Doris logs
#aws_log_level=0
## If you are not running in aws cloud, you can disable EC2 metadata
#AWS_EC2_METADATA_DISABLED=false
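After applying, it is worth confirming the be.conf key kept literal newlines (the reason for the | block scalar above); a sketch:

kubectl apply -f doris-be-configmap.yaml
# the dot in the key name must be escaped in jsonpath
kubectl -n zjejpt-uas get configmap doris-cluster-be-conf -o jsonpath='{.data.be\.conf}' | head -n 5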

66-202505-浙江二级监管/doris-部署/doris-be-statefulset.yaml

@@ -0,0 +1,208 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: doris-cluster-be
namespace: zjejpt-uas
labels:
app.kubernetes.io/component: doris-cluster-be
spec:
replicas: 3
selector:
matchLabels:
app.kubernetes.io/component: doris-cluster-be
template:
metadata:
name: doris-cluster-be
labels:
app.kubernetes.io/component: doris-cluster-be
spec:
imagePullSecrets:
- name: harborsecret
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- "192.168.10.17"
- "192.168.10.18"
- "192.168.10.19"
- key: doris-be-node
operator: Exists
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values: [ "doris-cluster-be" ]
topologyKey: "kubernetes.io/hostname"
volumes:
- name: be-local-storage
hostPath:
path: /data/doris-be/storage
type: DirectoryOrCreate
- name: podinfo
downwardAPI:
items:
- path: labels
fieldRef:
apiVersion: v1
fieldPath: metadata.labels
- path: annotations
fieldRef:
apiVersion: v1
fieldPath: metadata.annotations
defaultMode: 420
initContainers:
- name: pod-ordinal-init
image: 192.168.10.3:8033/cmii/alpine:1.0.0
command: [ 'sh', '-c' ]
args:
- |
# derive this pod's ordinal from its StatefulSet name
POD_ORDINAL=$(echo ${POD_NAME} | awk -F- '{print $NF}')
# map the node name to the ordinal expected on that node
case ${NODE_NAME} in
"192.168.10.17") ORDINAL=0 ;;
"192.168.10.18") ORDINAL=1 ;;
"192.168.10.19") ORDINAL=2 ;;
esac
# fail fast if the pod landed on the wrong node
if [ "$POD_ORDINAL" != "$ORDINAL" ]; then
echo "ERROR: Pod ordinal ${POD_ORDINAL} does not match node ${NODE_NAME}"
exit 1
fi
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: default-init
image: '192.168.10.3:8033/cmii/alpine:1.0.0'
command:
- /bin/sh
args:
- '-c'
- sysctl -w vm.max_map_count=2000000 && swapoff -a
resources:
limits:
cpu: '2'
memory: 2Gi
requests:
cpu: '1'
memory: 1Gi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
containers:
- name: be
image: '192.168.10.3:8033/cmii/doris.be-amd64:2.1.6'
command:
- /opt/apache-doris/be_entrypoint.sh
args:
- $(ENV_FE_ADDR)
ports:
- name: be-port
containerPort: 9060
protocol: TCP
- name: webserver-port
containerPort: 8040
protocol: TCP
- name: heartbeat-port
containerPort: 9050
protocol: TCP
- name: brpc-port
containerPort: 8060
protocol: TCP
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CONFIGMAP_MOUNT_PATH
value: /etc/doris
- name: USER
value: root
- name: DORIS_ROOT
value: /opt/apache-doris
- name: ENV_FE_ADDR
value: doris-cluster-fe-service
- name: FE_QUERY_PORT
value: '9030'
resources:
limits:
cpu: '16'
memory: 32Gi
requests:
cpu: '8'
memory: 32Gi
volumeMounts:
- name: be-local-storage
mountPath: /opt/apache-doris/be/storage
- name: be-local-storage
mountPath: /opt/apache-doris/be/log
livenessProbe:
tcpSocket:
port: 9050
initialDelaySeconds: 80
timeoutSeconds: 180
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /api/health
port: 8040
scheme: HTTP
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
startupProbe:
tcpSocket:
port: 9050
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 60
lifecycle:
preStop:
exec:
command:
- /opt/apache-doris/be_prestop.sh
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
serviceName: doris-cluster-be-internal
podManagementPolicy: Parallel
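Between the node affinity pinned to the three BE hosts, the pod anti-affinity, and the pod-ordinal-init guard, each replica should sit on exactly one fixed node; a placement check, as a sketch:

# wide output shows which node each BE pod landed on
kubectl -n zjejpt-uas get pods -l app.kubernetes.io/component=doris-cluster-be -o wide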

66-202505-浙江二级监管/doris-部署/doris-be-statusfulset-localpv-failed.yaml

@@ -0,0 +1,188 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: doris-cluster-be
namespace: zjejpt-uas
labels:
app.kubernetes.io/component: doris-cluster-be
spec:
replicas: 3
selector:
matchLabels:
app.kubernetes.io/component: doris-cluster-be
template:
metadata:
name: doris-cluster-be
labels:
app.kubernetes.io/component: doris-cluster-be
spec:
imagePullSecrets:
- name: harborsecret
volumes:
- name: podinfo
downwardAPI:
items:
- path: labels
fieldRef:
apiVersion: v1
fieldPath: metadata.labels
- path: annotations
fieldRef:
apiVersion: v1
fieldPath: metadata.annotations
defaultMode: 420
- name: doris-cluster-be-conf
configMap:
name: doris-cluster-be-conf
defaultMode: 420
initContainers:
- name: default-init
image: '192.168.10.3:8033/cmii/alpine:1.0.0'
command:
- /bin/sh
args:
- '-c'
- sysctl -w vm.max_map_count=2000000 && swapoff -a
resources:
limits:
cpu: '2'
memory: 2Gi
requests:
cpu: '1'
memory: 1Gi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
containers:
- name: be
image: '192.168.10.3:8033/cmii/doris.be-amd64:2.1.6'
command:
- /opt/apache-doris/be_entrypoint.sh
args:
- $(ENV_FE_ADDR)
ports:
- name: be-port
containerPort: 9060
protocol: TCP
- name: webserver-port
containerPort: 8040
protocol: TCP
- name: heartbeat-port
containerPort: 9050
protocol: TCP
- name: brpc-port
containerPort: 8060
protocol: TCP
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CONFIGMAP_MOUNT_PATH
value: /etc/doris
- name: USER
value: root
- name: DORIS_ROOT
value: /opt/apache-doris
- name: ENV_FE_ADDR
value: doris-cluster-fe-service
- name: FE_QUERY_PORT
value: '9030'
resources:
limits:
cpu: '16'
memory: 32Gi
requests:
cpu: '8'
memory: 32Gi
volumeMounts:
- name: podinfo
mountPath: /etc/podinfo
- name: be-storage
mountPath: /opt/apache-doris/be/storage
- name: be-storage
mountPath: /opt/apache-doris/be/log
- name: doris-cluster-be-conf
mountPath: /etc/doris
livenessProbe:
tcpSocket:
port: 9050
initialDelaySeconds: 80
timeoutSeconds: 180
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /api/health
port: 8040
scheme: HTTP
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
startupProbe:
tcpSocket:
port: 9050
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 60
lifecycle:
preStop:
exec:
command:
- /opt/apache-doris/be_prestop.sh
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values: [ "doris-cluster-be" ]
topologyKey: "kubernetes.io/hostname"
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: doris-be-node
operator: In
values: [ "true" ]
schedulerName: default-scheduler
volumeClaimTemplates:
- metadata:
name: be-storage
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "local-storage"
resources:
requests:
storage: 1500Gi
serviceName: doris-cluster-be-internal
podManagementPolicy: Parallel


@@ -0,0 +1,67 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: doris-cluster-fe-conf
namespace: zjejpt-uas
labels:
app.kubernetes.io/component: fe
data:
fe.conf: |
#####################################################################
## The uppercase properties are read and exported by bin/start_fe.sh.
## To see all Frontend configurations,
## see fe/src/org/apache/doris/common/Config.java
#####################################################################
CUR_DATE=`date +%Y%m%d-%H%M%S`
# Log dir
LOG_DIR = ${DORIS_HOME}/log
# For jdk 8
JAVA_OPTS="-Dfile.encoding=UTF-8 -Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:$LOG_DIR/fe.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Dlog4j2.formatMsgNoLookups=true"
# Set your own JAVA_HOME
# JAVA_HOME=/path/to/jdk/
##
## the lowercase properties are read by main program.
##
# store metadata, must be created before start FE.
# Default value is ${DORIS_HOME}/doris-meta
# meta_dir = ${DORIS_HOME}/doris-meta
# Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
# jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
http_port = 8030
rpc_port = 9020
query_port = 9030
edit_log_port = 9010
arrow_flight_sql_port = -1
# Choose one if there are more than one ip except loopback address.
# Note that there should at most one ip match this list.
# If no ip match this rule, will choose one randomly.
# use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
# Default value is empty.
# priority_networks = 10.10.10.0/24;192.168.0.0/16
# Advanced configurations
# log_roll_size_mb = 1024
# INFO, WARN, ERROR, FATAL
sys_log_level = INFO
# NORMAL, BRIEF, ASYNC. Output mode of the FE log: NORMAL (the default) logs synchronously and includes location info; ASYNC logs asynchronously and includes location info; BRIEF logs asynchronously without location info. Performance increases in that order.
sys_log_mode = ASYNC
# sys_log_roll_num = 10
# sys_log_verbose_modules = org.apache.doris
# audit_log_dir = $LOG_DIR
# audit_log_modules = slow_query, query
# audit_log_roll_num = 10
# meta_delay_toleration_second = 10
# qe_max_connection = 1024
# qe_query_timeout_second = 300
# qe_slow_log_ms = 5000
# Fully Qualified Domain Name: when enabled, nodes communicate with each other via FQDN
enable_fqdn_mode = true
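Once FE is up, a common smoke test is to connect to the query port with any MySQL client, since Doris FE speaks the MySQL protocol on query_port. A minimal sketch, assuming the service name and port from the manifests in this directory:

```bash
# SHOW FRONTENDS / SHOW BACKENDS report cluster membership and node health.
mysql -h doris-cluster-fe-service -P 9030 -uroot -e "SHOW FRONTENDS; SHOW BACKENDS;"
```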


@@ -0,0 +1,160 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: doris-cluster-fe
namespace: zjejpt-uas
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: doris-cluster-fe
template:
metadata:
name: doris-cluster-fe
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
imagePullSecrets:
- name: harborsecret
volumes:
- name: fe-local-storage
hostPath:
path: /data/doris-fe/storage
type: DirectoryOrCreate
- name: podinfo
downwardAPI:
items:
- path: labels
fieldRef:
apiVersion: v1
fieldPath: metadata.labels
- path: annotations
fieldRef:
apiVersion: v1
fieldPath: metadata.annotations
defaultMode: 420
- name: doris-cluster-fe-conf
configMap:
name: doris-cluster-fe-conf
defaultMode: 420
containers:
- name: doris-cluster-fe
image: '192.168.10.3:8033/cmii/doris.fe-amd64:2.1.6'
command:
- /opt/apache-doris/fe_entrypoint.sh
args:
- $(ENV_FE_ADDR)
ports:
- name: http-port
containerPort: 8030
protocol: TCP
- name: rpc-port
containerPort: 9020
protocol: TCP
- name: query-port
containerPort: 9030
protocol: TCP
- name: edit-log-port
containerPort: 9010
protocol: TCP
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CONFIGMAP_MOUNT_PATH
value: /etc/doris
- name: USER
value: root
- name: DORIS_ROOT
value: /opt/apache-doris
- name: ENV_FE_ADDR
value: doris-cluster-fe-service
- name: FE_QUERY_PORT
value: '9030'
- name: ELECT_NUMBER
value: '3'
resources:
limits:
cpu: '16'
memory: 32Gi
requests:
cpu: '8'
memory: 32Gi
volumeMounts:
- name: podinfo
mountPath: /etc/podinfo
- name: fe-local-storage
mountPath: /opt/apache-doris/fe/log
- name: fe-local-storage
mountPath: /opt/apache-doris/fe/doris-meta
- name: doris-cluster-fe-conf
mountPath: /etc/doris
livenessProbe:
tcpSocket:
port: 9030
initialDelaySeconds: 80
timeoutSeconds: 180
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /api/health
port: 8030
scheme: HTTP
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
startupProbe:
tcpSocket:
port: 9030
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 60
lifecycle:
preStop:
exec:
command:
- /opt/apache-doris/fe_prestop.sh
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- "192.168.10.16"
- key: doris-fe-node
operator: Exists
schedulerName: default-scheduler
serviceName: doris-cluster-fe-internal
podManagementPolicy: Parallel


@@ -0,0 +1,79 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Retain
allowedTopologies:
- matchLabelExpressions:
- key: doris-be-node
values: ["true"]
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-be-pv-node1
spec:
capacity:
storage: 1500Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /data/doris-be/storage
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: ["192.168.10.17"]
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-be-pv-node2
spec:
capacity:
storage: 1500Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /data/doris-be/storage
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: ["192.168.10.18"]
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-be-pv-node3
spec:
capacity:
storage: 1500Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /data/doris-be/storage
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: ["192.168.10.19"]
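The StorageClass topology and the BE StatefulSet's nodeAffinity both depend on the `doris-be-node=true` node label, and the local PVs expect `/data/doris-be/storage` to exist on each node. A preparation sketch, assuming kubectl access and SSH on port 2202 as configured by the sshd_config in this repo:

```bash
# Label the three BE nodes referenced by the PVs above and create the local path.
for node in 192.168.10.17 192.168.10.18 192.168.10.19; do
  kubectl label node "$node" doris-be-node=true --overwrite
  ssh -p 2202 root@"$node" "mkdir -p /data/doris-be/storage"
done
```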


@@ -0,0 +1,103 @@
#!/bin/bash
set -e
# Variables
DOCKER_TAR="docker-25.0.0.tgz"
SYSTEMD_DIR="/lib/systemd/system"
BIN_DIR="/usr/local/bin"
# 0. Stop any old services (if present)
sudo systemctl stop docker containerd.socket containerd 2>/dev/null || true
# 1. Unpack the Docker binary tarball
echo "Unpacking the Docker binary tarball..."
sudo tar -xzvf ${DOCKER_TAR} -C ${BIN_DIR} --strip-components=1
# 2. Make sure the binaries are executable
sudo chmod +x ${BIN_DIR}/{containerd,ctr,dockerd,docker,runc}
# 3. Configure containerd.service
echo "Configuring the containerd service..."
# Unquoted heredoc so ${BIN_DIR} expands into the unit file (the quoted
# 'EOF' form would leave a literal ${BIN_DIR} that systemd cannot resolve).
cat > ${SYSTEMD_DIR}/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=${BIN_DIR}/containerd
KillMode=process
Delegate=yes
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
[Install]
WantedBy=multi-user.target
EOF
# 4. Configure docker.service
echo "Configuring the Docker service..."
# Unquoted heredoc again so ${BIN_DIR} expands; $MAINPID below is escaped
# so that it is left for systemd to expand at runtime.
cat > ${SYSTEMD_DIR}/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target docker.socket containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
ExecStart=${BIN_DIR}/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
EOF
# 5. Configure docker.socket
echo "Configuring the Docker socket..."
cat > ${SYSTEMD_DIR}/docker.socket <<'EOF'
[Unit]
Description=Docker Socket for the API
PartOf=docker.service
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
# 6. Create the docker group
echo "Configuring the docker group..."
sudo groupadd -f docker
sudo usermod -aG docker $USER 2>/dev/null && echo "Added user $USER to the docker group"
# 7. Enable and start the services
echo "Starting services..."
sudo systemctl daemon-reload
sudo systemctl enable --now containerd docker
# 8. Verify the installation
echo -e "\nService status:"
sudo systemctl status containerd docker | grep "Active:"
echo -e "\nDocker version:"
${BIN_DIR}/docker --version
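A short post-install check, as a sketch using only the standard systemctl/docker CLIs:

```bash
# Both services should report "active"; docker info confirms the client can
# reach the daemon over the socket.
systemctl is-active containerd docker
docker info --format 'server version: {{.ServerVersion}}'
```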


@@ -0,0 +1,143 @@
# $OpenBSD: sshd_config,v 1.104 2021/07/02 05:11:21 dtucker Exp $
# This is the sshd server system-wide configuration file. See
# sshd_config(5) for more information.
# This sshd was compiled with PATH=/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin
# The strategy used for options in the default sshd_config shipped with
# OpenSSH is to specify options with their default value where
# possible, but leave them commented. Uncommented options override the
# default value.
# To modify the system-wide sshd configuration, create a *.conf file under
# /etc/ssh/sshd_config.d/ which will be automatically included below
#Include /etc/ssh/sshd_config.d/*.conf
# If you want to change the port on a SELinux system, you have to tell
# SELinux about this change.
# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
#
Port 2202
#AddressFamily any
#ListenAddress 0.0.0.0
#ListenAddress ::
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
HostKey /etc/ssh/ssh_host_ed25519_key
# Ciphers and keying
#RekeyLimit default none
# Logging
#SyslogFacility AUTH
SyslogFacility AUTH
#LogLevel INFO
# Authentication:
#LoginGraceTime 2m
PermitRootLogin yes
#StrictModes yes
#MaxAuthTries 6
#MaxSessions 10
#PubkeyAuthentication yes
# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2
# but this is overridden so installations will only check .ssh/authorized_keys
AuthorizedKeysFile .ssh/authorized_keys
#AuthorizedPrincipalsFile none
#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
#IgnoreRhosts yes
# To disable tunneled clear text passwords, change to no here!
PasswordAuthentication yes
#PermitEmptyPasswords no
# Change to no to disable s/key passwords
KbdInteractiveAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
#KerberosUseKuserok yes
# GSSAPI options
GSSAPIAuthentication yes
GSSAPICleanupCredentials no
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
#GSSAPIEnablek5users no
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the KbdInteractiveAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via KbdInteractiveAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and KbdInteractiveAuthentication to 'no'.
# WARNING: 'UsePAM no' is not supported in openEuler and may cause several
# problems.
UsePAM yes
#AllowAgentForwarding yes
#AllowTcpForwarding yes
#GatewayPorts no
X11Forwarding yes
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
PrintMotd no
#PrintLastLog yes
#TCPKeepAlive yes
#PermitUserEnvironment no
#Compression delayed
#ClientAliveInterval 0
#ClientAliveCountMax 3
UseDNS no
#PidFile /var/run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none
# no default banner path
#Banner none
AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
AcceptEnv XMODIFIERS
# override default of no subsystems
Subsystem sftp /usr/libexec/openssh/sftp-server -l INFO -f AUTH
# Example of overriding settings on a per-user basis
#Match User anoncvs
# X11Forwarding no
# PermitTTY no
# ForceCommand cvs server
#CheckUserSplash yes
PubkeyAuthentication yes
RSAAuthentication yes
IgnoreRhosts yes
PermitEmptyPasswords no
Banner /etc/issue.net
AllowTcpForwarding yes


@@ -0,0 +1,55 @@
Below are the commands for compressing a Docker image into split archive chunks and merging them back.
---
### **1. Compress and split the image**
#### **Recommended: compress and split directly through a pipe**
Using `gzip` (fast, moderate compression ratio):
```bash
docker save <IMAGE_NAME:TAG> | gzip | split -b 5G - image_part_.gz
```
Or using `xz` (higher compression ratio, slower):
```bash
docker save <IMAGE_NAME:TAG> | xz -T0 | split -b 5G - image_part_.xz
```
**Parameters**
- `<IMAGE_NAME:TAG>`: replace with the actual image name and tag.
- `split -b 5G`: splits the input stream into chunks of at most 5 GB each.
- `image_part_.gz` / `image_part_.xz`: chunk filename prefix; the generated files are named like `image_part_.gzaa`, `image_part_.gzab`, and so on.
---
### **2. Merge the chunks and restore the image**
#### **Merging and loading gzip-compressed chunks**
```bash
cat image_part_.gz* | gunzip | docker load
```
#### **Merging and loading xz-compressed chunks**
```bash
cat image_part_.xz* | xz -d | docker load
```
---
### **How it works**
1. **Compress and split**
- `docker save` writes the image's TAR archive to standard output.
- The pipe compresses the TAR stream on the fly (`gzip` or `xz`).
- `split` cuts the compressed stream into files of at most `5G` each.
2. **Merge and restore**
- `cat` concatenates all chunks in order (the shell glob expands in lexicographic order, which matches the split order).
- `gunzip` or `xz -d` decompresses the merged stream.
- `docker load` loads the image from the decompressed TAR stream.
---
### **Notes**
- **Chunk naming**: `split` defaults to two-letter suffixes `aa`, `ab`, etc. If there will be more than a few hundred chunks, set the suffix length with `-a <length>` (e.g. `-a 3 -d` produces numeric suffixes like `000`, `001`).
- **Disk space**: Docker needs enough free space to store the loaded image (e.g. an image that is 24 GB unpacked needs at least 24 GB free).
- **Choosing a compressor**:
- `gzip`: faster; good for quick turnaround.
- `xz`: higher compression ratio, well suited to binary data, but needs more time and CPU.
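For illustration, a hypothetical end-to-end run with three-digit numeric suffixes; `myapp:latest` is a placeholder image name:

```bash
# Split into image_part_.gz000, image_part_.gz001, ...
docker save myapp:latest | gzip | split -b 5G -d -a 3 - image_part_.gz
# Restore on the target host; the glob expands in the correct order.
cat image_part_.gz* | gunzip | docker load
```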


@@ -0,0 +1,20 @@
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uas-gateway=2.1-demo-20250527-licence.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uas-lifecycle=2.1-demo-20250527-licence.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-notice=pro-6.0.8.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-material-warehouse=6.2.0-050701.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-platform-uasms=2.1-demo-20250527.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-platform-uas=2.1-demo-20250527.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uavms-pyfusion=6.3.6.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-iot-dispatcher=6.2.0-focus.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-sense-adapter=6.2.0-250415.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-watchdog=1.0.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-live-operator=5.2.0.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=srs=v5.0.195-arm.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-srs-oss-adaptor=2023-SA-skip-CHL.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/docker=cmii=doris.fe-ubuntu=2.1.6.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=doris.be-amd64=2.1.6.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/image_part_.gzaa
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/image_part_.gzab
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/image_part_.gzac
https://oss.demo.uavcmlc.com/cmlc-installation/gb28181/v5.7.0-x86/gb28181_x86_2.7.3_20250414.img.tar
https://oss.demo.uavcmlc.com/cmlc-installation/gb28181/docker-gb28181.tar

File diff suppressed because it is too large


@@ -0,0 +1,19 @@
#!/bin/bash
scp -P 2202 /root/wdd/install/auth_file.json root@192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uav-watchdog/
ssh -p 2202 root@192.168.10.2 "ls /data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uav-watchdog"
# Generate the authorization file
curl http://localhost:8080/api/authorization/generate
# Submit the authorization code
curl -X POST \
http://localhost:8080/api/authorization/auth \
-H 'Content-Type: application/json' \
--data-binary @auth_file.json
#
curl http://localhost:8080/api/authorization/hosts


@@ -0,0 +1,144 @@
###### Supervision platform proxy
location ^~ /uas {
####### Cookie hardening: fixes cookies flagged for missing, inconsistent, or contradictory HttpOnly attributes
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
# Enable HTTP Strict Transport Security (HSTS)
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
proxy_pass http://localhost:30500;
client_max_body_size 5120m;
client_body_buffer_size 5120m;
client_body_timeout 6000s;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
proxy_connect_timeout 600s;
proxy_max_temp_file_size 5120m;
proxy_request_buffering on;
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 12k;
proxy_set_header Host fake-domain.zjejpt-uas.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
location / {
####### Cookie hardening: fixes cookies flagged for missing, inconsistent, or contradictory HttpOnly attributes
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
# Enable HTTP Strict Transport Security (HSTS)
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
proxy_pass http://localhost:30500;
client_max_body_size 5120m;
client_body_buffer_size 5120m;
client_body_timeout 6000s;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
proxy_connect_timeout 600s;
proxy_max_temp_file_size 5120m;
proxy_request_buffering on;
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 12k;
proxy_set_header Host fake-domain.zjejpt-uas.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
location /_AMapService/v4/map/styles {
####### Cookie hardening: fixes cookies flagged for missing, inconsistent, or contradictory HttpOnly attributes
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
# Enable HTTP Strict Transport Security (HSTS)
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
proxy_pass https://webapi.amap.com/v4/ap/styles;
}
location /_AMapService/ {
####### Cookie hardening: fixes cookies flagged for missing, inconsistent, or contradictory HttpOnly attributes
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
# Enable HTTP Strict Transport Security (HSTS)
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
proxy_pass https://restapi.amap.com/;
}
location /rtc/v1/ {
####### Cookie hardening: fixes cookies flagged for missing, inconsistent, or contradictory HttpOnly attributes
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
# Enable HTTP Strict Transport Security (HSTS)
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
add_header Access-Control-Allow-Headers X-Requested-With;
add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
proxy_pass http://192.168.10.3:30985/rtc/v1/;
}
### Video via the GB28181 national standard ###
# location /zlm/flv/ {
# ####### Cookie hardening: fixes cookies flagged for missing, inconsistent, or contradictory HttpOnly attributes
# add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
# # Enable HTTP Strict Transport Security (HSTS)
# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
# add_header Access-Control-Allow-Headers X-Requested-With;
# add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
# proxy_pass http://192.168.10.25:7088/;
# }
# location /zlm/hls/ {
# ####### Cookie hardening: fixes cookies flagged for missing, inconsistent, or contradictory HttpOnly attributes
# add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
# # Enable HTTP Strict Transport Security (HSTS)
# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
# add_header Access-Control-Allow-Headers X-Requested-With;
# add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
# proxy_pass http://192.168.10.25:7088/zlm/hls/;
# }
# location /index/api/ {
# ####### Cookie hardening: fixes cookies flagged for missing, inconsistent, or contradictory HttpOnly attributes
# add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
# # Enable HTTP Strict Transport Security (HSTS)
# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
# add_header Access-Control-Allow-Headers X-Requested-With;
# add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
# proxy_pass http://192.168.10.25:7088/index/api/;
# }
#location /video_feed {
# proxy_pass http://192.168.10.12:5000;
# proxy_http_version 1.1;
# proxy_set_header Upgrade $http_upgrade;
# proxy_set_header Connection "upgrade";
# proxy_set_header Host $host;
# proxy_cache_bypass $http_upgrade;
#}
#location /video_person {
# proxy_pass http://192.168.10.12:5001;
# proxy_http_version 1.1;
# proxy_set_header Upgrade $http_upgrade;
# proxy_set_header Connection "upgrade";
# proxy_set_header Host $host;
# proxy_cache_bypass $http_upgrade;
#}
#location /video {
# ####### Cookie hardening: fixes cookies flagged for missing, inconsistent, or contradictory HttpOnly attributes
# add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
# # Enable HTTP Strict Transport Security (HSTS)
# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
# alias /data/test/;
# index 10.mp4;
#}
#location ~ ^/\w*/actuator/ {
# return 403;
#}
location ~ ^/.*/(actuator|swagger-resources|api-docs|health).* {
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
return 404;
}
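A sketch for spot-checking the rule above from the gateway host, assuming this vhost is included into the TLS server listening on 8088 in the main nginx.conf:

```bash
# The management endpoint should return 404, a normal app path should not.
curl -k -s -o /dev/null -w '%{http_code}\n' https://localhost:8088/uas/api/actuator/health
curl -k -s -o /dev/null -w '%{http_code}\n' https://localhost:8088/uas/
```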


@@ -0,0 +1,274 @@
user www www;
worker_processes auto;
error_log logs/error.log warn;
pid /var/run/nginx/nginx.pid;
events {
worker_connections 65535;
}
stream{
include /data/nginx/conf/blacklist.conf;
include /data/nginx/conf/blacklist_zhejiang.conf;
deny all;
# Flight data - MQTT
upstream tcp31883{
server 127.0.0.1:32883; # China Mobile Lingyun flight data
}
server{
listen 31883;
proxy_pass tcp31883;
}
# Flight data - MQTT over WebSocket
upstream tcp38083{
server 127.0.0.1:39083;
}
server{
listen 38083;
proxy_pass tcp38083;
}
# Video streaming - RTMP
upstream tcp31935{
server 127.0.0.1:32935;
}
server{
listen 31935;
proxy_pass tcp31935;
}
# Video streaming - WebRTC
upstream udp30090{
server 127.0.0.1:31090;
}
server{
listen 30090 udp;
proxy_pass udp30090;
}
# Video playback TCP port
#upstream tcp30080{
# server 127.0.0.1:31080;
#}
#server{
# listen 30080;
# proxy_pass tcp30080;
#}
# RTSP - control TCP port
#upstream tcp30554{
# server 127.0.0.1:32554;
#}
#server{
# listen 30554;
# proxy_pass tcp30554;
#}
# RTSP - data TCP port
#upstream tcp30556{
# server 127.0.0.1:32556;
#}
#server{
# listen 30556;
# proxy_pass tcp30556;
#}
# RTSP - data UDP port
#upstream udp30556{
# server 127.0.0.1:32556;
#}
#server{
# listen 30556 udp;
# proxy_pass udp30556;
#}
# Simulated-data test UDP port
#upstream udp30556{
# server 127.0.0.1:31556;
#}
#server{
# listen 30556 udp;
# proxy_pass udp30556;
#}
# RabbitMQ console port
# server{
# listen 32002;
# proxy_pass 192.168.10.11:15672;
# }
}
http {
include /data/nginx/conf/blacklist.conf;
include /data/nginx/conf/blacklist_zhejiang.conf;
deny all;
include mime.types;
default_type application/octet-stream;
## Strip version information ##
server_tokens off;
# Redirect the error log
#fastcgi_intercept_errors on;
error_log logs/error.log warn;
#####
sendfile on;
keepalive_timeout 60;
client_body_timeout 30s;
client_header_timeout 30s;
send_timeout 30s;
gzip on;
#more_clear_headers 'Server';
add_header X-Frame-Options SAMEORIGIN always;
add_header X-Content-Type-Options nosniff;
# Enable HTTP Strict Transport Security (HSTS)
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
underscores_in_headers on;
log_format main '$remote_addr - $remote_user [$time_local]'
'#"$request_method $scheme://$host$request_uri $server_protocol" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" - "$request_time"';
access_log /data/nginx/logs/access.log main;
server {
listen 8088 ssl;
server_name lingyun.zyjctech.com;
index index.jsp index.htm index.html;
### CORS settings (temporary) ###
add_header 'Access-Control-Allow-Origin' '*';
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
add_header 'Access-Control-Allow-Headers' 'Origin, Content-Type, Accept, Authorization';
if ($request_method = 'OPTIONS') {
return 204;
}
# Disallow iframe embedding
add_header X-Frame-Options SAMEORIGIN always;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
####### Cookie hardening: fixes cookies flagged for missing, inconsistent, or contradictory HttpOnly attributes
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
# Enable HTTP Strict Transport Security (HSTS)
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
### SSL configuration ###
ssl_certificate /data/nginx/conf/zyjctech.com_cert_chain.pem;
ssl_certificate_key /data/nginx/conf/zyjctech.com_key.key;
ssl_session_timeout 10m;
## Added ##
#ssl_stapling_verify on;
#ssl_session_cache shared:SSL:50m;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
#ssl_prefer_server_ciphers off;
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1.2 TLSv1.3;
##############################
include /data/nginx/conf/vhost8088/*.conf;
client_max_body_size 1024m;
client_body_buffer_size 512k;
client_header_timeout 3m;
send_timeout 3m;
proxy_connect_timeout 600;
proxy_read_timeout 600;
proxy_send_timeout 600;
### Custom 403 response showing the intercepted IP ###
error_page 403 /error.html;
location = /error.html {
default_type text/plain;
return 403 "Access failed. Please contact the administrator to add the IP whitelist IP:$remote_addr";
}
}
#K8S DashBoard
# server {
# listen 30554 ssl;
# ssl_certificate /data/nginx/conf/zyjctech.com_cert_chain.pem;
# ssl_certificate_key /data/nginx/conf/zyjctech.com_key.key;
# ssl_session_timeout 5m;
# ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
# ssl_prefer_server_ciphers off;
# ssl_protocols TLSv1.2 TLSv1.3;
# client_max_body_size 1024m;
# client_body_buffer_size 256k;
# client_header_timeout 3m;
# client_body_timeout 3m;
# send_timeout 3m;
# proxy_connect_timeout 600;
# proxy_read_timeout 600;
# proxy_send_timeout 600;
# proxy_buffer_size 256k;
# proxy_buffers 4 256k;
# proxy_busy_buffers_size 256k;
# location / {
# proxy_pass https://127.0.0.1:32000;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# }
# }
## minio console ##
# server {
# listen 32002; # port 80 would also work
# server_name 188.106.25.136; # a domain name can be used instead
# add_header X-Frame-Options SAMEORIGIN always;
# add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
# location / {
# proxy_next_upstream http_500 http_502 http_503 http_504 error timeout invalid_header;
# proxy_set_header Host $http_host;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_pass http://188.106.25.132:9001;
# expires 0;
# }
# }
## AiMapServer ##
# server {
# listen 32007; # port 80 would also work
# server_name 188.106.25.136; # a domain name can be used instead
# add_header X-Frame-Options SAMEORIGIN always;
# add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
# location / {
# proxy_pass http://188.106.25.222:5090/aimap-server/manager/login;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# }
# location /aimap-server {
# proxy_pass http://188.106.25.222:5090/aimap-server;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# }
# location /gisapi {
# proxy_pass http://188.106.25.222:5090/gisapi;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# }
# }
}
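Before reloading, the file can be syntax-checked in place. A minimal sketch; the binary path is an assumption based on the `/data/nginx` prefix used throughout this config:

```bash
# Validate the configuration, then hot-reload without dropping connections.
/data/nginx/sbin/nginx -t -c /data/nginx/conf/nginx.conf && \
/data/nginx/sbin/nginx -s reload
```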

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,644 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-iot
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "iot",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-awareness
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "awareness",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-flight-control
namespace: zjejpt-uas
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "lingyun.zyjctech.com:8088",
ApplicationShortName: "flight-control",
AppClientId: "empty"
}
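These ConfigMaps differ only in `metadata.name`, `ApplicationShortName`, and `AppClientId`, so they lend themselves to generation. A hypothetical generator sketch (the entries shown are examples taken from the manifests above):

```bash
#!/bin/bash
# Each entry: <configmap-suffix>:<short-name>:<client-id>
apps=(
  "splice:splice:APP_zE0M3sTRXrCIJS8Y"
  "uas:uas:empty"
  "uasms:uasms:empty"
)
for entry in "${apps[@]}"; do
  IFS=: read -r name short client <<< "$entry"
  kubectl apply -f - <<EOF
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-${name}
  namespace: zjejpt-uas
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "lingyun.zyjctech.com:8088",
      ApplicationShortName: "${short}",
      AppClientId: "${client}"
    }
EOF
done
```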


@@ -0,0 +1,203 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-cm
namespace: zjejpt-uas
labels:
cmii.type: frontend
data:
nginx.conf: |
server {
listen 9528;
server_name localhost;
gzip on;
location / {
root /home/cmii-platform/dist;
index index.html index.htm;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-uasms
namespace: zjejpt-uas
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: uas-2.1
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-uasms
image: 192.168.10.3:8033/cmii/cmii-uav-platform-uasms:2.1-demo-20250527
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: zjejpt-uas
- name: APPLICATION_NAME
value: cmii-uav-platform-uasms
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-uasms
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-uasms
namespace: zjejpt-uas
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
octopus.control: frontend-app-wdd
app.kubernetes.io/version: uas-2.1
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasms
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-uas
namespace: zjejpt-uas
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: uas-2.1
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-uas
image: 192.168.10.3:8033/cmii/cmii-uav-platform-uas:2.1-demo-20250527
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: zjejpt-uas
- name: APPLICATION_NAME
value: cmii-uav-platform-uas
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-uas
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-uas
namespace: zjejpt-uas
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
octopus.control: frontend-app-wdd
app.kubernetes.io/version: uas-2.1
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-uas
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
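A deployment check, as a sketch assuming kubectl access to the namespace:

```bash
# Wait for both frontend rollouts, then list the resulting services.
kubectl -n zjejpt-uas rollout status deployment/cmii-uav-platform-uas
kubectl -n zjejpt-uas rollout status deployment/cmii-uav-platform-uasms
kubectl -n zjejpt-uas get svc -l cmii.type=frontend
```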


@@ -0,0 +1,66 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: zjejpt-uas
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
spec:
rules:
- host: fake-domain.zjejpt-uas.io
http:
paths:
- path: /uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uas
servicePort: 9528
- path: /uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasms
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: zjejpt-uas
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
spec:
rules:
- host: fake-domain.zjejpt-uas.io
http:
paths:
- path: /uas/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- path: /converge/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-sky-converge
servicePort: 8080
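A sketch for exercising the rewrite rules through the ingress controller; `<ingress-node-ip>` is a placeholder for any node running ingress-nginx:

```bash
# The Host header must match the rule above for the paths to resolve.
curl -s -o /dev/null -w '%{http_code}\n' -H "Host: fake-domain.zjejpt-uas.io" http://<ingress-node-ip>/uas/
curl -s -o /dev/null -w '%{http_code}\n' -H "Host: fake-domain.zjejpt-uas.io" http://<ingress-node-ip>/uas/api/health
```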


@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim-uas
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-uas-storage-class" # must match metadata.name in the nfs StorageClass manifest
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-uas-storage-class
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod-uas
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: 192.168.10.3:8033/cmii/alpine:1.0.0
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim-uas # must match the PVC name above
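To run the check, a sketch (the manifest filename is hypothetical; host and paths follow this repo's conventions):

```bash
kubectl apply -f nfs-test.yaml      # hypothetical filename for this manifest
kubectl get pod test-pod-uas        # should reach Completed
ssh -p 2202 root@192.168.10.2 "find /data/nfs_data -maxdepth 3 -name NFS-CREATE-SUCCESS"
```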


@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner-uas
# replace with namespace where provisioner is deployed
namespace: kube-system # set according to your environment; same for the namespaces below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-uas-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner-uas
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner-uas
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-uas-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner-uas
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner-uas
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner-uas
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner-uas
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-uas-storage-class
provisioner: cmlc-nfs-uas-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
parameters:
archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner-uas
labels:
app: nfs-client-provisioner-uas
# replace with namespace where provisioner is deployed
namespace: kube-system # must match the namespace used in the RBAC objects above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner-uas
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner-uas
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner-uas
containers:
- name: nfs-client-provisioner-uas
image: 192.168.10.3:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-uas-storage
- name: NFS_SERVER
value: 192.168.10.2
- name: NFS_PATH
value: /data/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 192.168.10.2
path: /data/nfs_data


@@ -0,0 +1,20 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: zjejpt-uas
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.1
spec:
storageClassName: nfs-uas-storage-class
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 1000Gi

View File

@@ -0,0 +1,34 @@
#!/bin/bash
host_ip_list=(192.168.0.3 192.168.0.4 192.168.0.5 192.168.0.6)
for server in "${host_ip_list[@]}";do
ssh root@$server "echo yes !"
ssh root@$server "curl -s http://172.29.137.125"
echo -e "\n\n"
# scp /usr/local/bin/agent-wdd root@"$server":/usr/local/bin/agent-wdd
# ssh root@"$server" "chmod +x /usr/local/bin/agent-wdd"
# ssh root@"$server" "/usr/local/bin/agent-wdd info all"
# ssh root@"$server" "/usr/local/bin/agent-wdd base swap"
# ssh root@"$server" "/usr/local/bin/agent-wdd base firewall"
# ssh root@"$server" "/usr/local/bin/agent-wdd base selinux"
# ssh root@"$server" "/usr/local/bin/agent-wdd base sysconfig"
ssh root@${server} "mkdir -p /root/wdd"
scp /root/wdd/disk.sh root@${server}:/root/wdd/
ssh root@${server} "bash /root/wdd/disk.sh"
scp /root/wdd/docker.sh root@${server}:/root/wdd/
ssh root@${server} "bash /root/wdd/docker.sh"
done


@@ -0,0 +1,239 @@
nodes:
- address: 192.168.0.2
user: root
role:
- controlplane
- etcd
- worker
internal_address: 192.168.0.2
labels:
ingress-deploy: true
- address: 192.168.0.3
user: root
role:
- worker
internal_address: 192.168.0.3
labels:
ingress-deploy: true
uavcloud.env: xakny
- address: 192.168.0.4
user: root
role:
- worker
internal_address: 192.168.0.4
labels:
ingress-deploy: true
uavcloud.env: xakny
- address: 192.168.0.5
user: root
role:
- worker
internal_address: 192.168.0.5
labels:
ingress-deploy: true
uavcloud.env: xakny
mysql-deploy: true
- address: 192.168.0.6
user: root
role:
- worker
internal_address: 192.168.0.6
labels:
doris.cluster: "true"
minio.node: "true"
authentication:
strategy: x509
sans:
- "192.168.0.2"
private_registries:
- url: 192.168.0.2:8033 # private image registry address
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# Defaults to false; when true, RKE does not raise an error if it detects an unsupported Docker version
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 172.29.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 0
# Using the EventRateLimit admission control enforces a limit on the number of events
# that the API Server will accept in a given time period
# Available as of v1.0.0
event_rate_limit:
enabled: false
configuration:
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
kind: Configuration
limits:
- type: Server
qps: 6000
burst: 30000
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 172.28.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 172.29.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 172.29.0.10
# Fail if swap is on
fail_swap_on: false
# Optionally define additional volume binds to a service
extra_binds:
- "/data/minio-pv:/hostStorage" # do not modify; added for MinIO's PV
# Raise max pods from the default 110 to 162
extra_args:
max-pods: 162
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 0
authorization:
mode: rbac
addon_job_timeout: 30
network:
options:
flannel_backend_type: host-gw
flannel_iface: eth0
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
plugin: calico
# Specify the network plug-in (canal, calico, flannel, weave, or none)
# network:
# mtu: 1440
# options:
# flannel_backend_type: vxlan
# plugin: calico
# tolerations:
# - key: "node.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationseconds: 300
# - key: "node.kubernetes.io/not-ready"
# operator: "Exists"
# effect: "NoExecute"
# tolerationseconds: 300
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal:
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 0
https_port: 0
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: true
options:
use-forwarded-headers: "true"
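This cluster.yaml is consumed by the RKE CLI; a minimal bring-up sketch, assuming an RKE release that supports kubernetes_version v1.20.4-rancher1-1:

rke up --config cluster.yaml
# RKE writes the kubeconfig next to the config file (exact name may vary by RKE version):
export KUBECONFIG=$PWD/kube_config_cluster.yaml
kubectl get nodes -o wide   # all five nodes should report Ready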

View File

@@ -0,0 +1,82 @@
#!/bin/bash
harbor_host=192.168.0.2:8033
namespace=xakny
app_name=""
new_tag=""
download_from_oss() {
if [ "$1" == "" ]; then
echo "no zip file in error!"
exit 233
fi
echo "start to download => $1"
wget "https://oss.demo.uavcmlc.com/cmlc-installation/tmp/$1"
echo ""
echo ""
}
upload_image_to_harbor(){
if [ "$app_name" == "" ]; then
echo "app name null exit!"
exit 233
fi
if ! docker load < "$1"; then
echo "docker load error !"
exit 233
fi
docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
echo ""
echo ""
echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
docker login -u admin -p V2ryStr@ngPss $harbor_host
docker push "$harbor_host/cmii/$app_name:$new_tag"
echo ""
echo ""
}
parse_args(){
if [ "$1" == "" ]; then
echo "no zip file in error!"
exit 233
fi
local image_name="$1"
# cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
app_name=$(echo "$image_name" | cut -d "=" -f1)
new_tag=$(echo "$image_name" | cut -d "=" -f2)
}
update_image_tag(){
if [ "$new_tag" == "" ]; then
echo "new tag error!"
exit 233
fi
local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)
echo "image grep is => ${image_prefix}"
echo "start to update ${namespace} ${app_name} to ${new_tag} !"
echo ""
kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
echo ""
echo "start to wait for 3 seconds!"
sleep 3
local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
echo ""
echo "new image are => $image_new"
echo ""
}
main(){
parse_args "$1"
download_from_oss "$1"
upload_image_to_harbor "$1"
update_image_tag
}
main "$@"

File diff suppressed because it is too large

View File

@@ -0,0 +1,672 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-flight-control
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "flight-control",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-iot
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "iot",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-awareness
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "awareness",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: xakny
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "111.63.69.71:8088",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
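These ConfigMaps are mounted into the matching frontend deployments as /home/cmii-platform/dist/ingress-config.js (see the platform manifests further down). A quick render check:

kubectl -n xakny get cm | grep tenant-prefix
kubectl -n xakny get cm tenant-prefix-uas -o jsonpath='{.data.ingress-config\.js}'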

View File

@@ -0,0 +1,309 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: 192.168.0.2:8033/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: 192.168.0.2:8033/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
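On this 1.20 cluster the admin-user ServiceAccount still gets a long-lived token Secret created automatically; fetch it to log in to the dashboard on NodePort 39999:

kubectl -n kube-system describe secret \
  "$(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')" | grep '^token'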

View File

@@ -0,0 +1,276 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: xakny
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: xakny
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
EMQX_CLUSTER__K8S__NAMESPACE: "xakny"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: xakny
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
# clientid 认证数据
auth.client.1.clientid = admin
auth.client.1.password = odD8#Ve7.B
auth.client.2.clientid = cmlc
auth.client.2.password = odD8#Ve7.B
## username authentication entries
auth.user.1.username = admin
auth.user.1.password = odD8#Ve7.B
auth.user.2.username = cmlc
auth.user.2.password = odD8#Ve7.B
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: xakny
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
spec:
affinity: {}
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: 192.168.0.2:8033/cmii/emqx:5.5.1
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: {}
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: xakny
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: xakny
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: xakny
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: xakny
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: xakny
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
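A connectivity smoke test against the NodePort service, assuming mosquitto-clients is available and using the credentials from emqx_auth_mnesia.conf above (any node IP works for a NodePort; 192.168.0.2 is just an example):

mosquitto_pub -h 192.168.0.2 -p 31883 -u admin -P 'odD8#Ve7.B' -t test/ping -m hello
# dashboard: http://192.168.0.2:38085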

View File

@@ -0,0 +1,203 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-cm
namespace: xakny
labels:
cmii.type: frontend
data:
nginx.conf: |
server {
listen 9528;
server_name localhost;
gzip on;
location / {
root /home/cmii-platform/dist;
index index.html index.htm;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-uaskny
namespace: xakny
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uaskny
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: uas-2.1
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uaskny
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uaskny
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-uaskny
image: 192.168.0.2:8033/cmii/cmii-uav-platform-uaskny:5.7.0-snapshot
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: xakny
- name: APPLICATION_NAME
value: cmii-uav-platform-uaskny
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-uas
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-uaskny
namespace: xakny
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uaskny
octopus.control: frontend-app-wdd
app.kubernetes.io/version: uas-2.1
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-uaskny
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cmii-uav-platform-uasmskny
namespace: xakny
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasmskny
octopus.control: frontend-app-wdd
app.kubernetes.io/app-version: uas-2.1
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasmskny
template:
metadata:
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasmskny
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: cmii-uav-platform-uasmskny
image: 192.168.0.2:8033/cmii/cmii-uav-platform-uasmskny:develop-0807
imagePullPolicy: Always
env:
- name: K8S_NAMESPACE
value: xakny
- name: APPLICATION_NAME
value: cmii-uav-platform-uasmskny
ports:
- name: platform-9528
containerPort: 9528
protocol: TCP
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-conf
mountPath: /etc/nginx/conf.d/nginx.conf
subPath: nginx.conf
- name: tenant-prefix
subPath: ingress-config.js
mountPath: /home/cmii-platform/dist/ingress-config.js
volumes:
- name: nginx-conf
configMap:
name: nginx-cm
items:
- key: nginx.conf
path: nginx.conf
- name: tenant-prefix
configMap:
name: tenant-prefix-uasms
items:
- key: ingress-config.js
path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
name: cmii-uav-platform-uasmskny
namespace: xakny
labels:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasmskny
octopus.control: frontend-app-wdd
app.kubernetes.io/version: uas-2.1
spec:
type: ClusterIP
selector:
cmii.type: frontend
cmii.app: cmii-uav-platform-uasmskny
ports:
- name: web-svc-port
port: 9528
protocol: TCP
targetPort: 9528
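Both platform Services are ClusterIP only; a quick sanity check without going through the ingress:

kubectl -n xakny port-forward svc/cmii-uav-platform-uaskny 9528:9528 &
curl -sI http://127.0.0.1:9528/ | head -n 1   # expect HTTP/1.1 200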

View File

@@ -0,0 +1,826 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: xakny
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/awareness)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/blockchain)$ $1/ redirect;
rewrite ^(/classification)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dikongzhixingh5)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/eventsh5)$ $1/ redirect;
rewrite ^(/flight-control)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/iot)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/mianyangbackend)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/pilot2cloud)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/renyike)$ $1/ redirect;
rewrite ^(/scanner)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/smauth)$ $1/ redirect;
rewrite ^(/smsecret)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
rewrite ^(/uavmsmanager)$ $1/ redirect;
rewrite ^(/secenter)$ $1/ redirect;
spec:
rules:
- host: fake-domain.xakny.io
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /awareness/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-awareness
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /blockchain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-blockchain
servicePort: 9528
- path: /classification/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-classification
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dikongzhixingh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dikongzhixingh5
servicePort: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-eventsh5
servicePort: 9528
- path: /flight-control/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-flight-control
servicePort: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /iot/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-iot-manager
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mianyangbackend
servicePort: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-pilot2-to-cloud
servicePort: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /renyike/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-renyike
servicePort: 9528
- path: /scanner/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-scanner
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smauth
servicePort: 9528
- path: /smsecret/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smsecret
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uaskny
servicePort: 9528
- path: /uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasmskny
servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /uavmsmanager/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-manager
servicePort: 9528
- path: /secenter/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-platform-security-center
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: xakny
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-sky-converge.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-sky-converge
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-datahub.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-datahub
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-advanced5g.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-advanced5g
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-fwdd.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-fwdd
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-iot-dispatcher.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-dispatcher
servicePort: 8080
- host: cmii-uav-iot-manager.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-manager
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sec-awareness.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sec-awareness
servicePort: 8080
- host: cmii-uav-security-trace.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-security-trace
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-sync.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sync
servicePort: 8080
- host: cmii-uav-tcp-server.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tcp-server
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-watchdog.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-watchdog
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
- host: cmii-uavms-pyfusion.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-pyfusion
servicePort: 8080
- host: cmii-uavms-security-center.uavcloud-xakny.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uavms-security-center
servicePort: 8080
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: xakny
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header upgradePrefix $http_upgrade;
proxy_set_header Connection "upgradePrefix";
spec:
rules:
- host: fake-domain.xakny.io
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- path: /uas/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- path: /converge/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-sky-converge
servicePort: 8080
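All three Ingresses match on Host, so tests must send the fake domain explicitly. Assuming the ingress controller is reachable through the 111.63.69.71:8088 front end referenced in the tenant ConfigMaps:

curl -s -o /dev/null -w '%{http_code}\n' \
  -H 'Host: fake-domain.xakny.io' http://111.63.69.71:8088/uas/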

View File

@@ -0,0 +1,78 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: xakny
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
spec:
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
nodePort: 37017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: xakny
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: uas-2.1
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: helm-mongo
image: 192.168.0.2:8033/cmii/mongo:5.0
resources: {}
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
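A login smoke test through the NodePort (37017), assuming mongosh is available; note the '#' in the root password must be URL-encoded as %23 inside the connection URI:

mongosh "mongodb://cmlc:REdPza8%23oVlt@192.168.0.2:37017/admin" --eval 'db.runCommand({ping: 1})'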

View File

@@ -0,0 +1,410 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: xakny
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
annotations: {}
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: xakny
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: xakny
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: xakny
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: xakny
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xakny
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xakny
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: xakny
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xakny
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: xakny
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xakny
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: xakny
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: xakny
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 192.168.0.2:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 192.168.0.2:8033/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/xakny/

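The StatefulSet pins itself to nodes labelled mysql-deploy=true and stores data on that node's hostPath, so label the node first; a minimal smoke test, with <node-name>/<node-ip> as placeholders:

kubectl label node <node-name> mysql-deploy=true
# after the pod is Ready, verify the NodePort from outside the cluster
mysql -h <node-ip> -P 33306 -u k8s_admin -p'fP#UaH6qQ3)8' -e 'SELECT VERSION();'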

@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: xakny
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: uas-2.1
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: xakny
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: uas-2.1
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: xakny
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: uas-2.1
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: uas-2.1
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: 192.168.0.2:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---

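Nacos points at the cmii_nacos_config database via the ConfigMap above; that database, with the Nacos schema imported, must exist before the pod starts. A hedged sketch, reusing the k8s_admin account:

mysql -h <node-ip> -P 33306 -u k8s_admin -p'fP#UaH6qQ3)8' \
  -e 'CREATE DATABASE IF NOT EXISTS cmii_nacos_config DEFAULT CHARACTER SET utf8mb4;'
# import the schema SQL shipped with the matching Nacos release, then open http://<node-ip>:38848/nacos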

@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: 192.168.0.2:8033/cmii/busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
        claimName: test-claim # must match the PVC name above

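To run the storage check: apply the claim and pod, then confirm the Bound status and the marker file on the NFS export; a short sketch (the manifest file name is illustrative):

kubectl apply -f nfs-test.yaml
kubectl get pvc test-claim && kubectl get pod test-pod
# on the NFS server (192.168.0.6), the provisioned subdirectory should now contain the marker
ls /var/lib/docker/nfs_data/*/NFS-CREATE-SUCCESS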

@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # set to the namespace where the provisioner is deployed; same below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var of the provisioner Deployment below
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # must match the namespace used in the RBAC objects above
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 192.168.0.2:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 192.168.0.6
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 192.168.0.6
path: /var/lib/docker/nfs_data

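If claims stay Pending, check the provisioner pod and its logs first; nodes mounting the export also need the NFS client utilities installed (e.g. nfs-utils on RPM-based systems):

kubectl -n kube-system get pods -l app=nfs-client-provisioner
kubectl -n kube-system logs deploy/nfs-client-provisioner --tail=20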

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: xakny
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.1
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: xakny
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.1
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: xakny
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.1
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: xakny
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: uas-2.1
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi

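All four claims should report Bound once the StorageClass is provisioning; a one-line check:

kubectl -n xakny get pvc nfs-backend-log-pvc helm-emqxs helm-mongo helm-rabbitmq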

@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: xakny
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: xakny
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: xakny
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: xakny
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: xakny
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: xakny
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
targetPort: stats
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: xakny
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: xakny
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: xakny
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: xakny
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: xakny
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 192.168.0.2:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 192.168.0.2:8033/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq

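AMQP is exposed on NodePort 35672 and the management UI on 36675 (user admin, password nYcRN91r._hj per the ConfigMap); a quick in-pod health check:

kubectl -n xakny exec helm-rabbitmq-0 -- rabbitmq-diagnostics -q cluster_status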

@@ -0,0 +1,593 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: xakny
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: xakny
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: xakny
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: xakny
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: xakny
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: xakny
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: xakny
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: xakny
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: xakny
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: xakny
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: xakny
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: xakny
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: xakny
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: uavcloud.env
operator: In
values:
- xakny
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 192.168.0.2:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: xakny
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 0
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: xakny
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: xakny
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 192.168.0.2:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.xakny.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}

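Note that redis-data is an emptyDir, so data does not survive rescheduling. A quick check against the master, using the base64-decoded redis-password from the Secret:

kubectl -n xakny exec helm-redis-master-0 -- redis-cli -a 'Mcache@4522' ping   # expects PONG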

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: xakny
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://111.63.69.71:8088;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: xakny
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30080
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: xakny
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: xakny
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: xakny
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 192.168.0.2:8033/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 31935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 111.63.69.71
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: xakny/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: xakny/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: 192.168.0.2:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
env:
- name: OSS_ENDPOINT
value: 'http://helm-minio:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: xakny/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live operator section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: xakny
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 192.168.0.2:8033/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: xakny
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: xakny
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: xakny
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: uas-2.1
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: uas-2.1
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: uas-2.1
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://111.63.69.71:31935'
rtsp: 'rtsp://111.63.69.71:30554'
srt: 'srt://111.63.69.71:30556'
flv: 'http://111.63.69.71:30500'
hls: 'http://111.63.69.71:30500'
rtc: 'webrtc://111.63.69.71:30080'
replay: 'https://111.63.69.71:30333'
minio:
endpoint: http://helm-minio:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls

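An end-to-end test of the streaming path: publish RTMP to the exporter NodePort, then pull HLS via the hls_entry_prefix; a sketch assuming ffmpeg and a local sample file:

ffmpeg -re -i sample.mp4 -c copy -f flv rtmp://111.63.69.71:31935/live/test
# HLS playlist per the SRS config: http://111.63.69.71:8088/live/test.m3u8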

@@ -0,0 +1,63 @@
#! /bin/bash
# install lvm2 (yum-based system assumed, e.g. Kylin/CentOS)
yum install -y lvm2
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
export VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/vdb
partprobe
# if the volume group already exists, just extend it instead:
# vgextend rootvg /dev/sdc1
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# adjust the size to the actual environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
#mkfs.ext4 /dev/mapper/${VG_NAME}-lvdata
mkdir -p /home/app-plus
#mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /home/app-plus xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
# to grow the root filesystem: find the filesystem to extend via df -Th, then run:
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
# custom: grow the root volume group and root filesystem on /dev/vda
echo "n
p
t
8e
w
" | fdisk /dev/vda
partprobe
vgextend klas_host-10-190-202-141 /dev/vda4
lvextend -l +100%FREE /dev/mapper/klas_host--10--190--202--141-root
partprobe
xfs_growfs /dev/mapper/klas_host--10--190--202--141-root
df -TH


@@ -0,0 +1,82 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: doris-cluster-be-conf
namespace: xakny
labels:
app.kubernetes.io/component: be
data:
  be.conf: |
CUR_DATE=`date +%Y%m%d-%H%M%S`
# Log dir
LOG_DIR="${DORIS_HOME}/log/"
# For jdk 8
JAVA_OPTS="-Dfile.encoding=UTF-8 -Xmx2048m -DlogPath=$LOG_DIR/jni.log -Xloggc:$LOG_DIR/be.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives"
# Set your own JAVA_HOME
# JAVA_HOME=/path/to/jdk/
# https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
    # jemalloc memory allocator tuning parameters, see https://jemalloc.net/jemalloc.3.html
JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
JEMALLOC_PROF_PRFIX=""
# ports for admin, web, heartbeat service
be_port = 9060
webserver_port = 8040
heartbeat_service_port = 9050
brpc_port = 8060
arrow_flight_sql_port = -1
# HTTPS configures
enable_https = false
# path of certificate in PEM format.
#ssl_certificate_path = "$DORIS_HOME/conf/cert.pem"
# path of private key in PEM format.
#ssl_private_key_path = "$DORIS_HOME/conf/key.pem"
# Choose one if there are more than one ip except loopback address.
# Note that there should at most one ip match this list.
# If no ip match this rule, will choose one randomly.
# use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
# Default value is empty.
# priority_networks = 10.10.10.0/24;192.168.0.0/16
# data root path, separate by ';'
# You can specify the storage type for each root path, HDD (cold data) or SSD (hot data)
# eg:
# storage_root_path = /home/disk1/doris;/home/disk2/doris;/home/disk2/doris
# storage_root_path = /home/disk1/doris,medium:SSD;/home/disk2/doris,medium:SSD;/home/disk2/doris,medium:HDD
# /home/disk2/doris,medium:HDD(default)
#
# you also can specify the properties by setting '<property>:<value>', separate by ','
# property 'medium' has a higher priority than the extension of path
#
# Default value is ${DORIS_HOME}/storage, you should create it by hand.
# storage_root_path = ${DORIS_HOME}/storage
# Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
# jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
# Advanced configurations
# INFO, WARNING, ERROR, FATAL
sys_log_level = INFO
# sys_log_roll_mode = SIZE-MB-1024
# sys_log_roll_num = 10
# sys_log_verbose_modules = *
# log_buffer_level = -1
# aws sdk log level
# Off = 0,
# Fatal = 1,
# Error = 2,
# Warn = 3,
# Info = 4,
# Debug = 5,
# Trace = 6
# Default to turn off aws sdk log, because aws sdk errors that need to be cared will be output through Doris logs
#aws_log_level=0
## If you are not running in aws cloud, you can disable EC2 metadata
#AWS_EC2_METADATA_DISABLED=false


@@ -0,0 +1,17 @@
kind: Service
apiVersion: v1
metadata:
namespace: xakny
name: doris-cluster-be-internal
labels:
app.kubernetes.io/component: doris-cluster-be-internal
spec:
ports:
- name: heartbeat-port
protocol: TCP
port: 9050
targetPort: 9050
selector:
app.kubernetes.io/component: doris-cluster-be
clusterIP: None
type: ClusterIP


@@ -0,0 +1,32 @@
kind: Service
apiVersion: v1
metadata:
name: doris-cluster-be-service
namespace: xakny
labels:
app.kubernetes.io/component: doris-cluster-be
spec:
ports:
- name: be-port
protocol: TCP
port: 9060
targetPort: 9060
nodePort: 32189
- name: webserver-port
protocol: TCP
port: 8040
targetPort: 8040
nodePort: 31624
- name: heartbeat-port
protocol: TCP
port: 9050
targetPort: 9050
nodePort: 31625
- name: brpc-port
protocol: TCP
port: 8060
targetPort: 8060
nodePort: 31627
selector:
app.kubernetes.io/component: doris-cluster-be
type: NodePort


@@ -0,0 +1,214 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: doris-cluster-be
namespace: xakny
labels:
app.kubernetes.io/component: doris-cluster-be
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: doris-cluster-be
template:
metadata:
name: doris-cluster-be
labels:
app.kubernetes.io/component: doris-cluster-be
spec:
imagePullSecrets:
- name: harborsecret
volumes:
- name: podinfo
downwardAPI:
items:
- path: labels
fieldRef:
apiVersion: v1
fieldPath: metadata.labels
- path: annotations
fieldRef:
apiVersion: v1
fieldPath: metadata.annotations
defaultMode: 420
- name: doris-cluster-be-conf
configMap:
name: doris-cluster-be-conf
defaultMode: 420
- name: be-storage
persistentVolumeClaim:
claimName: doris-be-storage-pvc
- name: be-log
persistentVolumeClaim:
claimName: doris-fe-log-pvc
initContainers:
- name: default-init
image: '192.168.0.2:8033/cmii/alpine:1.0.0'
command:
- /bin/sh
args:
- '-c'
- sysctl -w vm.max_map_count=2000000 && swapoff -a
resources:
limits:
cpu: '1'
memory: 1Gi
requests:
cpu: '0.5'
memory: 500Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
containers:
- name: be
image: '192.168.0.2:8033/cmii/doris.be-ubuntu:2.1.6'
command:
- /opt/apache-doris/be_entrypoint.sh
args:
- $(ENV_FE_ADDR)
ports:
- name: be-port
containerPort: 9060
protocol: TCP
- name: webserver-port
containerPort: 8040
protocol: TCP
- name: heartbeat-port
containerPort: 9050
protocol: TCP
- name: brpc-port
containerPort: 8060
protocol: TCP
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CONFIGMAP_MOUNT_PATH
value: /etc/doris
- name: USER
value: root
- name: DORIS_ROOT
value: /opt/apache-doris
- name: ENV_FE_ADDR
value: doris-cluster-fe-service
- name: FE_QUERY_PORT
value: '9030'
resources:
limits:
cpu: '8'
memory: 8Gi
requests:
cpu: '4'
memory: 4Gi
volumeMounts:
- name: podinfo
mountPath: /etc/podinfo
- name: be-storage
mountPath: /opt/apache-doris/be/storage
- name: be-log
mountPath: /opt/apache-doris/be/log
- name: doris-cluster-be-conf
mountPath: /etc/doris
livenessProbe:
tcpSocket:
port: 9050
initialDelaySeconds: 80
timeoutSeconds: 180
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /api/health
port: 8040
scheme: HTTP
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
startupProbe:
tcpSocket:
port: 9050
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 60
lifecycle:
preStop:
exec:
command:
- /opt/apache-doris/be_prestop.sh
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: doris.cluster
operator: In
values:
- "true"
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- doris-cluster-be
topologyKey: kubernetes.io/hostname
schedulerName: default-scheduler
# volumeClaimTemplates:
# - kind: PersistentVolumeClaim
# apiVersion: v1
# metadata:
# name: be-storage
# spec:
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: '10'
# storageClassName: nfs-prod-distribute
# volumeMode: Filesystem
# - kind: PersistentVolumeClaim
# apiVersion: v1
# metadata:
# name: be-log
# spec:
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: '10'
# storageClassName: nfs-prod-distribute
# volumeMode: Filesystem
serviceName: doris-cluster-be-internal
podManagementPolicy: Parallel

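BE pods schedule only onto nodes labelled doris.cluster=true, and the volumes reference pre-created claims (doris-be-storage-pvc and, as written, doris-fe-log-pvc); the prerequisites in short:

kubectl label node <node-name> doris.cluster=true
kubectl -n xakny get pvc doris-be-storage-pvc doris-fe-log-pvc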

@@ -0,0 +1,67 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: doris-cluster-fe-conf
namespace: xakny
labels:
app.kubernetes.io/component: fe
data:
fe.conf: |
#####################################################################
## The uppercase properties are read and exported by bin/start_fe.sh.
## To see all Frontend configurations,
## see fe/src/org/apache/doris/common/Config.java
#####################################################################
CUR_DATE=`date +%Y%m%d-%H%M%S`
# Log dir
LOG_DIR = ${DORIS_HOME}/log
# For jdk 8
JAVA_OPTS="-Dfile.encoding=UTF-8 -Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:$LOG_DIR/log/fe.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Dlog4j2.formatMsgNoLookups=true"
# Set your own JAVA_HOME
# JAVA_HOME=/path/to/jdk/
##
## the lowercase properties are read by main program.
##
# store metadata, must be created before start FE.
# Default value is ${DORIS_HOME}/doris-meta
# meta_dir = ${DORIS_HOME}/doris-meta
# Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
# jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
http_port = 8030
rpc_port = 9020
query_port = 9030
edit_log_port = 9010
arrow_flight_sql_port = -1
# Choose one if there are more than one ip except loopback address.
# Note that there should at most one ip match this list.
# If no ip match this rule, will choose one randomly.
# use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
# Default value is empty.
# priority_networks = 10.10.10.0/24;192.168.0.0/16
# Advanced configurations
# log_roll_size_mb = 1024
# INFO, WARN, ERROR, FATAL
sys_log_level = INFO
    # NORMAL, BRIEF, ASYNC: FE log output modes. NORMAL (default) writes synchronously with source location info; ASYNC writes asynchronously with location info; BRIEF writes asynchronously without location info. Performance improves in that order.
sys_log_mode = ASYNC
# sys_log_roll_num = 10
# sys_log_verbose_modules = org.apache.doris
# audit_log_dir = $LOG_DIR
# audit_log_modules = slow_query, query
# audit_log_roll_num = 10
# meta_delay_toleration_second = 10
# qe_max_connection = 1024
# qe_query_timeout_second = 300
# qe_slow_log_ms = 5000
    # Fully Qualified Domain Name: when enabled, nodes communicate with each other via FQDN
enable_fqdn_mode = true


@@ -0,0 +1,17 @@
kind: Service
apiVersion: v1
metadata:
name: doris-cluster-fe-internal
namespace: xakny
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
ports:
- name: query-port
protocol: TCP
port: 9030
targetPort: 9030
selector:
app.kubernetes.io/component: doris-cluster-fe
clusterIP: None
type: ClusterIP


@@ -0,0 +1,32 @@
kind: Service
apiVersion: v1
metadata:
name: doris-cluster-fe-service
namespace: xakny
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
ports:
- name: http-port
protocol: TCP
port: 8030
targetPort: 8030
nodePort: 31620
- name: rpc-port
protocol: TCP
port: 9020
targetPort: 9020
nodePort: 31621
- name: query-port
protocol: TCP
port: 9030
targetPort: 9030
nodePort: 31622
- name: edit-log-port
protocol: TCP
port: 9010
targetPort: 9010
nodePort: 31623
selector:
app.kubernetes.io/component: doris-cluster-fe
type: NodePort

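Once FE is up, its MySQL protocol endpoint is reachable on NodePort 31622; a minimal sketch, assuming the Doris default root account with an empty password:

mysql -h <node-ip> -P 31622 -uroot -e 'SHOW FRONTENDS; SHOW BACKENDS;'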

@@ -0,0 +1,198 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: doris-cluster-fe
namespace: xakny
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: doris-cluster-fe
template:
metadata:
name: doris-cluster-fe
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
imagePullSecrets:
- name: harborsecret
volumes:
- name: meta
persistentVolumeClaim:
# claimName: meta
claimName: doris-fe-meta-pvc
- name: log
persistentVolumeClaim:
# claimName: log
claimName: doris-fe-log-pvc
- name: podinfo
downwardAPI:
items:
- path: labels
fieldRef:
apiVersion: v1
fieldPath: metadata.labels
- path: annotations
fieldRef:
apiVersion: v1
fieldPath: metadata.annotations
defaultMode: 420
- name: doris-cluster-fe-conf
configMap:
name: doris-cluster-fe-conf
defaultMode: 420
containers:
- name: doris-cluster-fe
image: '192.168.0.2:8033/cmii/doris.fe-ubuntu:2.1.6'
command:
- /opt/apache-doris/fe_entrypoint.sh
args:
- $(ENV_FE_ADDR)
ports:
- name: http-port
containerPort: 8030
protocol: TCP
- name: rpc-port
containerPort: 9020
protocol: TCP
- name: query-port
containerPort: 9030
protocol: TCP
- name: edit-log-port
containerPort: 9010
protocol: TCP
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: HOST_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CONFIGMAP_MOUNT_PATH
value: /etc/doris
- name: USER
value: root
- name: DORIS_ROOT
value: /opt/apache-doris
- name: ENV_FE_ADDR
value: doris-cluster-fe-service
- name: FE_QUERY_PORT
value: '9030'
- name: ELECT_NUMBER
value: '3'
resources:
limits:
cpu: '4'
memory: 8Gi
requests:
cpu: '2'
memory: 4Gi
volumeMounts:
- name: podinfo
mountPath: /etc/podinfo
- name: log
mountPath: /opt/apache-doris/fe/log
- name: meta
mountPath: /opt/apache-doris/fe/doris-meta
- name: doris-cluster-fe-conf
mountPath: /etc/doris
livenessProbe:
tcpSocket:
port: 9030
initialDelaySeconds: 80
timeoutSeconds: 180
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /api/health
port: 8030
scheme: HTTP
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 3
startupProbe:
tcpSocket:
port: 9030
timeoutSeconds: 1
periodSeconds: 5
successThreshold: 1
failureThreshold: 60
lifecycle:
preStop:
exec:
command:
- /opt/apache-doris/fe_prestop.sh
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: doris.cluster
operator: In
values:
- "true"
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/component
operator: In
values:
- doris-cluster-fe
topologyKey: kubernetes.io/hostname
schedulerName: default-scheduler
# volumeClaimTemplates:
# - kind: PersistentVolumeClaim
# apiVersion: v1
# metadata:
# name: meta
# spec:
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: 10G
# storageClassName: hcms-efs-class
# volumeMode: Filesystem
# - kind: PersistentVolumeClaim
# apiVersion: v1
# metadata:
# name: log
# spec:
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: '10'
# storageClassName: hcms-efs-class
# volumeMode: Filesystem
serviceName: doris-cluster-fe-internal
podManagementPolicy: Parallel
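
After applying the StatefulSet it is worth waiting for the rollout and confirming the FE answers on the query port before starting the BEs; a minimal sketch, assuming the in-cluster service names above:

kubectl -n xakny rollout status statefulset/doris-cluster-fe
# inside the cluster the headless service resolves straight to the FE pod
mysql -uroot -h doris-cluster-fe-internal -P9030 -e 'SHOW FRONTENDS\G'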

View File

@@ -0,0 +1,60 @@
---
# pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-fe-meta-pvc
namespace: xakny
spec:
storageClassName: nfs-prod-distribute
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-fe-log-pvc
namespace: xakny
spec:
storageClassName: nfs-prod-distribute
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-be-storage-pvc
namespace: xakny
spec:
storageClassName: nfs-prod-distribute
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 500Gi # adjust to the actual storage requirement
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: doris-be-log-pvc
namespace: xakny
spec:
storageClassName: nfs-prod-distribute
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
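
All four claims should report Bound against the nfs-prod-distribute StorageClass before the StatefulSets are applied; a quick check:

kubectl -n xakny get pvc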

View File

@@ -0,0 +1,5 @@
Modify the PVC file
Update the NAMESPACE everywhere
Update the IMAGE inside the StatefulSets (see the sed sketch below)
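
A hedged sed sketch for the three substitutions above (the target namespace and registry are placeholders to fill in; review the result before applying):

# 1) swap the namespace in every manifest in this directory
sed -i 's/namespace: xakny/namespace: <your-namespace>/' *.yaml
# 2) point the StatefulSets at your registry
sed -i 's#192.168.0.2:8033#<your-registry>#' doris-fe-statusfulset.yaml doris-be-statusfulset.yaml
# 3) adjust PVC sizes / StorageClass by editing doris-pvc.yaml directly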

View File

@@ -0,0 +1,79 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
namespace: xakny
name: helm-minio
spec:
serviceName: helm-minio
replicas: 1
selector:
matchLabels:
app: helm-minio
template:
metadata:
labels:
app: helm-minio
spec:
imagePullSecrets:
- name: harborsecret
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: minio.node
operator: In
values:
- "true"
containers:
- name: minio
image: 192.168.0.2:8033/cmii/minio:RELEASE.2023-06-02T23-17-26Z
command: ["/bin/sh", "-c"]
args:
- minio server /data --console-address ":9001"
ports:
- containerPort: 9000
name: api
- containerPort: 9001
name: console
env:
- name: MINIO_ACCESS_KEY
value: "cmii"
- name: MINIO_SECRET_KEY
value: "B#923fC7mk"
volumeMounts:
- name: data
mountPath: /data
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
volumes:
- name: data
# persistentVolumeClaim:
# claimName: helm-minio
hostPath:
path: /var/lib/docker/minio-pv/xakny/
---
apiVersion: v1
kind: Service
metadata:
name: helm-minio
namespace: xakny
spec:
selector:
app: helm-minio
ports:
- name: api
port: 9000
targetPort: 9000
nodePort: 39000
- name: console
port: 9001
targetPort: 9001
nodePort: 39001
type: NodePort
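
Because the data volume is a hostPath, the nodeAffinity above pins the pod to whichever node holds /var/lib/docker/minio-pv/xakny/; that node has to carry the minio.node label, which is applied by hand (sketch; <node-name> is a placeholder):

kubectl label node <node-name> minio.node=true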

View File

@@ -0,0 +1,52 @@
#!/bin/bash
export tenant_name=outside
export inner_master_ip=192.168.0.2
export minio_host_ip=192.168.0.2
mc alias set ${tenant_name} http://${minio_host_ip}:39000 cmii B#923fC7mk
mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls ${tenant_name}/geodata ${tenant_name}/ilm-detect ${tenant_name}/ilm-geodata
echo ""
echo "set rabbit mq"
mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@${inner_master_ip}:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
echo ""
echo "sleep 5 s!"
sleep 5
mc admin service restart ${tenant_name}
echo "sleep 5 s!"
sleep 5
echo ""
echo "start to add event notification !"
mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/geodata arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/ilm-detect arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/ilm-geodata arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete
mc ilm add --expiry-days "1" ${tenant_name}/tus
echo ""
echo "done of init !"

View File

@@ -0,0 +1,3 @@
%}3}vbJXWv

View File

@@ -0,0 +1,112 @@
#!/bin/bash
set -eo pipefail
# Script parameters
DOCKER_VERSION="20.10" # set the desired version here, e.g. 20.10 or 20.10.15
UBUNTU_IDS=("18.04" "20.04" "22.04" "24.04")
ALIYUN_MIRROR="https://mirrors.aliyun.com"
DOCKER_COMPOSE_VERSION="2.26.1"
# 1. Verify the host is Ubuntu
check_ubuntu() {
    if ! command -v lsb_release &> /dev/null || [[ $(lsb_release -is) != "Ubuntu" ]]; then
        echo "Error: this script only supports Ubuntu"
        exit 1
    fi
    local version_id=$(lsb_release -rs)
    if [[ ! " ${UBUNTU_IDS[*]} " =~ " ${version_id} " ]]; then
        echo "Error: unsupported Ubuntu version ${version_id}; supported versions: ${UBUNTU_IDS[*]}"
        exit 1
    fi
}
# 2. Switch apt to the Aliyun mirror
set_aliyun_mirror() {
    sudo sed -i "s/archive.ubuntu.com/mirrors.aliyun.com/g" /etc/apt/sources.list
    sudo sed -i "s/security.ubuntu.com/mirrors.aliyun.com/g" /etc/apt/sources.list
    sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates
}
# 3. Prepare the Docker apt repository
prepare_docker_env() {
    sudo mkdir -p /etc/apt/keyrings
    curl -fsSL $ALIYUN_MIRROR/docker-ce/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
    local codename=$(lsb_release -cs)
    echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] $ALIYUN_MIRROR/docker-ce/linux/ubuntu $codename stable" | \
        sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
    sudo apt-get update
}
# 4. Resolve the target Docker version
get_docker_version() {
    local target_version=""
    if [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+$ ]]; then
        # pick the highest patch release within the major.minor series
        target_version=$(apt-cache madison docker-ce \
            | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
            | grep -E "^[0-9]+:${DOCKER_VERSION}([.-]|\~\w+)" \
            | sort -rV \
            | head -1)
    elif [[ $DOCKER_VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
        # exact version match
        target_version=$(apt-cache madison docker-ce \
            | awk -F'|' '{gsub(/ /,"",$2); print $2}' \
            | grep -E "^[0-9]+:${DOCKER_VERSION}.*$(lsb_release -cs)" )
    fi
    [ -z "$target_version" ] && echo "Error: Docker version $DOCKER_VERSION not found" && exit 1
    echo "$target_version" | sed -E 's/^[0-9]+://' # strip the epoch prefix (-E needed: plain '+' is literal in BRE)
}
# 5. Main flow
main() {
    check_ubuntu
    echo "-- Configuring Aliyun mirror --"
    set_aliyun_mirror
    echo "-- Preparing Docker repository --"
    prepare_docker_env
    echo "-- Resolving Docker version --"
    local full_version=$(get_docker_version)
    echo "Selected version: $full_version"
    echo "-- Installing packages --"
    sudo apt-get install -y \
        docker-ce-cli="$full_version" \
        docker-ce="$full_version" \
        docker-ce-rootless-extras="$full_version" \
        containerd.io \
        docker-buildx-plugin \
        docker-compose-plugin
    echo "-- Installing docker-compose --"
    sudo curl -sSL "https://get.daocloud.io/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m`" -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    echo "-- Holding packages to block auto-upgrade --"
    sudo apt-mark hold docker-ce docker-ce-cli containerd.io
    echo "-- Enabling and starting Docker --"
    sudo systemctl enable docker && sudo systemctl start docker
    echo -e "\n=== Installation complete ==="
    docker --version
    docker-compose --version
}
main
# The script above implements the following spec:
# the DOCKER_VERSION variable up front accepts 20.10.15 or 20.10 (installing the highest patch release);
# 1. verify the host is Ubuntu (this script supports Ubuntu only);
# 2. detect the release; Ubuntu 18.04 / 20.04 / 22.04 / 24.04 are supported;
# 3. switch the apt mirror to Aliyun for the detected release;
# 4. install the matching Docker version online, along with docker-compose and the common plugins;
# 5. hold the Docker packages so apt never auto-upgrades them.

View File

@@ -28,30 +28,16 @@ cat /usr/local/etc/wdd/agent-wdd-config.yaml
/usr/local/bin/agent-wdd base harbor install
# run on the master node
# install octopus-agent
mv agent-wdd_linux_amd64 /usr/local/bin/agent-wdd
chmod +x /usr/local/bin/agent-wdd
# install the ssh key on the master node
/usr/local/bin/agent-wdd base ssh config
/usr/local/bin/agent-wdd base ssh key
# run commands in batch
host_list=(
172.16.100.56
172.16.100.57
172.16.100.58
)
host_list=(
172.16.100.62
172.16.100.51
172.16.100.52
172.16.100.53
172.16.100.54
172.16.100.55
172.16.100.56
172.16.100.57
172.16.100.58
172.16.100.59
172.16.100.60
172.16.100.61
)
host_list=(
172.16.100.56
172.16.100.57
@@ -63,6 +49,9 @@ for server in "${host_list[@]}";do
echo ""
done
# batch-install the key from the master node
# copy and sync files
export server=172.16.100.62
@@ -70,6 +59,14 @@ scp /usr/local/bin/agent-wdd root@${server}:/usr/local/bin/agent-wdd
ssh root@${server} "/usr/local/bin/agent-wdd base ssh config"
ssh root@${server} "/usr/local/bin/agent-wdd base ssh key"
# install docker-compose
mv docker-compose-linux-x86_64 /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
# ssh root@${server} "/usr/local/bin/agent-wdd base tools"
# disk initialization
ssh root@${server} "mkdir /root/wdd"
scp /root/wdd/disk.sh root@${server}:/root/wdd/
@@ -83,7 +80,7 @@ scp /root/wdd/docker-compose-v2.18.0-linux-amd64 root@${server}:/root/wdd/
ssh root@${server} "/usr/local/bin/agent-wdd info all"
ssh root@${server} "cat /usr/local/etc/wdd/agent-wdd-config.yaml"
# ssh root@${server} "/usr/local/bin/agent-wdd base tools"
ssh root@${server} "/usr/local/bin/agent-wdd base swap"
ssh root@${server} "/usr/local/bin/agent-wdd base firewall"
@@ -103,3 +100,20 @@ ssh root@${server} "docker info"
wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/nginx=1.27.0=2025-03-11=402.tar.gz && docker load < nginx=1.27.0=2025-03-11=402.tar.gz && docker run -it --rm harbor.cdcyy.com.cn/cmii/nginx:1.27.0
# run on the master node: install the harbor registry
/usr/local/bin/agent-wdd base harbor install
# install rke and kubectl
mv /root/wdd/rke_amd64 /usr/local/bin/rke
chmod +x /usr/local/bin/rke
mv /root/wdd/kubectl /usr/local/bin/kubectl
chmod +x /usr/local/bin/kubectl
# install the k8s certificate
curl -s https://172.29.137.125

View File

@@ -1,7 +1,7 @@
#!/bin/bash
-#nfs_data_path="/var/lib/docker/nfs_data"
+nfs_data_path="/var/lib/docker/nfs_data"
-nfs_data_path="/data/nfs_data"
+#nfs_data_path="/data/nfs_data"
deploy_nfs_server(){

View File

@@ -1,8 +1,9 @@
upstream proxy_server {
ip_hash;
-server 172.16.100.55:30500;
-server 172.16.100.59:30500;
-server 172.16.100.60:30500;
+server 192.168.0.2:30500;
+server 192.168.0.4:30500;
+server 192.168.0.5:30500;
+server 192.168.0.6:30500;
}
server {
@@ -21,7 +22,7 @@ server {
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 12k;
-proxy_set_header Host fake-domain.eedsjc-uavms.io;
+proxy_set_header Host fake-domain.xakny.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";

View File

@@ -0,0 +1,50 @@
export tenant_name=outside
export inner_master_ip=<internal IP of the master node>
export minio_host_ip=<internal IP of the MinIO host>
mc alias set ${tenant_name} http://${minio_host_ip}:9000 cmii B#923fC7mk
mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls ${tenant_name}/geodata ${tenant_name}/ilm-detect ${tenant_name}/ilm-geodata
echo ""
echo "set rabbit mq"
mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@${inner_master_ip}:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
echo ""
echo "sleep 5 s!"
sleep 5
mc admin service restart ${tenant_name}
echo "sleep 5 s!"
sleep 5
echo ""
echo "start to add event notification !"
mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/geodata arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/ilm-detect arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/ilm-geodata arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete
mc ilm add --expiry-days "1" ${tenant_name}/tus
echo ""
echo "done of init !"

View File

@@ -18,6 +18,9 @@ kubectl delete -f k8s-nfs-test.yaml
cd /var/lib/docker/nfs_data
+kubectl create ns xakny
kubectl apply -f k8s-pvc.yaml
kubectl delete -f k8s-pvc.yaml
@@ -37,13 +40,33 @@ kubectl delete -f k8s-redis.yaml
kubectl apply -f k8s-mysql.yaml
kubectl delete -f k8s-mysql.yaml
----
+doris deployment
+---
+kubectl apply -f doris-pvc.yaml
+kubectl apply -f doris-fe-configmap.yaml
+kubectl apply -f doris-be-configmap.yaml
+kubectl apply -f doris-be-internal-service.yaml
+kubectl apply -f doris-be-service.yaml
+kubectl apply -f doris-fe-internal-service.yaml
+kubectl apply -f doris-fe-service.yaml
+kubectl apply -f doris-fe-statusfulset.yaml
+kubectl delete -f doris-fe-statusfulset.yaml
+kubectl apply -f doris-be-statusfulset.yaml
+kubectl delete -f doris-be-statusfulset.yaml
+---
+database initialization
+---
kubectl apply -f k8s-nacos.yaml
kubectl delete -f k8s-nacos.yaml
+---
vim k8s-configmap.yaml
kubectl apply -f k8s-configmap.yaml
kubectl delete -f k8s-configmap.yaml

View File

@@ -1,6 +1,6 @@
-export harbor_host=172.16.100.55:8033
+export harbor_host=192.168.0.2:8033
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects

View File

@@ -8,7 +8,7 @@ env:
value: "eth0" value: "eth0"
# 更加保险 # 更加保险
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=ens18 kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth0
# 删除所有的calico pod # 删除所有的calico pod

View File

@@ -5,7 +5,7 @@ gzip_image_list_txt="all-gzip-image-list.txt" # usually no need to change
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/root/octopus-image"
-DockerRegisterDomain="172.16.100.55:8033" # change to the actual registry
+DockerRegisterDomain="192.168.0.2:8033" # change to the actual registry
HarborAdminPass=V2ryStr@ngPss # must match the password in the first script
print_green() {
@@ -116,9 +116,9 @@ Load_Tag_Upload(){
shift # past argument
;;
cmii)
-local_gzip_path="$local_gzip_path/uavms-2.0"
+local_gzip_path="$local_gzip_path/cmii"
mkdir -p $local_gzip_path
-oss_prefix_url="$oss_prefix_url/uavms-2.0/"
+oss_prefix_url="$oss_prefix_url/cmii/"
ltu
shift # past argument
;;
@@ -163,6 +163,6 @@ test(){
}
# test
-#Download_Load_Tag_Upload "cmii"
+Download_Load_Tag_Upload "rke"
-Load_Tag_Upload "cmii"
+# Load_Tag_Upload "cmii"

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
-name_space=szgz
+name_space=zjyd
delete_all_fronted_cmii_pod(){

View File

@@ -20,7 +20,7 @@
# ## auto-grow the XFS filesystem to the maximum available size
-# xfs_growfs /dev/mapper/centos-root
+# xfs_growfs /dev/mapper/centos-r oot
# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'
@@ -72,9 +72,9 @@ echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
+s
# grow the root filesystem; use df -Th to find the ${VG_NAME}-root filesystem to extend
-# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
+# lvextend -l +100%FREE /dev/mapper/s${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
# custom: install lvm2

View File

@@ -0,0 +1,84 @@
#!/bin/bash
set -e
# User-configurable section
DISK="/dev/sdb"              # physical disk to operate on (adjust to your environment)
MOUNT_PATH="/var/lib/docker" # mount point (the directory is created automatically)
FS_TYPE="ext4"               # filesystem type, ext4 or xfs (default: ext4)
#----------------------------------------------------------
# Core logic (avoid changing unless necessary)
#----------------------------------------------------------
function check_prerequisites() {
    # must run as root
    [[ $EUID -ne 0 ]] && echo -e "\033[31mError: this script must be run as root\033[0m" && exit 1
    # the disk must exist
    [[ ! -b "$DISK" ]] && echo -e "\033[31mError: disk $DISK does not exist\033[0m" && exit 1
    # validate the filesystem type
    if [[ "$FS_TYPE" != "ext4" && "$FS_TYPE" != "xfs" ]]; then
        echo -e "\033[31mError: unsupported filesystem $FS_TYPE; only ext4/xfs are supported\033[0m"
        exit 1
    fi
}
function prepare_disk() {
    local partition="${DISK}1"
    echo -e "\033[34mInitializing the disk partition...\033[0m"
    parted "$DISK" --script mklabel gpt
    parted "$DISK" --script mkpart primary 0% 100%
    parted "$DISK" --script set 1 lvm on
    partprobe "$DISK" # make sure the kernel sees the new partition table
    echo -e "\033[34mCreating the LVM structure...\033[0m"
    pvcreate "$partition"
    vgcreate datavg "$partition"
    lvcreate -y -l 100%FREE -n lvdata datavg
}
function format_and_mount() {
    echo -e "\033[34mFormatting the logical volume...\033[0m"
    if [[ "$FS_TYPE" == "ext4" ]]; then
        mkfs.ext4 -F "/dev/datavg/lvdata"
    else
        mkfs.xfs -f "/dev/datavg/lvdata"
    fi
    echo -e "\033[34mConfiguring the mount...\033[0m"
    mkdir -p "$MOUNT_PATH"
    UUID=$(blkid -s UUID -o value "/dev/datavg/lvdata")
    echo "UUID=$UUID $MOUNT_PATH $FS_TYPE defaults 0 0" | tee -a /etc/fstab >/dev/null
    mount -a
}
function verify_result() {
    echo -e "\n\033[1;36mFinal verification\033[0m"
    lsblk -f "$DISK"
    echo -e "\nDisk space usage:"
    df -hT "$MOUNT_PATH"
}
# Main flow
check_prerequisites
prepare_disk
format_and_mount
verify_result
echo -e "\n\033[32mDone; please review the output above carefully\033[0m"
# The script implements the following spec; the physical disk name, mount path,
# and filesystem format are set via the variables at the top:
# 1. relabel the physical disk as GPT;
# 2. create a single partition spanning the whole disk, typed for LVM;
# 3. assign the partition to the volume group datavg;
# 4. allocate all free space in datavg to the logical volume lvdata;
# 5. format the logical volume with the configured filesystem (xfs or ext4, default ext4);
# 6. create the configured mount point;
# 7. write the mount to /etc/fstab and run the full mount;
# 8. run lsblk and df -TH to verify the partition is mounted correctly.

View File

@@ -4,15 +4,12 @@
# change the ownership of the directory
export sql_file_folder_name=uavms
chown 1001:1001 /var/lib/docker/mysql-pv/ynydapp/${sql_file_folder_name}/
# then exec into the mysql pod
INSERT INTO `uav_lifecycle`.`regulator` (`id`, `name`, `is_system_admin`, `telephone`, `avatar_url`, `authentication_status`, `authentication_time`, `password`, `password_modify_time`, `is_frozen`, `is_del`, `create_at`, `create_by`, `update_at`, `update_by`) VALUES (1, '超级管理员', b'1', LOWER(HEX(AES_ENCRYPT('13800000000','TELEPHONE'))), NULL, 0, NULL, '$2a$10$zaAxaqvNzx8HdERMTrOF6u.InuKLSSi2VGQDBmYuEIG56ZqV6TwBu', NOW(), b'0', b'0', NOW(), 'r_1', NOW(), 'r_1');
export sql_file_folder_name=uas-2.0
export local_mysql_host_path="/var/lib/docker/mysql-pv/$sql_file_folder_name"
export sql_file_folder_name=2.1
chown 1001:1001 /var/lib/docker/mysql-pv/xakny/${sql_file_folder_name}/
# then exec into the mysql pod
export sql_file_folder_name=2.1
export sql_import_file_path="/bitnami/mysql/${sql_file_folder_name}"
for sql_file in $(ls "$sql_import_file_path" | sort -n -k1.1,1.2); do
echo "current file is ${sql_file}"
@@ -23,6 +20,12 @@ for sql_file in $(ls "$sql_import_file_path" | sort -n -k1.1,1.2); do
echo ""
done
INSERT INTO `uav_lifecycle`.`regulator` (`id`, `name`, `is_system_admin`, `telephone`, `avatar_url`, `authentication_status`, `authentication_time`, `password`, `password_modify_time`, `is_frozen`, `is_del`, `create_at`, `create_by`, `update_at`, `update_by`) VALUES (1, '超级管理员', b'1', LOWER(HEX(AES_ENCRYPT('13800000000','TELEPHONE'))), NULL, 0, NULL, '$2a$10$zaAxaqvNzx8HdERMTrOF6u.InuKLSSi2VGQDBmYuEIG56ZqV6TwBu', NOW(), b'0', b'0', NOW(), 'r_1', NOW(), 'r_1');
# doris initialization
mysql -uroot -hdoris-cluster-fe-internal -P9030 < 1node_table_init.sql
# nacos backup
## run inside the pod
mysqldump -uroot -pQzfXQhd3bQ -h127.0.0.1 -P3306 -t --set-gtid-purged=OFF cmii_nacos_config config_info his_config_info roles users > ${sql_import_file_path}/cmii_nacos_config_wdd.sql

Binary file not shown.

Binary file not shown.