Compare commits

..

10 Commits

Author SHA1 Message Date
zeaslity
4b274a02c8 新增雄安空能院项目 2025-08-12 09:59:32 +08:00
zeaslity
ce4165e36b update 2025-05-15 10:32:14 +08:00
zeaslity
56c79a02a8 修改CICD的jenkins构建脚本 2025-04-17 15:15:01 +08:00
zeaslity
ed9763f658 大量更新 2025-04-17 15:13:16 +08:00
zeaslity
edcb27f0dc 大量更新 2025-04-17 15:13:02 +08:00
zeaslity
b9a006fc23 Merge branch 'main' of https://gitea.107421.xyz/zeaslity/CmiiDeploy into main 2025-03-31 11:02:45 +08:00
zeaslity
30323ce15e 大量更新 2025-03-31 11:02:04 +08:00
zeaslity
06bb7fb300 common update 2025-03-17 09:19:32 +08:00
zeaslity
d8e2c67e36 123 2025-03-14 13:48:54 +08:00
zeaslity
77cafaf0a1 优化代码 2025-03-05 17:34:09 +08:00
245 changed files with 54206 additions and 2069 deletions

8
.idea/.gitignore generated vendored
View File

@@ -1,8 +0,0 @@
# 默认忽略的文件
/shelf/
/workspace.xml
# 数据源本地存储已忽略文件
/../../../../../../:\Users\wdd\Desktop\希腊项目-阿里云-德国\.idea/dataSources/
/dataSources.local.xml
# 基于编辑器的 HTTP 客户端请求
/httpRequests/

10
.idea/CmiiDeploy.iml generated
View File

@@ -1,10 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<module version="4">
<component name="TemplatesService">
<option name="TEMPLATE_FOLDERS">
<list>
<option value="$MODULE_DIR$/1-outside-deploy/2-helm-chart/nfs-subdir-external-provisioner/templates" />
</list>
</option>
</component>
</module>

12
.idea/dataSources.xml generated
View File

@@ -1,12 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="DataSourceManagerImpl" format="xml" multifile-model="true">
<data-source source="LOCAL" name="cmii@192.168.35.248" uuid="da65c4ab-9761-4932-8d77-09c9606d9b3e">
<driver-ref>mysql.8</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>com.mysql.cj.jdbc.Driver</jdbc-driver>
<jdbc-url>jdbc:mysql://192.168.35.248:3306/cmii</jdbc-url>
<working-dir>$ProjectFileDir$</working-dir>
</data-source>
</component>
</project>

9
.idea/encodings.xml generated
View File

@@ -1,9 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Encoding">
<file url="file://$PROJECT_DIR$/16-重庆公安/9-back.yaml" charset="GB18030" />
<file url="file://$PROJECT_DIR$/48-202412-浙江移动扩容/数据库增量570-611/0new_table_column.sql" charset="GB18030" />
<file url="file://$PROJECT_DIR$/48-202412-浙江移动扩容/数据库增量570-611/3new_table.sql" charset="GB18030" />
<file url="file://$PROJECT_DIR$/48-202412-浙江移动扩容/数据库增量570-611/4new_schema.sql" charset="GB18030" />
</component>
</project>

View File

@@ -1,23 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="fastRequestCollection">
<option name="detail">
<CollectionDetail>
<option name="childList">
<list>
<CollectionDetail>
<option name="groupId" value="1" />
<option name="id" value="1" />
<option name="name" value="Default Group" />
<option name="type" value="1" />
</CollectionDetail>
</list>
</option>
<option name="groupId" value="-1" />
<option name="id" value="0" />
<option name="name" value="Root" />
<option name="type" value="1" />
</CollectionDetail>
</option>
</component>
</project>

View File

@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="fastRequestCurrentProjectConfig">
<option name="domain" value="http://localhost:8000" />
<option name="enableEnv" value="agent" />
<option name="enableProject" value="ProjectOctopus" />
</component>
</project>

View File

@@ -1,9 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="fastRequestCurrentProjectLocalConfig">
<option name="domain" value="http://localhost:8000" />
<option name="enableEnv" value="agent" />
<option name="enableProject" value="ProjectOctopus" />
<option name="initFlag" value="1" />
</component>
</project>

View File

@@ -1,23 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="fastRequestCollection">
<option name="detail">
<CollectionDetail>
<option name="childList">
<list>
<CollectionDetail>
<option name="groupId" value="1" />
<option name="id" value="1" />
<option name="name" value="Default Group" />
<option name="type" value="1" />
</CollectionDetail>
</list>
</option>
<option name="groupId" value="-1" />
<option name="id" value="0" />
<option name="name" value="Root" />
<option name="type" value="1" />
</CollectionDetail>
</option>
</component>
</project>

View File

@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="fastRequestCurrentProjectConfig">
<option name="domain" value="http://localhost:8000" />
<option name="enableEnv" value="agent" />
<option name="enableProject" value="ProjectOctopus" />
</component>
</project>

View File

@@ -1,8 +0,0 @@
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="ShellCheck" enabled="true" level="ERROR" enabled_by_default="true">
<shellcheck_settings value="SC2129" />
</inspection_tool>
</profile>
</component>

View File

@@ -1,204 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="JavaDocConfiguration">
<GENERAL>
<MODE>UPDATE</MODE>
<OVERRIDDEN_METHODS>false</OVERRIDDEN_METHODS>
<SPLITTED_CLASS_NAME>true</SPLITTED_CLASS_NAME>
<LEVELS>
<LEVEL>TYPE</LEVEL>
<LEVEL>METHOD</LEVEL>
<LEVEL>FIELD</LEVEL>
</LEVELS>
<VISIBILITIES>
<VISIBILITY>PUBLIC</VISIBILITY>
<VISIBILITY>DEFAULT</VISIBILITY>
<VISIBILITY>PROTECTED</VISIBILITY>
</VISIBILITIES>
</GENERAL>
<TEMPLATES>
<CLASSES>
<CLASS>
<KEY>^.*(public|protected|private)*.+interface\s+\w+.*</KEY>
<VALUE>/**\n
* The interface ${name}.\n
&lt;#if element.typeParameters?has_content&gt; * \n
&lt;/#if&gt;
&lt;#list element.typeParameters as typeParameter&gt;
* @param &lt;${typeParameter.name}&gt; the type parameter\n
&lt;/#list&gt;
*/</VALUE>
</CLASS>
<CLASS>
<KEY>^.*(public|protected|private)*.+enum\s+\w+.*</KEY>
<VALUE>/**\n
* The enum ${name}.\n
*/</VALUE>
</CLASS>
<CLASS>
<KEY>^.*(public|protected|private)*.+class\s+\w+.*</KEY>
<VALUE>/**\n
* The type ${name}.\n
&lt;#if element.typeParameters?has_content&gt; * \n
&lt;/#if&gt;
&lt;#list element.typeParameters as typeParameter&gt;
* @param &lt;${typeParameter.name}&gt; the type parameter\n
&lt;/#list&gt;
*/</VALUE>
</CLASS>
<CLASS>
<KEY>.+</KEY>
<VALUE>/**\n
* The type ${name}.\n
*/</VALUE>
</CLASS>
</CLASSES>
<CONSTRUCTORS>
<CONSTRUCTOR>
<KEY>.+</KEY>
<VALUE>/**\n
* Instantiates a new ${name}.\n
&lt;#if element.parameterList.parameters?has_content&gt;
*\n
&lt;/#if&gt;
&lt;#list element.parameterList.parameters as parameter&gt;
* @param ${parameter.name} the ${paramNames[parameter.name]}\n
&lt;/#list&gt;
&lt;#if element.throwsList.referenceElements?has_content&gt;
*\n
&lt;/#if&gt;
&lt;#list element.throwsList.referenceElements as exception&gt;
* @throws ${exception.referenceName} the ${exceptionNames[exception.referenceName]}\n
&lt;/#list&gt;
*/</VALUE>
</CONSTRUCTOR>
</CONSTRUCTORS>
<METHODS>
<METHOD>
<KEY>^.*(public|protected|private)*\s*.*(\w(\s*&lt;.+&gt;)*)+\s+get\w+\s*\(.*\).+</KEY>
<VALUE>/**\n
* Gets ${partName}.\n
&lt;#if element.typeParameters?has_content&gt; * \n
&lt;/#if&gt;
&lt;#list element.typeParameters as typeParameter&gt;
* @param &lt;${typeParameter.name}&gt; the type parameter\n
&lt;/#list&gt;
&lt;#if element.parameterList.parameters?has_content&gt;
*\n
&lt;/#if&gt;
&lt;#list element.parameterList.parameters as parameter&gt;
* @param ${parameter.name} the ${paramNames[parameter.name]}\n
&lt;/#list&gt;
&lt;#if isNotVoid&gt;
*\n
* @return the ${partName}\n
&lt;/#if&gt;
&lt;#if element.throwsList.referenceElements?has_content&gt;
*\n
&lt;/#if&gt;
&lt;#list element.throwsList.referenceElements as exception&gt;
* @throws ${exception.referenceName} the ${exceptionNames[exception.referenceName]}\n
&lt;/#list&gt;
*/</VALUE>
</METHOD>
<METHOD>
<KEY>^.*(public|protected|private)*\s*.*(void|\w(\s*&lt;.+&gt;)*)+\s+set\w+\s*\(.*\).+</KEY>
<VALUE>/**\n
* Sets ${partName}.\n
&lt;#if element.typeParameters?has_content&gt; * \n
&lt;/#if&gt;
&lt;#list element.typeParameters as typeParameter&gt;
* @param &lt;${typeParameter.name}&gt; the type parameter\n
&lt;/#list&gt;
&lt;#if element.parameterList.parameters?has_content&gt;
*\n
&lt;/#if&gt;
&lt;#list element.parameterList.parameters as parameter&gt;
* @param ${parameter.name} the ${paramNames[parameter.name]}\n
&lt;/#list&gt;
&lt;#if isNotVoid&gt;
*\n
* @return the ${partName}\n
&lt;/#if&gt;
&lt;#if element.throwsList.referenceElements?has_content&gt;
*\n
&lt;/#if&gt;
&lt;#list element.throwsList.referenceElements as exception&gt;
* @throws ${exception.referenceName} the ${exceptionNames[exception.referenceName]}\n
&lt;/#list&gt;
*/</VALUE>
</METHOD>
<METHOD>
<KEY>^.*((public\s+static)|(static\s+public))\s+void\s+main\s*\(\s*String\s*(\[\s*\]|\.\.\.)\s+\w+\s*\).+</KEY>
<VALUE>/**\n
* The entry point of application.\n
&lt;#if element.parameterList.parameters?has_content&gt;
*\n
&lt;/#if&gt;
* @param ${element.parameterList.parameters[0].name} the input arguments\n
&lt;#if element.throwsList.referenceElements?has_content&gt;
*\n
&lt;/#if&gt;
&lt;#list element.throwsList.referenceElements as exception&gt;
* @throws ${exception.referenceName} the ${exceptionNames[exception.referenceName]}\n
&lt;/#list&gt;
*/</VALUE>
</METHOD>
<METHOD>
<KEY>.+</KEY>
<VALUE>/**\n
* ${name}&lt;#if isNotVoid&gt; ${return}&lt;/#if&gt;.\n
&lt;#if element.typeParameters?has_content&gt; * \n
&lt;/#if&gt;
&lt;#list element.typeParameters as typeParameter&gt;
* @param &lt;${typeParameter.name}&gt; the type parameter\n
&lt;/#list&gt;
&lt;#if element.parameterList.parameters?has_content&gt;
*\n
&lt;/#if&gt;
&lt;#list element.parameterList.parameters as parameter&gt;
* @param ${parameter.name} the ${paramNames[parameter.name]}\n
&lt;/#list&gt;
&lt;#if isNotVoid&gt;
*\n
* @return the ${return}\n
&lt;/#if&gt;
&lt;#if element.throwsList.referenceElements?has_content&gt;
*\n
&lt;/#if&gt;
&lt;#list element.throwsList.referenceElements as exception&gt;
* @throws ${exception.referenceName} the ${exceptionNames[exception.referenceName]}\n
&lt;/#list&gt;
*/</VALUE>
</METHOD>
</METHODS>
<FIELDS>
<FIELD>
<KEY>^.*(public|protected|private)*.+static.*(\w\s\w)+.+</KEY>
<VALUE>/**\n
* The constant ${element.getName()}.\n
*/</VALUE>
</FIELD>
<FIELD>
<KEY>^.*(public|protected|private)*.*(\w\s\w)+.+</KEY>
<VALUE>/**\n
&lt;#if element.parent.isInterface()&gt;
* The constant ${element.getName()}.\n
&lt;#else&gt;
* The ${name}.\n
&lt;/#if&gt; */</VALUE>
</FIELD>
<FIELD>
<KEY>.+</KEY>
<VALUE>/**\n
&lt;#if element.parent.isEnum()&gt;
*${name} ${typeName}.\n
&lt;#else&gt;
* The ${name}.\n
&lt;/#if&gt;*/</VALUE>
</FIELD>
</FIELDS>
</TEMPLATES>
</component>
</project>

9
.idea/libraries/.xml generated Normal file
View File

@@ -0,0 +1,9 @@
<component name="libraryTable">
<library name="江西移动升级">
<CLASSES>
<root url="jar://$PROJECT_DIR$/56-202501-江西升级/江西移动升级.zip!/" />
</CLASSES>
<JAVADOC />
<SOURCES />
</library>
</component>

9
.idea/libraries/5_4_5_8.xml generated Normal file
View File

@@ -0,0 +1,9 @@
<component name="libraryTable">
<library name="吉林移动5.4~5.8">
<CLASSES>
<root url="jar://$PROJECT_DIR$/20-吉林移动/吉林移动5.4~5.8.zip!/" />
</CLASSES>
<JAVADOC />
<SOURCES />
</library>
</component>

9
.idea/libraries/5_7_0_6_0_0.xml generated Normal file
View File

@@ -0,0 +1,9 @@
<component name="libraryTable">
<library name="厦门5.7.0~6.0.0">
<CLASSES>
<root url="jar://$PROJECT_DIR$/39-202409-厦门移动升级/厦门5.7.0~6.0.0.zip!/" />
</CLASSES>
<JAVADOC />
<SOURCES />
</library>
</component>

9
.idea/libraries/emqx_docker.xml generated Normal file
View File

@@ -0,0 +1,9 @@
<component name="libraryTable">
<library name="emqx-docker">
<CLASSES>
<root url="jar://$PROJECT_DIR$/27-深圳政数局/emqx-docker.zip!/" />
</CLASSES>
<JAVADOC />
<SOURCES />
</library>
</component>

3
.idea/misc.xml generated
View File

@@ -1,5 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" languageLevel="JDK_11" project-jdk-name="11" project-jdk-type="JavaSDK">
<component name="ProjectRootManager" version="2" languageLevel="JDK_23" default="true" project-jdk-name="graalvm-jdk-23" project-jdk-type="JavaSDK">
<output url="file://$PROJECT_DIR$/out" />
</component>
</project>

2
.idea/modules.xml generated
View File

@@ -2,7 +2,7 @@
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/CmiiDeploy.iml" filepath="$PROJECT_DIR$/.idea/CmiiDeploy.iml" />
<module fileurl="file://$PROJECT_DIR$/CmiiDeploy.iml" filepath="$PROJECT_DIR$/CmiiDeploy.iml" />
</modules>
</component>
</project>

View File

@@ -1,83 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="RemoteTargetsManager">
<option name="projectDefaultTargetUuid" value="51624e4c-80a9-4a06-9313-56dab7118e52" />
<targets>
<target name="uav-core-test-1" type="ssh/sftp" uuid="2b521e8a-5592-48ea-a85b-c8de45124db5">
<config>
<option name="projectRootOnTarget" value="/root/wdd/" />
<option name="serverName" value="root@192.168.34.251:22 password" />
</config>
<ContributedStateBase type="BashSupportLanguageRuntime">
<config />
</ContributedStateBase>
</target>
<target name="uav-dev-01" type="ssh/sftp" uuid="a43dc362-6ebf-46cd-9ad3-382918a7c3a6">
<config>
<option name="projectRootOnTarget" value="/root/wdd/" />
<option name="serverName" value="root@192.168.11.170:22 password" />
</config>
<ContributedStateBase type="BashSupportLanguageRuntime">
<config />
</ContributedStateBase>
</target>
<target name="uav-ops-4" type="ssh/sftp" uuid="84e27999-efe5-4d80-aaa2-29a68c258adb">
<config>
<option name="projectRootOnTarget" value="/root/22.希腊项目-阿里云-德国" />
<option name="serverName" value="root@192.168.34.40:22 password" />
</config>
<ContributedStateBase type="BashSupportLanguageRuntime">
<config />
</ContributedStateBase>
</target>
<target name="euler-test-server" type="ssh/sftp" uuid="51624e4c-80a9-4a06-9313-56dab7118e52">
<config>
<option name="projectRootOnTarget" value="/root/octopus/" />
<option name="serverName" value="root@10.250.0.60:22 password" />
</config>
<ContributedStateBase type="BashSupportLanguageRuntime">
<config />
</ContributedStateBase>
</target>
<target name="LapPro-Ubuntu" type="ssh/sftp" uuid="ac4e16bb-33e8-4d2a-8c3f-55ed459a9e82">
<config>
<option name="projectRootOnTarget" value="/root/wdd/shell/" />
<option name="serverName" value="root@10.250.0.100:22 password" />
<option name="useRsync" value="true" />
</config>
<ContributedStateBase type="BashSupportLanguageRuntime">
<config />
</ContributedStateBase>
</target>
<target name="重庆林业局" type="ssh/sftp" uuid="30bb2030-7d69-4023-90e7-82aa867fe2df">
<config>
<option name="projectRootOnTarget" value="/root/wdd/remote/" />
<option name="serverName" value="root@36.134.28.60:22 password" />
<option name="useRsync" value="true" />
</config>
<ContributedStateBase type="BashSupportLanguageRuntime">
<config />
</ContributedStateBase>
</target>
<target name="35-71" type="ssh/sftp" uuid="77aa8fb3-ebc4-46fb-b1da-df2dcbe32be5">
<config>
<option name="projectRootOnTarget" value="/root/test" />
<option name="serverName" value="root@192.168.35.71:22 agent" />
<option name="useRsync" value="true" />
</config>
<ContributedStateBase type="BashSupportLanguageRuntime">
<config />
</ContributedStateBase>
</target>
<target name="WSL - Ubuntu" type="wsl" uuid="26027ea5-f011-4fd5-8604-885011e3554e">
<config>
<option name="distributionMsId" value="Ubuntu" />
<option name="projectRootOnTarget" value="{exitCode=0, timeout=false, cancelled=false, stdout=/tmp/tmp.se9bQXBPo2&#10;, stderr=}/22.希腊项目-阿里云-德国" />
</config>
<ContributedStateBase type="BashSupportLanguageRuntime">
<config />
</ContributedStateBase>
</target>
</targets>
</component>
</project>

View File

@@ -1,8 +0,0 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="删除状态为异常的Pod-Core" type="BashProRunConfiguration" singleton="false">
<target name="uav-core-test-1" />
<option name="scriptPath" value="$PROJECT_DIR$/../0-常用脚本/uavcloud-被cmii_operator替代/uavcloud-删除状态为异常的Pod.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
</component>

View File

@@ -1,10 +0,0 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="删除状态为异常的Pod-dev" type="BashProRunConfiguration" singleton="false">
<target name="uav-dev-01" />
<option name="scriptPath" value="$PROJECT_DIR$/../0-常用脚本/uavcloud-被cmii_operator替代/uavcloud-删除状态为异常的Pod.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2">
<option name="RunConfigurationTask" enabled="true" run_configuration_name="删除状态为异常的Pod-Core" run_configuration_type="BashProRunConfiguration" />
</method>
</configuration>
</component>

245
.idea/workspace.xml generated Normal file
View File

@@ -0,0 +1,245 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="AutoImportSettings">
<option name="autoReloadType" value="SELECTIVE" />
</component>
<component name="ChangeListManager">
<list default="true" id="a078e6aa-c7c7-487c-ab23-90fee7ad88b2" name="Changes" comment="修改CICD的jenkins构建脚本">
<change afterPath="$PROJECT_DIR$/58-202503-新DEMO环境/1-磁盘挂载.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/0-批量脚本.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/ai-config.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/cluster.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/0-节点lable.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-all-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-be-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-be-statefulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-be-statusfulset-localpv-failed.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-fe-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-fe-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/doris-部署/doris-local-pv.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/install_docker_offline.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/sshd_config" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/分块压缩合.md" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/压缩文件包.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/all-statefull_sets-zjyd.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/install_auth.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/nginx-web.conf" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/nginx-端口转发.conf" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/主机授权文件.json" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/已有部署备份/授权码.json" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-backend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-frontend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-ingress.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-nfs-test.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-nfs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/66-202505-浙江二级监管/部署文件/k8s-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/0-批量脚本.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/cluster.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/cmii-update.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-backend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-dashboard.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-emqx.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-frontend.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-ingress.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-mongo.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-mysql.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-nacos.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-nfs-test.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-nfs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-rabbitmq.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-redis.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-srs.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/disk.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-be-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-be-internal-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-be-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-be-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-fe-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-fe-internal-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-fe-service.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-fe-statusfulset.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/doris-pvc.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/doris-deploy/修改pvc-然后statefulset中的image.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/helm-minio.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/x_minio初始化.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/重要备份.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/b-联网-docker安装.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/x_minio初始化.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/1-高级-磁盘挂载.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/12-连云港公安-5.1.0/2-helm-chart/8-gdr-server.sh" beforeDir="false" afterPath="$PROJECT_DIR$/12-连云港公安-5.1.0/2-helm-chart/8-gdr-server.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/30-刘喜通感-第三次部署/磁盘格式化.sh" beforeDir="false" afterPath="$PROJECT_DIR$/30-刘喜通感-第三次部署/磁盘格式化.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-be-statusfulset.yaml" beforeDir="false" afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris-deploy/doris-be-statusfulset.yaml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/65-202504-湖南二级监管/doris部署-operator/doris-deplyment.yaml" beforeDir="false" afterPath="$PROJECT_DIR$/65-202504-湖南二级监管/doris部署-operator/doris-deplyment.yaml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-Agent-WDD运行/啊-批量命令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/deploy-nfs-server.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/deploy-nfs-server.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/真实nginx-reverse-proxy.conf" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/设置ingress-nginx.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/nginx暴露/设置ingress-nginx.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/z_执行apply命令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/z_执行apply命令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/手动创建harbor仓库.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/手动创建harbor仓库.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/编辑calico状态.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/编辑calico状态.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/ImageSyncDLTU.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/b-镜像同步/ImageSyncDLTU.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/故障恢复脚本/重启cmii的前端后端Pod.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/故障恢复脚本/重启cmii的前端后端Pod.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/更新脚本/一键更新Tag脚本.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/更新脚本/一键更新Tag脚本.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/0-挂载磁盘.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/0-挂载磁盘.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/重启服务器恢复/1-重启脚本.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/重启服务器恢复/1-重启脚本.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/重启服务器恢复/1.1-minio-重启脚本.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/重启服务器恢复/1.1-minio-重启脚本.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/999-数据库脚本/z_database_execute.sh" beforeDir="false" afterPath="$PROJECT_DIR$/999-数据库脚本/z_database_execute.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/999-部署模板/kubectl" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/999-部署模板/rke" beforeDir="false" />
</list>
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="Git.Settings">
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
</component>
<component name="KubernetesApiPersistence">{}</component>
<component name="KubernetesApiProvider">{
&quot;isMigrated&quot;: true
}</component>
<component name="ProjectColorInfo">{
&quot;customColor&quot;: &quot;&quot;,
&quot;associatedIndex&quot;: 7
}</component>
<component name="ProjectId" id="2uIM2GeFYYmQNl2rSxI0hVkWGfv" />
<component name="ProjectLevelVcsManager">
<ConfirmationsSetting value="2" id="Add" />
</component>
<component name="ProjectViewState">
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent">{
&quot;keyToString&quot;: {
&quot;KUBERNETES_SUPPRESS_CONFIG_CLUSTER_SUGGESTION&quot;: &quot;true&quot;,
&quot;RunOnceActivity.ShowReadmeOnStart&quot;: &quot;true&quot;,
&quot;RunOnceActivity.git.unshallow&quot;: &quot;true&quot;,
&quot;RunOnceActivity.go.formatter.settings.were.checked&quot;: &quot;true&quot;,
&quot;RunOnceActivity.go.migrated.go.modules.settings&quot;: &quot;true&quot;,
&quot;SHARE_PROJECT_CONFIGURATION_FILES&quot;: &quot;true&quot;,
&quot;git-widget-placeholder&quot;: &quot;main&quot;,
&quot;go.import.settings.migrated&quot;: &quot;true&quot;,
&quot;last_opened_file_path&quot;: &quot;C:/Users/wddsh/Documents/IdeaProjects/CmiiDeploy/67-202508-雄安空能院&quot;,
&quot;node.js.detected.package.eslint&quot;: &quot;true&quot;,
&quot;node.js.detected.package.tslint&quot;: &quot;true&quot;,
&quot;node.js.selected.package.eslint&quot;: &quot;(autodetect)&quot;,
&quot;node.js.selected.package.tslint&quot;: &quot;(autodetect)&quot;,
&quot;nodejs_package_manager_path&quot;: &quot;npm&quot;,
&quot;settings.editor.selected.configurable&quot;: &quot;editor.preferences.tabs&quot;,
&quot;vue.rearranger.settings.migration&quot;: &quot;true&quot;
}
}</component>
<component name="RecentsManager">
<key name="CopyFile.RECENT_KEYS">
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院\deploy" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院\doris-deploy" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\66-202505-浙江二级监管\已有部署备份" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\66-202505-浙江二级监管\部署文件" />
</key>
<key name="MoveFile.RECENT_KEYS">
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\" />
<recent name="C:\Users\wdd\Documents\IdeaProjects\CmiiDeploy\999-部署模板" />
</key>
</component>
<component name="SharedIndexes">
<attachedChunks>
<set>
<option value="bundled-js-predefined-d6986cc7102b-e03c56caf84a-JavaScript-IU-252.23892.409" />
</set>
</attachedChunks>
</component>
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="a078e6aa-c7c7-487c-ab23-90fee7ad88b2" name="Changes" comment="" />
<created>1741931426767</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1741931426767</updated>
<workItem from="1741931428457" duration="2818000" />
<workItem from="1742174299939" duration="90000" />
<workItem from="1742261723861" duration="1864000" />
<workItem from="1743390190498" duration="8331000" />
<workItem from="1743470150413" duration="10097000" />
<workItem from="1743643604712" duration="3410000" />
<workItem from="1743989590460" duration="6809000" />
<workItem from="1744092938322" duration="6438000" />
<workItem from="1744272013796" duration="6427000" />
<workItem from="1744768739183" duration="16694000" />
<workItem from="1745226889904" duration="10000" />
<workItem from="1745227705639" duration="6000" />
<workItem from="1747105478121" duration="6419000" />
<workItem from="1747206836624" duration="4541000" />
<workItem from="1747276114032" duration="9000" />
<workItem from="1747276405002" duration="63000" />
<workItem from="1748334855133" duration="976000" />
<workItem from="1748340788900" duration="276000" />
<workItem from="1748352348649" duration="849000" />
<workItem from="1748397131812" duration="14964000" />
<workItem from="1748482510696" duration="20005000" />
<workItem from="1748516175436" duration="1120000" />
<workItem from="1748571366058" duration="733000" />
<workItem from="1749025182387" duration="11000" />
<workItem from="1749086362716" duration="2000" />
<workItem from="1750151734362" duration="808000" />
<workItem from="1751268399225" duration="1300000" />
<workItem from="1751269718729" duration="3000" />
<workItem from="1752545931042" duration="197000" />
<workItem from="1752560622970" duration="149000" />
<workItem from="1754445517550" duration="9099000" />
<workItem from="1754530170490" duration="4574000" />
<workItem from="1754546282094" duration="6319000" />
<workItem from="1754616863007" duration="8752000" />
<workItem from="1754744988183" duration="10000" />
<workItem from="1754963140025" duration="708000" />
</task>
<task id="LOCAL-00001" summary="common update">
<option name="closed" value="true" />
<created>1742174375760</created>
<option name="number" value="00001" />
<option name="presentableId" value="LOCAL-00001" />
<option name="project" value="LOCAL" />
<updated>1742174375760</updated>
</task>
<task id="LOCAL-00002" summary="修改CICD的jenkins构建脚本">
<option name="closed" value="true" />
<created>1744874102820</created>
<option name="number" value="00002" />
<option name="presentableId" value="LOCAL-00002" />
<option name="project" value="LOCAL" />
<updated>1744874102820</updated>
</task>
<option name="localTasksCounter" value="3" />
<servers />
</component>
<component name="TypeScriptGeneratedFilesManager">
<option name="version" value="3" />
</component>
<component name="Vcs.Log.Tabs.Properties">
<option name="TAB_STATES">
<map>
<entry key="MAIN">
<value>
<State />
</value>
</entry>
</map>
</option>
</component>
<component name="VcsManagerConfiguration">
<MESSAGE value="common update" />
<MESSAGE value="修改CICD的jenkins构建脚本" />
<option name="LAST_COMMIT_MESSAGE" value="修改CICD的jenkins构建脚本" />
</component>
<component name="VgoProject">
<settings-migrated>true</settings-migrated>
</component>
</project>

883
.idea/workspace_1.xml generated
View File

@@ -1,883 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="AutoImportSettings">
<option name="autoReloadType" value="SELECTIVE" />
</component>
<component name="ChangeListManager">
<list default="true" id="536ce587-a268-4c42-86be-a9a7ea406a07" name="默认变更列表" comment="" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="FileTemplateManagerImpl">
<option name="RECENT_TEMPLATES">
<list>
<option value="BashSupport Pro Bash" />
</list>
</option>
</component>
<component name="GOROOT" url="file://$PROJECT_DIR$/../../../../../../../Program Files/Go" />
<component name="GitSEFilterConfiguration">
<file-type-list>
<filtered-out-file-type name="LOCAL_BRANCH" />
<filtered-out-file-type name="REMOTE_BRANCH" />
<filtered-out-file-type name="TAG" />
<filtered-out-file-type name="COMMIT_BY_MESSAGE" />
</file-type-list>
</component>
<component name="GoLibraries">
<option name="indexEntireGoPath" value="true" />
</component>
<component name="KubernetesApiProvider">{
&quot;isMigrated&quot;: true
}</component>
<component name="KubernetesSettings">
<option name="contextName" value="rke-cluster" />
</component>
<component name="MarkdownSettingsMigration">
<option name="stateVersion" value="1" />
</component>
<component name="ProjectColorInfo">{
&quot;customColor&quot;: &quot;&quot;,
&quot;associatedIndex&quot;: 2
}</component>
<component name="ProjectId" id="29YRFevtS0e3ceP6bRic8yRRUAg" />
<component name="ProjectViewState">
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent"><![CDATA[{
"keyToString": {
"BashSupport Pro.00-install-all-demand-softwares.sh.executor": "Run",
"BashSupport Pro.deploy-nfs-server.sh.executor": "Run",
"BashSupport Pro.k8s-ssl-CVE-2016-2183-漏洞修复.sh.executor": "Run",
"BashSupport Pro.lap从35-71同步所有得离线镜像.sh.executor": "Run",
"BashSupport Pro.uavcloud-回复副本为0的deployment.sh.executor": "Run",
"BashSupport Pro.uavcloud-查看命名空间中所有configmap内容.sh.executor": "Run",
"BashSupport Pro.uavcloud-消灭重启次数过多的应用.sh.executor": "Run",
"BashSupport Pro.uavcloud-调整副本从0到2.sh.executor": "Run",
"BashSupport Pro.uavcloud-重启全部的应用.sh.executor": "Run",
"BashSupport Pro.uavcloud-重启特定的应用.sh.executor": "Run",
"BashSupport Pro.uavcloud-重启过多置0-CORE.executor": "Run",
"BashSupport Pro.uavcloud-重启过多置0-DEV.executor": "Run",
"BashSupport Pro.下载离线安装包.sh.executor": "Run",
"BashSupport Pro.删除状态为异常的Pod-Core.executor": "Run",
"BashSupport Pro.删除状态为异常的Pod-dev.executor": "Run",
"BashSupport Pro.删除状态为异常的Pod.sh.executor": "Run",
"BashSupport Pro.批量命令.sh.executor": "Run",
"BashSupport Pro.更改所有deployment的版本号.sh.executor": "Run",
"BashSupport Pro.清除3571上错误的tag.sh.executor": "Run",
"BashSupport Pro.离线更新脚本.sh.executor": "Run",
"PowerShell.1-proxy-setup.ps1.executor": "Run",
"RunOnceActivity.OpenProjectViewOnStart": "true",
"RunOnceActivity.ShowReadmeOnStart": "true",
"RunOnceActivity.go.format.on.save.advertiser.fired": "true",
"RunOnceActivity.go.formatter.settings.were.checked": "true",
"RunOnceActivity.go.migrated.go.modules.settings": "true",
"WebServerToolWindowFactoryState": "false",
"com.google.cloudcode.ide_session_index": "20221216_0001",
"go.import.settings.migrated": "true",
"go.sdk.automatically.set": "true",
"ignore.virus.scanning.warn.message": "true",
"jdk.selected.JAVA_MODULE": "semeru-17",
"kotlin-language-version-configured": "true",
"last_opened_file_path": "D:/NextCloudData/MasterLoad/Work/UAV Cloud/22.希腊项目-阿里云-德国/27-深圳政数局",
"node.js.detected.package.eslint": "true",
"node.js.detected.package.tslint": "true",
"node.js.selected.package.eslint": "(autodetect)",
"node.js.selected.package.tslint": "(autodetect)",
"nodejs_package_manager_path": "npm",
"project.structure.last.edited": "SDKs",
"project.structure.proportion": "0.15",
"project.structure.side.proportion": "0.2",
"run.code.analysis.last.selected.profile": "pProject Default",
"settings.editor.selected.configurable": "preferences.lookFeel",
"vue.rearranger.settings.migration": "true"
}
}]]></component>
<component name="RecentsManager">
<key name="CopyFile.RECENT_KEYS">
<recent name="D:\NextCloudData\MasterLoad\Work\UAV Cloud\22.希腊项目-阿里云-德国\27-深圳政数局" />
<recent name="D:\NextCloudData\MasterLoad\Work\UAV Cloud\22.希腊项目-阿里云-德国\29-重庆三化" />
<recent name="D:\NextCloudData\MasterLoad\Work\UAV Cloud\22.希腊项目-阿里云-德国\28-生产环境hw" />
<recent name="D:\NextCloudData\MasterLoad\Work\UAV Cloud\22.希腊项目-阿里云-德国\998-常用脚本\容器镜像ARM" />
<recent name="D:\NextCloudData\MasterLoad\Work\UAV Cloud\22.希腊项目-阿里云-德国\999-数据库脚本" />
</key>
<key name="MoveFile.RECENT_KEYS">
<recent name="D:\NextCloudData\MasterLoad\Work\UAV Cloud\22.希腊项目-阿里云-德国\998-常用脚本\镜像同步" />
<recent name="D:\NextCloudData\MasterLoad\Work\UAV Cloud\22.希腊项目-阿里云-德国" />
<recent name="D:\NextCloudData\MasterLoad\Work\UAV Cloud\22.希腊项目-阿里云-德国\998-常用脚本\部署脚本" />
<recent name="D:\NextCloudData\MasterLoad\Work\UAV Cloud\22.希腊项目-阿里云-德国\999-数据库脚本\5.5.0" />
<recent name="C:\Users\wddsh\Nextcloud\MasterLoad\Work\UAV Cloud\22.希腊项目-阿里云-德国\6-厦门移动-4.1.0" />
</key>
</component>
<component name="RunManager">
<configuration default="true" type="DjangoTestsConfigurationType">
<module name="希腊项目-阿里云-德国" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="TARGET" value="" />
<option name="SETTINGS_FILE" value="" />
<option name="CUSTOM_SETTINGS" value="false" />
<option name="USE_OPTIONS" value="false" />
<option name="OPTIONS" value="" />
<method v="2" />
</configuration>
<configuration default="true" type="GoApplicationRunConfiguration" factoryName="Go Application">
<module name="希腊项目-阿里云-德国" />
<working_directory value="$PROJECT_DIR$" />
<go_parameters value="-i" />
<kind value="FILE" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$" />
<method v="2" />
</configuration>
<configuration default="true" type="GoTestRunConfiguration" factoryName="Go Test">
<module name="希腊项目-阿里云-德国" />
<working_directory value="$PROJECT_DIR$" />
<go_parameters value="-i" />
<kind value="DIRECTORY" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$" />
<framework value="gotest" />
<method v="2" />
</configuration>
<configuration default="true" type="PythonConfigurationType" factoryName="Python">
<module name="希腊项目-阿里云-德国" />
<option name="ENV_FILES" value="" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration default="true" type="PythonConfigurationType" factoryName="Python">
<module name="希腊项目-阿里云-德国" />
<option name="ENV_FILES" value="" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration default="true" type="Python.FlaskServer">
<module name="希腊项目-阿里云-德国" />
<option name="ENV_FILES" value="" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="launchJavascriptDebuger" value="false" />
<method v="2" />
</configuration>
<configuration default="true" type="Python.FlaskServer">
<module name="希腊项目-阿里云-德国" />
<option name="ENV_FILES" value="" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="launchJavascriptDebuger" value="false" />
<method v="2" />
</configuration>
<configuration default="true" type="Tox" factoryName="Tox">
<module name="希腊项目-阿里云-德国" />
<option name="ENV_FILES" value="" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<method v="2" />
</configuration>
<configuration default="true" type="Tox" factoryName="Tox">
<module name="希腊项目-阿里云-德国" />
<option name="ENV_FILES" value="" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<method v="2" />
</configuration>
<configuration name="00-install-all-demand-softwares.sh" type="BashProRunConfiguration">
<option name="outputCharset" value="UTF-8" />
<target name="重庆林业局" />
<option name="scriptPath" value="$PROJECT_DIR$/17-重庆林业局/00-install-all-demand-softwares.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="00-install-all-demand-softwares.sh" type="BashProRunConfiguration">
<option name="outputCharset" value="UTF-8" />
<target name="重庆林业局" />
<option name="scriptPath" value="$PROJECT_DIR$/17-重庆林业局/00-install-all-demand-softwares.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="清除3571上错误的tag.sh" type="BashProRunConfiguration">
<target name="35-71" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/清除3571上错误的tag.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="清除3571上错误的tag.sh" type="BashProRunConfiguration">
<target name="35-71" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/清除3571上错误的tag.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="下载离线安装包.sh" type="BashProRunConfiguration">
<target name="重庆林业局" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/工具脚本/centos下载离线安装包.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="批量命令.sh" type="BashProRunConfiguration">
<target name="uav-ops-4" />
<option name="scriptPath" value="$PROJECT_DIR$/26-开发高可用集群/批量命令.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="批量命令.sh" type="BashProRunConfiguration" temporary="true">
<target name="uav-ops-4" />
<option name="scriptPath" value="$PROJECT_DIR$/26-开发高可用集群/批量命令.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="批量命令.sh" type="BashProRunConfiguration" temporary="true">
<target name="uav-ops-4" />
<option name="scriptPath" value="$PROJECT_DIR$/26-开发高可用集群/批量命令.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="批量命令.sh" type="BashProRunConfiguration">
<target name="uav-ops-4" />
<option name="scriptPath" value="$PROJECT_DIR$/26-开发高可用集群/批量命令.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="批量命令.sh" type="BashProRunConfiguration" temporary="true">
<target name="uav-ops-4" />
<option name="scriptPath" value="$PROJECT_DIR$/26-开发高可用集群/批量命令.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="批量命令.sh" type="BashProRunConfiguration">
<target name="uav-ops-4" />
<option name="scriptPath" value="$PROJECT_DIR$/26-开发高可用集群/批量命令.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="下载离线安装包.sh" type="BashProRunConfiguration">
<target name="重庆林业局" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/工具脚本/centos下载离线安装包.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="离线更新脚本.sh" type="BashProRunConfiguration">
<target name="root@192.168.35.71:22" />
<option name="scriptParameters" value="cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz" />
<option name="scriptPath" value="$PROJECT_DIR$/16-重庆公安/离线更新脚本.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="deploy-nfs-server.sh" type="BashProRunConfiguration">
<target name="重庆林业局" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/部署脚本/deploy-nfs-server.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="deploy-nfs-server.sh" type="BashProRunConfiguration">
<target name="重庆林业局" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/部署脚本/deploy-nfs-server.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="更改所有deployment的版本号.sh" type="BashProRunConfiguration">
<option name="outputCharset" value="UTF-8" />
<target name="root@192.168.34.251:22" />
<option name="scriptPath" value="$PROJECT_DIR$/14-UAT-CHBN/更改所有deployment的版本号.sh" />
<RunnerSettings bashdbVersion="BashV4" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="更改所有deployment的版本号.sh" type="BashProRunConfiguration">
<option name="outputCharset" value="UTF-8" />
<target name="root@192.168.34.251:22" />
<option name="scriptPath" value="$PROJECT_DIR$/14-UAT-CHBN/更改所有deployment的版本号.sh" />
<RunnerSettings bashdbVersion="BashV4" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="k8s-ssl-CVE-2016-2183-漏洞修复.sh" type="BashProRunConfiguration">
<target name="uav-ops-4" />
<option name="scriptPath" value="$PROJECT_DIR$/15-漏洞修复/demo环境漏洞修复/k8s-ssl-CVE-2016-2183-漏洞修复.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="k8s-ssl-CVE-2016-2183-漏洞修复.sh" type="BashProRunConfiguration">
<target name="uav-ops-4" />
<option name="scriptPath" value="$PROJECT_DIR$/15-漏洞修复/demo环境漏洞修复/k8s-ssl-CVE-2016-2183-漏洞修复.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="lap从35-71同步所有得离线镜像.sh" type="BashProRunConfiguration">
<target name="LapPro-Ubuntu" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/工具脚本/lap从35-71同步所有得离线镜像.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="lap从35-71同步所有得离线镜像.sh" type="BashProRunConfiguration">
<target name="LapPro-Ubuntu" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/工具脚本/lap从35-71同步所有得离线镜像.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="uavcloud-重启过多置0-CORE" type="BashProRunConfiguration">
<option name="outputCharset" value="ISO-8859-1" />
<target name="uav-core-test-1" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/uavcloud-被cmii_operator替代/uavcloud-消灭重启次数过多的应用.sh" />
<RunnerSettings bashdbVersion="BashV5" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2">
<option name="RunConfigurationTask" enabled="true" run_configuration_name="uavcloud-重启过多置0-DEV" run_configuration_type="BashProRunConfiguration" />
</method>
</configuration>
<configuration name="uavcloud-重启过多置0-CORE" type="BashProRunConfiguration">
<option name="outputCharset" value="ISO-8859-1" />
<target name="uav-core-test-1" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/uavcloud-被cmii_operator替代/uavcloud-消灭重启次数过多的应用.sh" />
<RunnerSettings bashdbVersion="BashV5" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2">
<option name="RunConfigurationTask" enabled="true" run_configuration_name="uavcloud-重启过多置0-DEV" run_configuration_type="BashProRunConfiguration" />
</method>
</configuration>
<configuration name="uavcloud-重启过多置0-DEV" type="BashProRunConfiguration">
<option name="outputCharset" value="ISO-8859-1" />
<target name="uav-dev-01" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/uavcloud-被cmii_operator替代/uavcloud-消灭重启次数过多的应用.sh" />
<RunnerSettings bashdbVersion="BashV5" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="uavcloud-重启过多置0-DEV" type="BashProRunConfiguration">
<option name="outputCharset" value="ISO-8859-1" />
<target name="uav-dev-01" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/uavcloud-被cmii_operator替代/uavcloud-消灭重启次数过多的应用.sh" />
<RunnerSettings bashdbVersion="BashV5" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="uavcloud-调整副本从0到2.sh" type="BashProRunConfiguration">
<option name="scriptPath" value="$PROJECT_DIR$/12-连云港公安-5.1.0/common-usage/uavcloud-调整副本数量.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="uavcloud-调整副本从0到2.sh" type="BashProRunConfiguration">
<option name="scriptPath" value="$PROJECT_DIR$/12-连云港公安-5.1.0/common-usage/uavcloud-调整副本数量.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="uavcloud-回复副本为0的deployment.sh" type="BashProRunConfiguration">
<target name="uav-dev-01" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/uavcloud-被cmii_operator替代/uavcloud-回复副本为0的deployment.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="uavcloud-回复副本为0的deployment.sh" type="BashProRunConfiguration">
<target name="uav-dev-01" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/uavcloud-被cmii_operator替代/uavcloud-回复副本为0的deployment.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="uavcloud-重启全部的应用.sh" type="BashProRunConfiguration" temporary="true">
<option name="scriptPath" value="$PROJECT_DIR$/12-连云港公安-5.1.0/common-usage/uavcloud-重启全部的应用.sh" />
<method v="2" />
</configuration>
<configuration name="uavcloud-重启全部的应用.sh" type="BashProRunConfiguration" temporary="true">
<option name="scriptPath" value="$PROJECT_DIR$/12-连云港公安-5.1.0/common-usage/uavcloud-重启全部的应用.sh" />
<method v="2" />
</configuration>
<configuration name="uavcloud-重启特定的应用.sh" type="BashProRunConfiguration" temporary="true">
<option name="scriptPath" value="$PROJECT_DIR$/12-连云港公安-5.1.0/common-usage/uavcloud-重启-副本数量.sh" />
<method v="2" />
</configuration>
<configuration name="uavcloud-查看命名空间中所有configmap内容.sh" type="BashProRunConfiguration">
<target name="uav-core-test-1" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/uavcloud-被cmii_operator替代/uavcloud-查看命名空间中所有configmap内容.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration name="uavcloud-查看命名空间中所有configmap内容.sh" type="BashProRunConfiguration">
<target name="uav-core-test-1" />
<option name="scriptPath" value="$PROJECT_DIR$/0-常用脚本/uavcloud-被cmii_operator替代/uavcloud-查看命名空间中所有configmap内容.sh" />
<RunnerSettings bashdbVersion="Automatic" bashPathMapper="Automatic" RunnerId="pro.bashsupport.shDebugRunner" />
<method v="2" />
</configuration>
<configuration default="true" type="GoApplicationRunConfiguration" factoryName="Go Application">
<module name="希腊项目-阿里云-德国" />
<working_directory value="$PROJECT_DIR$" />
<go_parameters value="-i" />
<kind value="FILE" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$" />
<method v="2" />
</configuration>
<configuration default="true" type="GoTestRunConfiguration" factoryName="Go Test">
<module name="希腊项目-阿里云-德国" />
<working_directory value="$PROJECT_DIR$" />
<go_parameters value="-i" />
<kind value="DIRECTORY" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$" />
<framework value="gotest" />
<method v="2" />
</configuration>
<configuration name="1-proxy-setup.ps1" type="PowerShellRunType" factoryName="PowerShell" temporary="true" scriptUrl="$PROJECT_DIR$/19-上海雷视/1-proxy-setup.ps1" workingDirectory="C:\Users\wddsh" executablePath="$PROJECT_DIR$/../../../../../../../Windows/System32/WindowsPowerShell/v1.0/powershell.exe">
<envs />
<method v="2" />
</configuration>
<configuration name="1-proxy-setup.ps1" type="PowerShellRunType" factoryName="PowerShell" temporary="true" scriptUrl="$PROJECT_DIR$/19-上海雷视/1-proxy-setup.ps1" workingDirectory="C:\Users\wddsh" executablePath="$PROJECT_DIR$/../../../../../../../Windows/System32/WindowsPowerShell/v1.0/powershell.exe">
<envs />
<method v="2" />
</configuration>
</component>
<component name="SharedIndexes">
<attachedChunks>
<set>
<option value="bundled-jdk-9823dce3aa75-b114ca120d71-intellij.indexing.shared.core-IU-242.20224.419" />
</set>
</attachedChunks>
</component>
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
<component name="TaskManager">
<task active="true" id="Default" summary="默认任务">
<changelist id="536ce587-a268-4c42-86be-a9a7ea406a07" name="默认变更列表" comment="" />
<created>1653287930427</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1653287930427</updated>
<workItem from="1653287931662" duration="7481000" />
<workItem from="1653458002388" duration="5154000" />
<workItem from="1653630320928" duration="4308000" />
<workItem from="1653873520244" duration="284000" />
<workItem from="1653875140037" duration="15368000" />
<workItem from="1653966593588" duration="1123000" />
<workItem from="1653976264777" duration="25090000" />
<workItem from="1654479875116" duration="5119000" />
<workItem from="1659412035614" duration="9962000" />
<workItem from="1659494508062" duration="1292000" />
<workItem from="1659923625074" duration="4996000" />
<workItem from="1659943895997" duration="16600000" />
<workItem from="1660034833244" duration="1613000" />
<workItem from="1660095207100" duration="6485000" />
<workItem from="1660114711677" duration="4932000" />
<workItem from="1660198379225" duration="12136000" />
<workItem from="1660525792807" duration="48209000" />
<workItem from="1661409375146" duration="16384000" />
<workItem from="1661757807739" duration="6539000" />
<workItem from="1661839830177" duration="22997000" />
<workItem from="1662692074205" duration="8022000" />
<workItem from="1663036095769" duration="1610000" />
<workItem from="1663319412391" duration="87000" />
<workItem from="1663319512826" duration="2174000" />
<workItem from="1663554896267" duration="8799000" />
<workItem from="1663659433000" duration="10515000" />
<workItem from="1664156538751" duration="344000" />
<workItem from="1664157258461" duration="1251000" />
<workItem from="1664433171017" duration="511000" />
<workItem from="1664433760386" duration="3603000" />
<workItem from="1665299351894" duration="2661000" />
<workItem from="1665373258253" duration="18000" />
<workItem from="1665457759742" duration="2378000" />
<workItem from="1665987986296" duration="1079000" />
<workItem from="1666169070993" duration="740000" />
<workItem from="1666678025967" duration="4055000" />
<workItem from="1666765127900" duration="619000" />
<workItem from="1666772921175" duration="1054000" />
<workItem from="1667196457858" duration="1641000" />
<workItem from="1667202590505" duration="4655000" />
<workItem from="1667284710404" duration="1353000" />
<workItem from="1667286891937" duration="18889000" />
<workItem from="1667445065742" duration="11000" />
<workItem from="1667456961956" duration="7631000" />
<workItem from="1668650197491" duration="6154000" />
<workItem from="1668680514776" duration="597000" />
<workItem from="1668755744752" duration="776000" />
<workItem from="1669099167300" duration="1553000" />
<workItem from="1669101055728" duration="2856000" />
<workItem from="1669173565133" duration="1318000" />
<workItem from="1669178158575" duration="3994000" />
<workItem from="1669707860073" duration="713000" />
<workItem from="1669789170654" duration="8878000" />
<workItem from="1669873612650" duration="53000" />
<workItem from="1669876511419" duration="2486000" />
<workItem from="1669948157161" duration="4165000" />
<workItem from="1669960369768" duration="2010000" />
<workItem from="1669965620429" duration="1950000" />
<workItem from="1669972051865" duration="442000" />
<workItem from="1670205899738" duration="8235000" />
<workItem from="1670233201081" duration="133000" />
<workItem from="1670233549976" duration="939000" />
<workItem from="1670463109495" duration="3158000" />
<workItem from="1670479679445" duration="4841000" />
<workItem from="1670566318209" duration="10845000" />
<workItem from="1670581006494" duration="71000" />
<workItem from="1670826567737" duration="4225000" />
<workItem from="1671178524516" duration="97071000" />
<workItem from="1673942094864" duration="231000" />
<workItem from="1676267908435" duration="861000" />
<workItem from="1676272980272" duration="412000" />
<workItem from="1676274246861" duration="1809000" />
<workItem from="1677045275225" duration="659000" />
<workItem from="1677728040197" duration="826000" />
<workItem from="1677736045109" duration="599000" />
<workItem from="1681450974797" duration="1130000" />
<workItem from="1685324368525" duration="17269000" />
<workItem from="1685409909814" duration="35231000" />
<workItem from="1685583422804" duration="6399000" />
<workItem from="1685668978991" duration="5578000" />
<workItem from="1685930656147" duration="11618000" />
<workItem from="1686117656483" duration="25406000" />
<workItem from="1686532873695" duration="595000" />
<workItem from="1686643358477" duration="7265000" />
<workItem from="1686878873283" duration="596000" />
<workItem from="1687143305269" duration="416000" />
<workItem from="1687143826364" duration="28000" />
<workItem from="1687143867025" duration="596000" />
<workItem from="1687226741026" duration="10214000" />
<workItem from="1687656264704" duration="647000" />
<workItem from="1687657264242" duration="1239000" />
<workItem from="1687742403817" duration="1321000" />
<workItem from="1689318904585" duration="2294000" />
<workItem from="1689580814640" duration="1747000" />
<workItem from="1689645288377" duration="31000" />
<workItem from="1689648084973" duration="7422000" />
<workItem from="1689922360667" duration="15756000" />
<workItem from="1690163346505" duration="29745000" />
<workItem from="1690421328158" duration="352000" />
<workItem from="1690854596224" duration="2157000" />
<workItem from="1690944047626" duration="992000" />
<workItem from="1690955243056" duration="3207000" />
<workItem from="1691026106746" duration="142000" />
<workItem from="1691563406506" duration="2598000" />
<workItem from="1691996825411" duration="4088000" />
<workItem from="1692069071411" duration="34000" />
<workItem from="1692861655714" duration="24709000" />
<workItem from="1693186957967" duration="4719000" />
<workItem from="1693192626727" duration="11695000" />
<workItem from="1693280575699" duration="610000" />
<workItem from="1693295450063" duration="6000" />
<workItem from="1693363361254" duration="786000" />
<workItem from="1693460588621" duration="5525000" />
<workItem from="1693534366828" duration="57000" />
<workItem from="1693535004514" duration="6001000" />
<workItem from="1694397421236" duration="16778000" />
<workItem from="1694498478218" duration="6000" />
<workItem from="1694500439349" duration="66000" />
<workItem from="1694571951196" duration="3000" />
<workItem from="1694588953618" duration="1978000" />
<workItem from="1694698028699" duration="599000" />
<workItem from="1694740686013" duration="2205000" />
<workItem from="1694746011845" duration="559000" />
<workItem from="1695003648967" duration="50151000" />
<workItem from="1695201136366" duration="2000000" />
<workItem from="1695203281423" duration="4420000" />
<workItem from="1695611357919" duration="7264000" />
<workItem from="1695691085873" duration="1156000" />
<workItem from="1695716411140" duration="3387000" />
<workItem from="1695784691052" duration="10555000" />
<workItem from="1696126268621" duration="5000" />
<workItem from="1696646933578" duration="6353000" />
<workItem from="1696667084493" duration="1224000" />
<workItem from="1697078008958" duration="3048000" />
<workItem from="1697096600174" duration="5937000" />
<workItem from="1698461230584" duration="2056000" />
<workItem from="1698475606618" duration="8295000" />
<workItem from="1698631647202" duration="1214000" />
<workItem from="1698633643184" duration="5452000" />
<workItem from="1698646395101" duration="6390000" />
<workItem from="1698715555654" duration="5813000" />
<workItem from="1698736019149" duration="4599000" />
<workItem from="1699424693797" duration="845000" />
<workItem from="1699430324617" duration="1448000" />
<workItem from="1699432788105" duration="498000" />
<workItem from="1699508854963" duration="789000" />
<workItem from="1699513519934" duration="3002000" />
<workItem from="1699840654297" duration="1261000" />
<workItem from="1699931994159" duration="2773000" />
<workItem from="1700039208825" duration="801000" />
<workItem from="1700099993929" duration="601000" />
<workItem from="1700116483859" duration="2028000" />
<workItem from="1700188115850" duration="227000" />
<workItem from="1700188410662" duration="599000" />
<workItem from="1700443128353" duration="7155000" />
<workItem from="1700536168822" duration="15536000" />
<workItem from="1700644680217" duration="32000" />
<workItem from="1701912403161" duration="8255000" />
<workItem from="1701928017473" duration="724000" />
<workItem from="1701929782158" duration="4771000" />
<workItem from="1701935344261" duration="21383000" />
<workItem from="1702257698369" duration="4139000" />
<workItem from="1702262445711" duration="7707000" />
<workItem from="1702284646665" duration="15118000" />
<workItem from="1702430735936" duration="442000" />
<workItem from="1702434796604" duration="3310000" />
<workItem from="1702455900324" duration="14000" />
<workItem from="1702456028103" duration="699000" />
<workItem from="1702612128694" duration="3727000" />
<workItem from="1702867689253" duration="2503000" />
<workItem from="1703147857035" duration="1161000" />
<workItem from="1703556095950" duration="18580000" />
<workItem from="1703645719814" duration="13848000" />
<workItem from="1703728974537" duration="70000" />
<workItem from="1703729085882" duration="3786000" />
<workItem from="1703745329152" duration="5511000" />
<workItem from="1703813282513" duration="1084000" />
<workItem from="1703831007882" duration="2054000" />
<workItem from="1704158054927" duration="54420000" />
<workItem from="1704676540705" duration="3458000" />
<workItem from="1704787095969" duration="5673000" />
<workItem from="1705305128006" duration="14000" />
<workItem from="1705306023607" duration="626000" />
<workItem from="1705386066840" duration="236000" />
<workItem from="1705477361614" duration="207000" />
<workItem from="1705480630088" duration="3459000" />
<workItem from="1705635357723" duration="7443000" />
<workItem from="1705911369234" duration="2690000" />
<workItem from="1705980729319" duration="96000" />
<workItem from="1705989475640" duration="3068000" />
<workItem from="1706060227378" duration="5389000" />
<workItem from="1706084688214" duration="1700000" />
<workItem from="1706163958578" duration="396000" />
<workItem from="1706164366170" duration="10556000" />
<workItem from="1706239417150" duration="689000" />
<workItem from="1706325146302" duration="1702000" />
<workItem from="1706327396142" duration="122000" />
<workItem from="1706327530973" duration="599000" />
<workItem from="1706409791139" duration="791000" />
<workItem from="1706415398503" duration="1243000" />
<workItem from="1706494810488" duration="4557000" />
<workItem from="1706503169073" duration="14451000" />
<workItem from="1706603560897" duration="11539000" />
<workItem from="1706695523259" duration="186000" />
<workItem from="1706696594896" duration="358000" />
<workItem from="1706755060128" duration="6281000" />
<workItem from="1706775432807" duration="160000" />
<workItem from="1709006527546" duration="829000" />
<workItem from="1709192114682" duration="628000" />
<workItem from="1709282311726" duration="682000" />
<workItem from="1709516344330" duration="3082000" />
<workItem from="1709533543118" duration="2395000" />
<workItem from="1709537593943" duration="2246000" />
<workItem from="1709540251395" duration="21513000" />
<workItem from="1709705339864" duration="9091000" />
<workItem from="1709797787731" duration="10565000" />
<workItem from="1709877638906" duration="969000" />
<workItem from="1709879057541" duration="4000" />
<workItem from="1709884678798" duration="4290000" />
<workItem from="1709891341438" duration="3117000" />
<workItem from="1710126952853" duration="3000" />
<workItem from="1710207504828" duration="4359000" />
<workItem from="1710293074549" duration="597000" />
<workItem from="1710312890547" duration="678000" />
<workItem from="1710402161605" duration="8133000" />
<workItem from="1710724106186" duration="1109000" />
<workItem from="1710828922408" duration="2229000" />
<workItem from="1710896466404" duration="1012000" />
<workItem from="1710898563824" duration="3232000" />
<workItem from="1711003476122" duration="34000" />
<workItem from="1711070662404" duration="718000" />
<workItem from="1711075396957" duration="3865000" />
<workItem from="1711088503642" duration="1783000" />
<workItem from="1711329683458" duration="3908000" />
<workItem from="1711345666193" duration="3291000" />
<workItem from="1711353831911" duration="2645000" />
<workItem from="1711421081061" duration="2365000" />
<workItem from="1711435256183" duration="613000" />
<workItem from="1711593432421" duration="2271000" />
<workItem from="1711608498753" duration="612000" />
<workItem from="1711611828628" duration="801000" />
<workItem from="1711617026309" duration="1140000" />
<workItem from="1711676055330" duration="2320000" />
<workItem from="1712455426402" duration="13000" />
<workItem from="1712473761506" duration="91000" />
<workItem from="1712474204716" duration="73000" />
<workItem from="1712474286055" duration="1484000" />
<workItem from="1712475915381" duration="9000" />
<workItem from="1712475931313" duration="2001000" />
<workItem from="1712540257716" duration="500000" />
<workItem from="1712541535910" duration="7842000" />
<workItem from="1712567513291" duration="447000" />
<workItem from="1712643096324" duration="1466000" />
<workItem from="1712646418805" duration="655000" />
<workItem from="1712906213006" duration="22000" />
<workItem from="1712907960003" duration="3490000" />
<workItem from="1712913030784" duration="1030000" />
<workItem from="1712999253993" duration="3310000" />
<workItem from="1713056026159" duration="2423000" />
<workItem from="1713067664110" duration="2534000" />
<workItem from="1713150627880" duration="9000" />
<workItem from="1713150641260" duration="2384000" />
<workItem from="1713161764325" duration="1794000" />
<workItem from="1713166484744" duration="4348000" />
<workItem from="1713322933139" duration="809000" />
<workItem from="1713324030118" duration="203000" />
<workItem from="1713343472998" duration="718000" />
<workItem from="1713404365084" duration="1322000" />
<workItem from="1713408099081" duration="6393000" />
<workItem from="1713513373225" duration="1421000" />
<workItem from="1713923654098" duration="1066000" />
<workItem from="1713928529313" duration="223000" />
<workItem from="1713929588086" duration="477000" />
<workItem from="1713931002835" duration="774000" />
<workItem from="1713937645573" duration="10001000" />
<workItem from="1714007428939" duration="605000" />
<workItem from="1714008828846" duration="232000" />
<workItem from="1714009071584" duration="4998000" />
<workItem from="1714034778723" duration="434000" />
<workItem from="1714095098840" duration="1141000" />
<workItem from="1714101454736" duration="647000" />
<workItem from="1714102616602" duration="544000" />
<workItem from="1714110371171" duration="4118000" />
<workItem from="1714116995463" duration="1552000" />
<workItem from="1714119949315" duration="560000" />
<workItem from="1715052013546" duration="877000" />
<workItem from="1715131554795" duration="8125000" />
<workItem from="1715305489843" duration="4405000" />
<workItem from="1715995123969" duration="3000" />
<workItem from="1715995333597" duration="133000" />
<workItem from="1715999224873" duration="361000" />
<workItem from="1716000131458" duration="95000" />
<workItem from="1716000521744" duration="1206000" />
<workItem from="1716002779594" duration="1044000" />
<workItem from="1716172101314" duration="2663000" />
<workItem from="1716187202674" duration="249000" />
<workItem from="1716191241250" duration="612000" />
<workItem from="1716947679768" duration="1470000" />
<workItem from="1716950698186" duration="62000" />
<workItem from="1716966421467" duration="625000" />
<workItem from="1717052009930" duration="879000" />
<workItem from="1717053868354" duration="1493000" />
<workItem from="1717124638091" duration="3232000" />
<workItem from="1717143011491" duration="3957000" />
<workItem from="1717397171731" duration="4000" />
<workItem from="1717408527596" duration="844000" />
<workItem from="1717465478936" duration="62000" />
<workItem from="1717465595262" duration="7202000" />
<workItem from="1717482055924" duration="5417000" />
<workItem from="1717555274601" duration="14797000" />
<workItem from="1717641170034" duration="1196000" />
<workItem from="1717653843514" duration="2842000" />
<workItem from="1717661366080" duration="929000" />
<workItem from="1718158879686" duration="260000" />
<workItem from="1718161581302" duration="2271000" />
<workItem from="1718175087066" duration="2395000" />
<workItem from="1718241465057" duration="1147000" />
<workItem from="1718265870498" duration="1300000" />
<workItem from="1718328051232" duration="2694000" />
<workItem from="1718592143491" duration="342000" />
<workItem from="1718592578226" duration="3312000" />
<workItem from="1718611431443" duration="1329000" />
<workItem from="1718677299457" duration="1527000" />
<workItem from="1718695127174" duration="3336000" />
<workItem from="1718702888894" duration="937000" />
<workItem from="1718767213721" duration="220000" />
<workItem from="1718778857059" duration="2208000" />
<workItem from="1718864356266" duration="133000" />
<workItem from="1718932091196" duration="11000" />
<workItem from="1718937754956" duration="6997000" />
<workItem from="1718961213073" duration="720000" />
<workItem from="1718961946908" duration="384000" />
<workItem from="1718962559454" duration="147000" />
<workItem from="1719208711688" duration="1035000" />
<workItem from="1719277104901" duration="610000" />
<workItem from="1719295000891" duration="2407000" />
<workItem from="1719364649872" duration="2680000" />
<workItem from="1719473456780" duration="3632000" />
<workItem from="1719538000388" duration="4208000" />
<workItem from="1719798250389" duration="5085000" />
<workItem from="1719823536035" duration="1515000" />
<workItem from="1719883199962" duration="3534000" />
<workItem from="1719991600192" duration="691000" />
<workItem from="1719996584816" duration="5851000" />
<workItem from="1720574415014" duration="5468000" />
<workItem from="1720661239359" duration="2658000" />
<workItem from="1720682974164" duration="4659000" />
<workItem from="1720747205733" duration="3146000" />
<workItem from="1721114555665" duration="861000" />
<workItem from="1721115555542" duration="3818000" />
<workItem from="1721181583375" duration="5437000" />
<workItem from="1721208006372" duration="869000" />
<workItem from="1721265450939" duration="8539000" />
<workItem from="1721291308438" duration="757000" />
<workItem from="1721356537885" duration="2299000" />
<workItem from="1724117335015" duration="395000" />
<workItem from="1724119223765" duration="308000" />
<workItem from="1724308384728" duration="1020000" />
<workItem from="1724309716182" duration="686000" />
</task>
<servers />
</component>
<component name="TypeScriptGeneratedFilesManager">
<option name="version" value="3" />
</component>
<component name="UnknownFeatures">
<option featureType="com.intellij.fileTypeFactory" implementationName="*.conf" />
<option featureType="com.intellij.fileTypeFactory" implementationName="*.service" />
</component>
<component name="VgoProject">
<integration-enabled>false</integration-enabled>
<settings-migrated>true</settings-migrated>
</component>
<component name="XSLT-Support.FileAssociations.UIState">
<expand />
<select />
</component>
</project>

6
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,6 @@
{
"marscode.codeCompletionPro": {
"enableCodeCompletionPro": true
},
"marscode.enableInlineCommand": true
}

View File

@@ -12,3 +12,12 @@ curl -x socks5h://103.0.180.82:9997 https://oss.demo.uavcmlc.com/cmlc-installati
curl -x socks5h://103.0.180.82:9997 https://oss.demo.uavcmlc.com/cmlc-installation/shls/middleware-images.tar.gz -o middleware-images.tar.gz
curl http://103.0.180.82:9000/octopus/mysql-8.0.27-linux-glibc2.17-x86_64-minimal.zip
wget 42.192.52.227:9000/octopus/doris/chengdu.csv
wget 42.192.52.227:9000/octopus/doris/cmii_dwd_reg_grid_mowork_signal_detail_dd.csv
wget 42.192.52.227:9000/octopus/doris/cmii_integration_tmpnal_micro_meteorology.csv
wget 42.192.52.227:9000/octopus/doris/doris-v2-v21.sql

View File

@@ -0,0 +1,69 @@
# Reverse-proxy vhost for lingyun.jlsxwkj.com, fronting the Kubernetes
# NodePort services on this host.
server {
# NOTE(review): ssl_* directives are configured below but "listen" has no
# "ssl" flag, so this port serves plain HTTP and the certificate settings
# are inert — confirm whether "listen 8888 ssl;" was intended.
listen 8888;
server_name lingyun.jlsxwkj.com;
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 5m;
ssl_prefer_server_ciphers on;
ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4;
# NOTE(review): TLSv1 and TLSv1.1 are deprecated; keep only if legacy
# clients genuinely require them.
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_certificate /etc/nginx/conf.d/ssl.d/jlsxwkj.com.pem;
ssl_certificate_key /etc/nginx/conf.d/ssl.d/jlsxwkj.com.key;
# Main application: proxy to the local NodePort ingress (30500) with
# large-upload tuning and WebSocket upgrade headers. The Host header is
# overridden so the ingress routes by its internal fake domain.
location / {
proxy_pass http://localhost:30500;
client_max_body_size 5120m;
client_body_buffer_size 5120m;
client_body_timeout 6000s;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
proxy_connect_timeout 600s;
proxy_max_temp_file_size 5120m;
proxy_request_buffering on;
proxy_buffering off;
proxy_buffer_size 4k;
proxy_buffers 4 12k;
proxy_set_header Host fake-domain.jlyd.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
# /sense/ -> /api/adapter/ : the rewrite (with "break") replaces the URI
# before proxying, and the target is this same server on port 8888, so the
# rewritten request re-enters "location /" above.
# NOTE(review): presumably an intentional loopback through the main proxy —
# confirm; otherwise proxy straight to localhost:30500.
location /sense/ {
proxy_pass http://localhost:8888/api/adapter/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
rewrite ^/sense/(.*)$ /api/adapter/$1 break;
}
# AMap style proxy: appends the jscode key to the query string.
# NOTE(review): proxy_pass is disabled here, so these requests currently
# fall through unproxied; the commented target also says "v4/ap/styles"
# while the location is "v4/map/styles" — looks like a typo if re-enabled.
location /_AMapService/v4/map/styles {
set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
#proxy_pass https://webapi.amap.com/v4/ap/styles;
}
# Generic AMap REST proxy (currently disabled as well).
location /_AMapService/ {
set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
#proxy_pass https://restapi.amap.com/;
}
# WebRTC signalling (SRS) with permissive CORS headers.
location /rtc/v1/ {
add_header Access-Control-Allow-Headers X-Requested-With;
add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
proxy_pass http://127.0.0.1:30985/rtc/v1/;
}
# HTTP-FLV / live streaming endpoint (SRS).
location /live/ {
add_header Access-Control-Allow-Headers X-Requested-With;
add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
proxy_pass http://127.0.0.1:30558/live/;
}
# Block Spring Boot actuator endpoints from the outside.
location ~ ^/\w*/actuator/ {
return 403;
}
}

View File

@@ -2,24 +2,20 @@
1. 内核配置修改、优化关闭swap、SELinux等服务器环境初始化
2. 通用工具安装curl wget vim ping telnet等、配置免密登录等
3. 安装时间同步服务器,对所有的服务器的时间进行同步
第二天
1. 离线、下载并安装docker服务修改优化docker配置
2. 安装docker-compose 安装Harbor服务器 配置服务器免密拉取镜像
3. 离线下载安装GlusterFS、Heketi、NFS配置安装底层存储服务
1. 离线下载Kubernetes安装文件
2. 上传Kubernetes离线安装包到所有服务器节点
3. 修改对应的kubernetes安装部署配置
4. 配置apiserver、controller-manager、scheduler、kube-proxy服务文件并安装、安装master和node节点验证Kubernetes集群安装
第四天
1. 安装calico网络插件
2. 对Kubernetes集群的网络、节点、镜像等进行验证
3. 下载离线镜像安装服务暴露层Ingress组件
1. 下载所有中间件所需的镜像,并进行中间件的配置及安装部署
2. 安装MySQL数据库组件并进行配置验证
3. 安装MongoDB并完成配置与验证
@@ -28,26 +24,18 @@
6. 安装Rabbit MQ消息队列组件并配置验证
7. 安装Nacos微服务注册中心组件并配置验证
1. 配置存储类、创建存储卷、安装业务层对象存储服务Minio集群
2. 安装视频流媒体业务SRS-Cluster
3. 确保推拉流服务正常运行
第七天
1. 离线下载并上传所有的中移凌云平台业务镜像
2. 根据环境适配中移凌云平台的业务配置并上传
3. 安装中移凌云基础初始化数据,运营平台数据初始化
1. 部署所有业务组件微服务,确保所有微服务正常运行
2. 初步达到平台的运行(不可使用)
第九天
1. 部署GDR转码服务器完成配置信息
2. 保证GDR服务运行正常
3. 开始初步联调
第十天
1. 中移凌云平台安装部署功能初步验证
2. 平台业务组件运行联测
3. 确保平台核心功能正常可用

View File

@@ -6,17 +6,17 @@ worker_server_list=(192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.
all_server_list=(192.168.35.105 192.168.35.114 192.168.35.115 192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104)
all_server_list=(dev-worker-01 dev-worker-02 dev-worker-03 dev-worker-05 dev-worker-06 dev-worker-07 dev-worker-08 dev-worker-09)
all_server_list=(192.168.35.105 192.168.35.114 192.168.35.115 192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104)
all_server_list=(192.168.35.105 192.168.35.114 192.168.35.115 192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104 192.168.40.53 192.168.40.54 192.168.40.55)
for server in "${all_server_list[@]}"; do
echo "server is ${server}"
ssh root@"${server}" "df -TH"
result=$(ssh root@"$server" "df -TH | awk '\$6+0 > 60'| grep -v nfs")
if [ -n "$result" ]; then
echo "server is $server"
echo "$result"
echo "-----------------------------"
fi
done

View File

@@ -0,0 +1,8 @@
#!/bin/bash
# Start two background TCP port-forward tunnels (6005/6006), relaying
# 192.168.9.11 -> 120.26.107.230 via the port_forward binary in /root/wdd.
cd /root/wdd/ || exit 1   # abort if the working directory is missing
: > nohup.out             # truncate the previous log (echo "" left a stray newline)
# Both forwards must be backgrounded; the original joined them with a single
# "&", leaving the second one running in the foreground and blocking.
nohup ./port_forward_linux_amd64 tcp listen:192.168.9.11:6006 conn:120.26.107.230:6006 &
nohup ./port_forward_linux_amd64 tcp listen:192.168.9.11:6005 conn:120.26.107.230:6005 &

View File

@@ -0,0 +1,178 @@
# RKE cluster definition: one control-plane/etcd node plus five workers.
# (Indentation restored — the previous copy had lost all YAML nesting.)
nodes:
  # master node: control plane + etcd + worker; also hosts the ingress
  - address: 10.215.66.85
    user: rke-installer
    role:
      - controlplane
      - etcd
      - worker
    internal_address: 10.215.66.85
    labels:
      ingress-deploy: true
  # workers 1-4 join the cluster
  - address: 10.215.66.86
    user: rke-installer
    role:
      - worker
    internal_address: 10.215.66.86
  - address: 10.215.66.87
    user: rke-installer
    role:
      - worker
    internal_address: 10.215.66.87
  - address: 10.215.66.88
    user: rke-installer
    role:
      - worker
    internal_address: 10.215.66.88
  - address: 10.215.66.90
    user: rke-installer
    role:
      - worker
    internal_address: 10.215.66.90
  # worker-5 has low resources and is reserved for mysql
  - address: 10.215.66.91
    user: rke-installer
    role:
      - worker
    internal_address: 10.215.66.91
    labels:
      mysql-deploy: true

# Default is false; when true RKE does not error on unsupported Docker versions
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
#kubernetes_version: v1.18.16-rancher1-1
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /home/rke-installer/.ssh/id_rsa

services:
  etcd:
    backup_config:
      enabled: false
      interval_hours: 72
      retention: 3
      safe_timestamp: false
      timeout: 300
    creation: 12h
    extra_args:
      election-timeout: 5000
      heartbeat-interval: 500
      cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
    gid: 0
    retention: 72h
    snapshot: false
    uid: 0
  kube-api:
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-controller
    service_cluster_ip_range: 10.74.0.0/16
    # Expose a different port range for NodePort services
    service_node_port_range: 30000-40000
    always_pull_images: true
    pod_security_policy: false
    # Add additional arguments to the kubernetes API server
    # This WILL OVERRIDE any existing defaults
    extra_args:
      # Enable audit log to stdout
      audit-log-path: "-"
      # Increase number of delete workers
      delete-collection-workers: 3
      # Set the level of log output to debug-level
      v: 1
  kube-controller:
    # CIDR pool used to assign IP addresses to pods in the cluster
    cluster_cidr: 10.100.0.0/16
    # IP range for any services created on Kubernetes
    # This must match the service_cluster_ip_range in kube-api
    service_cluster_ip_range: 10.74.0.0/16
  kubelet:
    # Base domain for the cluster
    cluster_domain: cluster.local
    # IP address for the DNS service endpoint
    cluster_dns_server: 10.74.0.10
    # Fail if swap is on
    fail_swap_on: true
    # Set max pods to 122 instead of the default 110
    extra_args:
      max-pods: 122
    # Optionally define additional volume binds to a service
  scheduler:
    extra_args:
      # Set the level of log output to warning-level
      v: 0
      tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA

authentication:
  strategy: x509
  sans:
    - "10.215.66.85"
authorization:
  mode: rbac
addon_job_timeout: 30

# Specify network plug-in (canal, calico, flannel, weave, or none)
network:
  mtu: 1440
  options:
    flannel_backend_type: vxlan
  plugin: calico
  #add by zxc@241129
  # calico_network_provider:
  #   ipip:
  #     mode: cross-subnet
  #     interface: ens160
  tolerations:
    - key: "node.kubernetes.io/unreachable"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300
    - key: "node.kubernetes.io/not-ready"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300

# Specify DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  nodelocal: {}
  tolerations:
    - key: "node.kubernetes.io/unreachable"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300
    - key: "node.kubernetes.io/not-ready"
      operator: "Exists"
      effect: "NoExecute"
      tolerationseconds: 300

ingress:
  provider: nginx
  default_backend: true
  http_port: 0
  https_port: 0
  extra_envs:
    - name: TZ
      value: Asia/Shanghai
  node_selector:
    ingress-deploy: true
  options:
    use-forwarded-headers: "true"

private_registries:
  - url: 10.215.66.85:8033  # private registry address
    user: admin
    password: "Test@2"
    is_default: true
# NOTE: set nodes[].address/internal_address to your master/worker IPs;
# set authentication.sans and private_registries[].url to the master IP.

View File

@@ -0,0 +1,18 @@
# Shared backend-log volume claim for the gsyd-app namespace.
# (Indentation restored — the previous copy had lost all YAML nesting.)
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-backend-log-pvc
  namespace: gsyd-app
  labels:
    app.kubernetes.io/version: 6.0.0
    cmii.app: nfs-backend-log-pvc
    cmii.type: middleware-base
    helm.sh/chart: all-persistence-volume-claims-1.1.0
spec:
  accessModes:
    - ReadWriteMany  # shared read-write across nodes (NFS-backed class)
  resources:
    requests:
      storage: 100Gi
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem

View File

@@ -0,0 +1,59 @@
# Load-balanced upstream over the three NodePort ingress endpoints.
# ip_hash pins a client to one backend (session affinity).
upstream proxy_server {
    ip_hash;
    server 192.168.186.12:30500;
    server 192.168.186.13:30500;
    server 192.168.186.14:30500;
}

# HTTPS vhost for 5ga-onesky.yncloudsaas.com.
server {
    listen 8088 ssl;
    server_name 5ga-onesky.yncloudsaas.com;

    # Certificate (domain + intermediate chain) and private key
    ssl_certificate /etc/nginx/conf.d/ssl.d/x.yncloudsaas.com.cert;
    ssl_certificate_key /etc/nginx/conf.d/ssl.d/x.yncloudsaas.com.key;

    # Hardened TLS settings
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers on;
    ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256';

    # Main application: large-upload tuning + WebSocket upgrade headers.
    # Host is overridden so the ingress routes by its internal fake domain.
    location / {
        proxy_pass http://proxy_server;
        client_max_body_size 5120m;
        client_body_buffer_size 5120m;
        client_body_timeout 6000s;
        proxy_send_timeout 10000s;
        proxy_read_timeout 10000s;
        proxy_connect_timeout 600s;
        proxy_max_temp_file_size 5120m;
        proxy_request_buffering on;
        proxy_buffering off;
        proxy_buffer_size 4k;
        proxy_buffers 4 12k;
        proxy_set_header Host fake-domain.ynydapp.io;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    # AMap map-style proxy: append the jscode key and forward to AMap.
    location /_AMapService/v4/map/styles {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        # fixed: target was "/v4/ap/styles" (typo) — must mirror the
        # location path "/v4/map/styles" or AMap returns 404.
        proxy_pass https://webapi.amap.com/v4/map/styles;
    }
    # Generic AMap REST proxy with the jscode appended.
    location /_AMapService/ {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://restapi.amap.com/;
    }

    # WebRTC signalling (SRS) with permissive CORS headers.
    location /rtc/v1/ {
        add_header Access-Control-Allow-Headers X-Requested-With;
        add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
        proxy_pass http://127.0.0.1:30985/rtc/v1/;
    }

    # Block Spring Boot actuator endpoints from the outside.
    location ~ ^/\w*/actuator/ {
        return 403;
    }
}

View File

@@ -0,0 +1,10 @@
https://36.138.132.240:30554/
eyJhbGciOiJSUzI1NiIsImtpZCI6IjlPX3VXRndkOEVvbHg1OF9pVmlBbmlYUXEzdmEybW9KQ2s0dVl0ZmRxZXMifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWRucXc5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkYmY2MzM5Ni00MTA5LTQ5MGEtOWE2MS02ZmQ0YTA1NWNkNDQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.Ksh_Cd_C_f5IjKA2ZWv9u4JVkHlxM-PhfEdtt6g2NC87SMsidKkkQ3uSaj4Egnf0dOuRZIsVj67fVrarnsTl5Hy2N8zrgXP1o53YsauALBsqO_ShmdYMHYgFvJ5U_3Ig54tRm2nLsDpeai-6gEcoVvmuMm8GneiGiiLJI4BN2n9hbLrwKE5BG6L3TIUd_baGD0Oy0y-BY4JZ6Lboc3J7fNKgUrcsBc0q5mw_XtW61gInnnPuhlQ2No4PLgKMZK0yq-9j8lfAE9RtfMqvllIyzjZ0LwlRwD1cHwaVeY-sydl4mGNamGx9YnfpnxeIi5_U67uNIXW893UzH6yRyTqdKw
http://36.138.132.240:38989/
admin
nYcRN91r._hj
RabbitMQ的界面

View File

@@ -2,6 +2,11 @@ https://172.31.2.7:39999
eyJhbGciOiJSUzI1NiIsImtpZCI6IkQ2SExTeWxObkxTMllFQ0Z5ZTMwakpaS01FbHFnSzc5RWY0cVJVVXNfOGcifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWxrNjRnIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkZmM1YWE4NC05NWRlLTRjOTUtYmVmNi1jOWNlYjdjMzc3ZjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.JOyeNRJHdOMeP1PRfi6x0LMfu6XiuSEIXTVRF0T2xNzv9thdEzjbFZA9j6GI4fWyKZb4IO0xABzjVbkYRW_KIUSosDT0QzngqIq6L1O3gGBgzDKlGpBNFvrbxZgwxcJir_piSP23uvOlMSW9uv4GMLEZixcwNmy_zVVwPbEtrBLSGWwZK9nfa2tYpWesUO70OdWXdP7lW6MRN4i_8RD37-85RY_YD_pYAYGiJtf9cIW6ao630a9RO7nuCDdUuNa9REjYUQqt9ksS_PL277k27e1UBV0yBxKYMxpwhqD4QshfXBMHw56K5F9syGdP7NosBmFmtcHCblEb5nNTHzatJw
http://172.31.2.7:8088/pangu/
szgzadmin
Szgz_admin.123
journalctl -u nfs-server -n 100 -f
journalctl -u nfs-idmapd.service -n 100 -f

View File

@@ -0,0 +1,167 @@
# cmii-uav-tsmpf: ts -> mp4 merge worker (ConfigMap + Deployment + Service).
# (Indentation restored — the previous copy had lost all YAML nesting.)
kind: ConfigMap
apiVersion: v1
metadata:
  name: cmii-uav-tsmpf-cm
  namespace: szgz
data:
  # NOTE(review): '>' folds adjacent lines into one space-joined line when
  # this scalar is parsed; if the application expects one "key=value" per
  # line this should be a literal block ('|') — confirm against a running pod
  # before changing, as the content below is reproduced exactly as found.
  server_config_docker.ini: >
    #minio=mc alias set minIO https://oss.demo.uavcmlc.com:18000/ cmii
    B#923fC7mk
    minio=mc alias set minIO http://172.31.2.7:9000/ cmii B#923fC7mk
    http_port=8192
    docker_logs_path=/workspace/logs
    docker_temp_folder=/workspace/temp_folder
    mp4_file_size=500
    upload_url=no used.
    query_file_url=no used.
    download_url=no_useful_now.
    send_complete_msg_url=no used.
    logs_path=/workspace/logs
    temp_folder=/workspace/temp_folder
    #需要排除的合并任务(这些任务由于ts文件不存在导致无法成功)
    exclude_task_codes=
    #是否执行playback的任务,0:false; 1:true;
    merge_playback=1
    #下载ts文件方式http, mc
    download_type=mc
    #仅合成某个公司的文件;(可用于希望优先下载某个公司的文件的场景); 数字半角逗号分割;
    only_download_company_ids=
    #是否记录磁盘日志; 0:false, 1:true
    log_to_disk=1
    #(integer)日志级别; 1:error, 2:warn, 3:info, 4:debug
    log_level=2
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: cmii-uav-tsmpf
  namespace: szgz
  labels:
    app.kubernetes.io/app-version: 6.2.0
    app.kubernetes.io/managed-by: octopus-control
    cmii.app: cmii-uav-tsmpf
    cmii.type: backend
    octopus/control: backend-app-1.0.0
spec:
  replicas: 1
  selector:
    matchLabels:
      cmii.app: cmii-uav-tsmpf
      cmii.type: backend
  template:
    metadata:
      labels:
        cmii.app: cmii-uav-tsmpf
        cmii.type: backend
    spec:
      volumes:
        # NOTE(review): named "nfs-..." but backed by emptyDir, so logs are
        # node-local and lost on pod reschedule — confirm this is intended.
        - name: nfs-backend-log-volume
          emptyDir: {}
        - name: server-config-docker
          configMap:
            name: cmii-uav-tsmpf-cm
            items:
              - key: server_config_docker.ini
                path: server_config_docker.ini
      containers:
        - name: cmii-uav-tsmpf
          image: '172.31.2.7:8033/admin/cmii/ts2mp4_docker_image_arm64:v1.1.0'
          command:
            - /workspace/ts_file_merge
            - /workspace/server_config_docker.ini
          ports:
            - name: pod-port
              containerPort: 8192
              protocol: TCP
          env:
            - name: K8S_NAMESPACE
              value: szgz
            - name: APPLICATION_NAME
              value: cmii-uav-tsmpf
          resources:
            requests:
              cpu: 500m
              memory: 2Gi
          volumeMounts:
            - name: nfs-backend-log-volume
              mountPath: /cmii/logs
              subPath: szgz/cmii-uav-tsmpf
            - name: server-config-docker
              mountPath: /workspace/server_config_docker.ini
              subPath: server_config_docker.ini
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      # Pin the pod to nodes labelled for the szgz environment.
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: uavcloud.env
                    operator: In
                    values:
                      - szgz
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
  name: cmii-uav-tsmpf
  namespace: szgz
  labels:
    app.kubernetes.io/app-version: 6.2.0
    app.kubernetes.io/managed-by: octopus-control
    cmii.app: cmii-uav-tsmpf
    cmii.type: backend
    octopus/control: backend-app-1.0.0
spec:
  ports:
    - name: backend-tcp
      protocol: TCP
      port: 8192
      targetPort: 8192
      # NOTE(review): 38192 is outside the default 30000-32767 NodePort
      # range; requires an extended service-node-port-range — confirm.
      nodePort: 38192
  selector:
    cmii.app: cmii-uav-tsmpf
    cmii.type: backend
  type: NodePort

View File

@@ -0,0 +1,100 @@
#!/bin/bash
# Disk layout helper for Kylin (klas) hosts.
#
# Expected starting layout (lsblk):
#   vda             252:0  0  500G  0 disk
#   ├─vda1          252:1  0    1G  0 part /boot
#   └─vda2          252:2  0  119G  0 part
#     ├─klas-root   253:0  0 74.7G  0 lvm  /
#     ├─klas-swap   253:1  0  7.9G  0 lvm
#     └─klas-backup 253:2  0 36.4G  0 lvm
#
# Steps:
#   1. drop the swap LV and grow klas-root over the freed vda2 space
#   2. if a data disk /dev/vdb exists, partition it and build the LVM stack
#      datavg/docklv, formatted xfs and mounted persistently at /var/lib/docker

# --- 1. remove swap, grow the root LV ---------------------------------------
swapoff /dev/klas/swap
lvremove -f /dev/klas/swap   # -f: don't prompt; this script runs unattended
# lvremove /dev/klas/backup
lvextend -l +100%FREE /dev/klas/root
xfs_growfs /dev/klas/root    # root FS is xfs (grown online)

# --- 2. create /dev/vdb1 ----------------------------------------------------
if ! lsblk | grep -q vdb; then
    echo "/dev/vdb does not exist, skipping partition creation"
    exit 0
fi

# Put a GPT label on the data disk first
echo yes | parted /dev/vdb mklabel gpt
echo "/dev/vdb exists, starting to partition creation"
# NOTE(review): this piped fdisk dialog supplies no blank lines to accept the
# default partition number/first/last sectors, and "8e" is an MBR type code
# while the disk was just labelled GPT — verify it really creates /dev/vdb1.
echo "n
p
t
8e
w
" | fdisk /dev/vdb
partprobe
pvcreate /dev/vdb1

# --- 3-7. LVM stack mounted at /var/lib/docker ------------------------------
vgcreate datavg /dev/vdb1                 # 3. VG over the whole PV
lvcreate -l 100%FREE -n docklv datavg     # 4. LV over the whole VG
mkfs.xfs /dev/datavg/docklv               # 5. format xfs
mkdir -p /var/lib/docker
mount /dev/datavg/docklv /var/lib/docker  # 6. mount
# 7. persist the mount across reboots
echo "/dev/datavg/docklv /var/lib/docker xfs defaults 0 0" >> /etc/fstab

echo ""
df -TH
echo ""
lsblk
echo ""

# --- reference appendix: disks larger than 2 TiB ----------------------------
# The lines below were free-text notes plus example commands. Left
# un-commented they executed as shell commands: the prose lines failed with
# "command not found" and the examples ran unconditionally —
# "parted /dev/vda mklabel gpt" would wipe the system disk's partition table.
# Kept commented out, for manual use only:
#   parted /dev/vda mklabel gpt                  # 1. convert /dev/vda to GPT
#   parted /dev/vda resizepart 3 100%            # 2. grow partition 3
#   pvresize /dev/vda3                           # 3. grow the PV / vg datavg
#   lvextend -l +100%FREE /dev/datavg/docklv     # 4. grow lv docklv
#   xfs_growfs /dev/datavg/docklv                # (was resize2fs — wrong for xfs)

# Drop the fstab entries of the LVs removed in step 1
sed -i '/^\/dev\/mapper\/klas-\(backup\|swap\)/d' /etc/fstab

View File

@@ -0,0 +1,66 @@
# RBAC for the NFS subdir external provisioner.
# (Indentation restored — the previous copy had lost all YAML nesting.)
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with the namespace where the provisioner is deployed (same below)
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with the namespace where the provisioner is deployed
    namespace: kube-system
roleRef:
  kind: ClusterRole
  # NOTE(review): binds the service account to cluster-admin rather than the
  # scoped nfs-client-provisioner-runner role defined above — this grants far
  # more privilege than the provisioner needs; kept as-is to preserve current
  # behavior, but switching back to the scoped role should be confirmed.
  # name: nfs-client-provisioner-runner
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with the namespace where the provisioner is deployed
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with the namespace where the provisioner is deployed
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,145 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: doris-nfs-sc-56
provisioner: doris-nfs-sc-56
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-nfs-sc-56
labels:
app: doris-nfs-sc-56
# replace with namespace where provisioner is deployed
namespace: kube-system #与RBAC文件中的namespace保持一致
spec:
replicas: 1
selector:
matchLabels:
app: doris-nfs-sc-56
strategy:
type: Recreate
template:
metadata:
labels:
app: doris-nfs-sc-56
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: doris-nfs-sc-56
image: 172.16.100.55:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: doris-nfs-sc-56
- name: NFS_SERVER
value: 172.16.100.56
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 172.16.100.56
path: /var/lib/docker/nfs_data
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: doris-nfs-sc-57
provisioner: doris-nfs-sc-57
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-nfs-sc-57
labels:
app: doris-nfs-sc-57
# replace with namespace where provisioner is deployed
namespace: kube-system #与RBAC文件中的namespace保持一致
spec:
replicas: 1
selector:
matchLabels:
app: doris-nfs-sc-57
strategy:
type: Recreate
template:
metadata:
labels:
app: doris-nfs-sc-57
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: doris-nfs-sc-57
image: 172.16.100.55:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: doris-nfs-sc-57
- name: NFS_SERVER
value: 172.16.100.57
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 172.16.100.57
path: /var/lib/docker/nfs_data
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: doris-nfs-sc-58
provisioner: doris-nfs-sc-58
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-nfs-sc-58
labels:
app: doris-nfs-sc-58
# replace with namespace where provisioner is deployed
namespace: kube-system #与RBAC文件中的namespace保持一致
spec:
replicas: 1
selector:
matchLabels:
app: doris-nfs-sc-58
strategy:
type: Recreate
template:
metadata:
labels:
app: doris-nfs-sc-58
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: doris-nfs-sc-58
image: 172.16.100.55:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: doris-nfs-sc-58
- name: NFS_SERVER
value: 172.16.100.58
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 172.16.100.58
path: /var/lib/docker/nfs_data
---

View File

@@ -0,0 +1,261 @@
nodes:
  # NOTE: Kubernetes label values are strings; unquoted YAML `true` parses as
  # a boolean, so all label values are quoted here — consistent with the
  # existing mysql-deploy entry that was already quoted.
  - address: 172.16.100.55
    user: root
    role:
      - controlplane
      - etcd
      - worker
    internal_address: 172.16.100.55
    labels:
      ingress-deploy: "true"
  - address: 172.16.100.51
    user: root
    role:
      - worker
    internal_address: 172.16.100.51
    labels:
      uavcloud.env: "eedsjc"
  - address: 172.16.100.52
    user: root
    role:
      - worker
    internal_address: 172.16.100.52
    labels:
      uavcloud.env: "eedsjc"
  - address: 172.16.100.53
    user: root
    role:
      - worker
    internal_address: 172.16.100.53
    labels:
      uavcloud.env: "eedsjc"
  - address: 172.16.100.56
    user: root
    role:
      - worker
    internal_address: 172.16.100.56
    labels:
      doris-deploy: "true"
  - address: 172.16.100.57
    user: root
    role:
      - worker
    internal_address: 172.16.100.57
    labels:
      doris-deploy: "true"
  - address: 172.16.100.58
    user: root
    role:
      - worker
    internal_address: 172.16.100.58
    labels:
      doris-deploy: "true"
  - address: 172.16.100.59
    user: root
    role:
      - worker
    internal_address: 172.16.100.59
    labels:
      uavcloud.env: "eedsjc-uavms"
  - address: 172.16.100.60
    user: root
    role:
      - worker
    internal_address: 172.16.100.60
    labels:
      uavcloud.env: "eedsjc-uavms"
  - address: 172.16.100.62
    user: root
    role:
      - worker
    internal_address: 172.16.100.62
    labels:
      uavcloud.env: "mysql"
      mysql-deploy: "true"
authentication:
  strategy: x509
  sans:
    - "172.16.100.55"
private_registries:
  - url: 172.16.100.55:8033 # private image registry address
    user: admin
    password: "V2ryStr@ngPss"
    is_default: true
##############################################################################
# Defaults to false; when set to true, RKE will not raise an error when an
# unsupported Docker version is detected.
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
# NOTE(review): enable_cri_dockerd below is documented as available only on
# Kubernetes 1.21 and higher, while this pins v1.20.4 — confirm the
# combination actually works on this RKE release.
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
#ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 172.24.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 1
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 172.28.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 172.24.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 172.24.0.10
# Fail if swap is on
fail_swap_on: false
# Set max pods to 250 instead of default 110
extra_binds:
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
extra_args:
max-pods: 122
# Optionally define additional volume binds to a service
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 1
authorization:
mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
options:
flannel_backend_type: vxlan
flannel_iface: ens18
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
plugin: calico
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal: {}
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
  provider: nginx
  default_backend: true
  http_port: 0
  https_port: 0
  extra_envs:
    - name: TZ
      value: Asia/Shanghai
  node_selector:
    # Quoted: node selectors match string label values; unquoted `true` is a
    # YAML boolean (matches the quoting of the node label definitions).
    ingress-deploy: "true"
  options:
    use-forwarded-headers: "true"
    access-log-path: /var/log/nginx/access.log
    client-body-timeout: '6000'
    compute-full-forwarded-for: 'true'
    enable-underscores-in-headers: 'true'
    log-format-escape-json: 'true'
    log-format-upstream: >-
      { "msec": "$msec", "connection": "$connection", "connection_requests":
      "$connection_requests", "pid": "$pid", "request_id": "$request_id",
      "request_length": "$request_length", "remote_addr": "$remote_addr",
      "remote_user": "$remote_user", "remote_port": "$remote_port",
      "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
      "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
      "request_uri": "$request_uri", "args": "$args", "status": "$status",
      "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
      "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
      "http_host": "$http_host", "server_name": "$server_name", "request_time":
      "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
      "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
      "upstream_response_time": "$upstream_response_time",
      "upstream_response_length": "$upstream_response_length",
      "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
      "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
      "request_method": "$request_method", "server_protocol": "$server_protocol",
      "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
      "geoip_country_code": "$geoip_country_code" }
    proxy-body-size: 5120m
    proxy-read-timeout: '6000'
    proxy-send-timeout: '6000'

View File

@@ -0,0 +1,80 @@
#!/bin/bash
# Grow the root LV with the space freed by dropping swap, then dedicate the
# whole spare data disk (/dev/vdb) to a new LVM volume mounted on
# /var/lib/docker.
#
# Reference layout (lsblk):
# vda             252:0    0  500G  0 disk
# ├─vda1          252:1    0    1G  0 part /boot
# └─vda2          252:2    0  119G  0 part
#   ├─klas-root   253:0    0 74.7G  0 lvm  /
#   ├─klas-swap   253:1    0  7.9G  0 lvm
#   └─klas-backup 253:2    0 36.4G  0 lvm

# --- 1. Disable swap and hand its space to the root LV -----------------------
# Fix: the original addressed the VG as /dev/kals/* which does not exist —
# the VG is named "klas" (see the lsblk output above).
swapoff /dev/klas/swap
lvremove -y /dev/klas/swap            # -y answers lvremove's confirmation
                                      # (echo "y\n" without -e did not)
# lvremove -y /dev/klas/backup        # left disabled, as in the original
lvextend -l +100%FREE /dev/klas/root
xfs_growfs /dev/klas/root             # grow the xfs root filesystem online

# --- 2. Partition the data disk and create the PV ----------------------------
if ! lsblk | grep -q vdb; then
    echo ""
    echo "/dev/vdb does not exist, skipping partition creation"
    echo ""
    df -TH
    echo ""
    lsblk
    echo ""
    exit 0
fi

echo "/dev/vdb exists, starting to partition creation"
echo yes | parted /dev/vdb mklabel gpt
# fdisk answers: n = new partition, then three empty lines accept the defaults
# (partition number, first sector, last sector), w = write.  The original fed
# "p"/"t"/"8e" which are MBR-style answers and do not apply to the GPT label
# created above; the default GPT partition type is fine for pvcreate.
fdisk /dev/vdb <<'EOF'
n



w
EOF
partprobe
pvcreate /dev/vdb1

# --- 3. Create VG datavg over the whole PV -----------------------------------
vgcreate datavg /dev/vdb1
# --- 4. Create LV docklv over the whole VG -----------------------------------
lvcreate -y -l 100%FREE -n docklv datavg
# --- 5. Format as xfs --------------------------------------------------------
mkfs.xfs /dev/datavg/docklv
# --- 6. Mount on /var/lib/docker ---------------------------------------------
mkdir -p /var/lib/docker
mount /dev/datavg/docklv /var/lib/docker
# --- 7. Persist the mount (idempotent: skip if the entry already exists) -----
grep -q '^/dev/datavg/docklv ' /etc/fstab ||
    echo "/dev/datavg/docklv /var/lib/docker xfs defaults 0 0" >> /etc/fstab

echo ""
df -TH
echo ""
lsblk
echo ""

View File

@@ -0,0 +1,194 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-1-9000g-pv
spec:
capacity:
storage: 9000Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.56
---
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-2-9000g-pv
spec:
capacity:
storage: 9000Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.57
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-3-9000g-pv
spec:
capacity:
storage: 9000Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.58
---
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-1-500g-pv
spec:
capacity:
storage: 500Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.56
---
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-2-500g-pv
spec:
capacity:
storage: 500Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.57
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: doris-3-500g-pv
spec:
capacity:
storage: 500Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: doris-static-storage
local:
path: /var/lib/docker/doris
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 172.16.100.58
---
# NOTE(review): none of these claims set spec.volumeName and all PVs share one
# StorageClass, so e.g. doris-1-9000g-pvc may bind to ANY matching 9000Gi PV
# regardless of node — pin with volumeName if node placement matters.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-1-9000g-pvc
spec:
  storageClassName: doris-static-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 9000Gi
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-2-9000g-pvc
spec:
  storageClassName: doris-static-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 9000Gi
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-3-9000g-pvc
spec:
  storageClassName: doris-static-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 9000Gi
  volumeMode: Filesystem
---
# NOTE(review): only the first 500Gi claim exists; doris-2-500g-pvc and
# doris-3-500g-pvc have no counterpart in this manifest despite the matching
# PVs being defined above — confirm this is intentional.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-1-500g-pvc
spec:
  storageClassName: doris-static-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Gi
  volumeMode: Filesystem
---

View File

@@ -0,0 +1,6 @@
# Static local StorageClass for the Doris PVs: no dynamic provisioner — PVs
# are created by hand; binding is deferred until a consuming pod is scheduled.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: doris-static-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer

View File

@@ -0,0 +1,43 @@
# NOTE(review): this manifest redefines StorageClass doris-static-storage with
# volumeBindingMode: Immediate, while the main manifest uses
# WaitForFirstConsumer. volumeBindingMode is immutable, so applying both will
# fail on whichever is applied second — do not use alongside the main manifest.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: doris-static-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate
---
# Single 500Gi local PV pinned to node 172.16.100.56.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: doris-1-500g-pv
spec:
  capacity:
    storage: 500Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: doris-static-storage
  local:
    path: /var/lib/docker/doris
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - 172.16.100.56
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-1-500g-pvc
spec:
  storageClassName: doris-static-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 500Gi
  volumeMode: Filesystem
---

View File

@@ -0,0 +1,29 @@
#!/bin/bash
# Reset the root password on every host in ip_list via SSH.
ip_list=(
    192.168.40.63
    192.168.40.60
    192.168.40.56
    192.168.40.67
    192.168.40.59
    192.168.40.57
    192.168.40.51
    192.168.40.58
    192.168.40.62
    192.168.40.65
    192.168.40.61
    192.168.40.52
    192.168.40.50
)

for ip in "${ip_list[@]}"; do
    # Run chpasswd on the remote host, feeding it "user:password" on its
    # stdin.  (The original piped the literal string `chpasswd <<< '...'`
    # into ssh, which only works when the remote login shell is bash; this
    # form is shell-agnostic and otherwise equivalent.)
    ssh "root@${ip}" "echo 'root:V2ryStrP@ss' | chpasswd"
done

# NOTE(review): the original script ended with
#   ssh root@192.168.40.50 <<< 'scyd@lab1234'
# which would execute the string scyd@lab1234 as a remote command (ssh reads
# its command from stdin here) — clearly leftover debris from an earlier
# password, so it is intentionally not reproduced.

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,644 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-awareness
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "awareness",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-iot
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "iot",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-flight-control
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "flight-control",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "secenter",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-pangu
  namespace: zhbf
# NOTE(review): unlike every sibling ConfigMap in this file,
# ApplicationShortName below is empty rather than "pangu" — confirm this is
# intentional.  The value sits inside the served JS payload (runtime data),
# so it is left untouched here.
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "36.141.69.163:8888",
      ApplicationShortName: "",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: zhbf
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "36.141.69.163:8888",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}

View File

@@ -0,0 +1,309 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# Dashboard web UI: single replica serving HTTPS on 8443 with
# auto-generated certificates; image pulled from the private Harbor
# registry 192.168.233.100:8033 via the "harborsecret" pull secret.
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      imagePullSecrets:
        - name: harborsecret
      containers:
        - name: kubernetes-dashboard
          image: 192.168.233.100:8033/cmii/dashboard:v2.0.1
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kube-system
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          # Hardened container settings: no privilege escalation,
          # read-only root filesystem, non-root UID/GID.
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        # Certificates come from the pre-created Secret; /tmp is writable
        # scratch space (required because the root FS is read-only).
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
---
# ClusterIP Service fronting the metrics scraper Deployment on port 8000.
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kube-system
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper
---
# Metrics scraper sidecar Deployment: collects metrics for the Dashboard
# UI; plain HTTP on port 8000, same hardened security settings as the
# Dashboard itself.
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        # Opt the pod into the runtime's default seccomp profile
        # (legacy alpha annotation form).
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: 192.168.233.100:8033/cmii/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            # Writable scratch space; root filesystem is read-only.
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
---
# ServiceAccount used to log in to the Dashboard.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
# NOTE(review): this grants the built-in cluster-admin role, i.e. full
# unrestricted access to the cluster, to the admin-user token. Confirm
# this level of privilege is intended for Dashboard logins.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kube-system

View File

@@ -0,0 +1,276 @@
---
# ServiceAccount for the EMQX pods; bound below to a Role that allows
# endpoint discovery for k8s-based clustering.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: helm-emqxs
  namespace: zhbf
---
# Environment variables injected into the EMQX container (via envFrom):
# k8s discovery settings plus auth defaults.
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-emqxs-env
  namespace: zhbf
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    helm.sh/chart: emqx-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 6.2.0
data:
  EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
  EMQX_NAME: "helm-emqxs"
  # Discover cluster peers through the Kubernetes API, resolving members
  # via the headless service "helm-emqxs-headless" by DNS.
  EMQX_CLUSTER__DISCOVERY: "k8s"
  EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
  EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
  EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
  # NOTE(review): key uses lowercase "namespace" unlike the other
  # uppercase keys — EMQX env mapping is case-insensitive in some
  # versions; confirm this is picked up.
  EMQX_CLUSTER__K8S__namespace: "zhbf"
  EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
  # Require authentication; deny on no matching ACL rule.
  EMQX_ALLOW_ANONYMOUS: "false"
  EMQX_ACL_NOMATCH: "deny"
---
# ConfigMap holding EMQX file-based configuration: mnesia auth
# credentials, ACL rules, and the plugin load list. Each key is mounted
# into the EMQX pod as a single file via subPath (see the StatefulSet).
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-emqxs-cm
  namespace: zhbf
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    helm.sh/chart: emqx-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 6.2.0
data:
  # Static clientid/username credentials for the mnesia auth plugin;
  # passwords are hashed with SHA-256 by the plugin.
  emqx_auth_mnesia.conf: |-
    auth.mnesia.password_hash = sha256
    # clientid 认证数据
    auth.client.1.clientid = admin
    auth.client.1.password = odD8#Ve7.B
    auth.client.2.clientid = cmlc
    auth.client.2.password = odD8#Ve7.B
    ## username 认证数据
    auth.user.1.username = admin
    auth.user.1.password = odD8#Ve7.B
    auth.user.2.username = cmlc
    auth.user.2.password = odD8#Ve7.B
  acl.conf: |-
    {allow, {user, "admin"}, pubsub, ["admin/#"]}.
    {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
    {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
    {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
    {allow, all}.
  # Plugin load list mounted at /opt/emqx/data/loaded_plugins.
  # Fix: the original listed {emqx_auth_mnesia,true}. twice; the
  # redundant duplicate entry has been removed.
  loaded_plugins: |-
    {emqx_auth_mnesia,true}.
    {emqx_management, true}.
    {emqx_recon, true}.
    {emqx_retainer, false}.
    {emqx_dashboard, true}.
    {emqx_telemetry, true}.
    {emqx_rule_engine, true}.
    {emqx_bridge_mqtt, false}.
---
# EMQX broker StatefulSet (1 replica) with persistent mnesia storage and
# config files projected from the helm-emqxs-cm ConfigMap.
# NOTE(review): the image tag is emqx:5.5.1 while the env/ConfigMap use
# EMQX 4.x-style settings (EMQX_CLUSTER__DISCOVERY, loaded_plugins,
# emqx_auth_mnesia) — confirm these are honored by the 5.x image.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-emqxs
  namespace: zhbf
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    helm.sh/chart: emqx-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 6.2.0
spec:
  replicas: 1
  serviceName: helm-emqxs-headless
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      cmii.type: middleware
      cmii.app: helm-emqxs
      cmii.emqx.architecture: cluster
  template:
    metadata:
      labels:
        cmii.type: middleware
        cmii.app: helm-emqxs
        cmii.emqx.architecture: cluster
        helm.sh/chart: emqx-1.1.0
        app.kubernetes.io/managed-by: octopus-control
        app.kubernetes.io/version: 6.2.0
    spec:
      affinity: {}
      imagePullSecrets:
        - name: harborsecret
      serviceAccountName: helm-emqxs
      containers:
        - name: helm-emqxs
          image: 192.168.233.100:8033/cmii/emqx:5.5.1
          imagePullPolicy: Always
          # Standard EMQX listener ports; ekka (4370) is Erlang cluster
          # communication.
          ports:
            - name: mqtt
              containerPort: 1883
            - name: mqttssl
              containerPort: 8883
            - name: mgmt
              containerPort: 8081
            - name: ws
              containerPort: 8083
            - name: wss
              containerPort: 8084
            - name: dashboard
              containerPort: 18083
            - name: ekka
              containerPort: 4370
          envFrom:
            - configMapRef:
                name: helm-emqxs-env
          resources: {}
          volumeMounts:
            # Persistent mnesia database (auth data, retained state).
            - name: emqx-data
              mountPath: "/opt/emqx/data/mnesia"
              readOnly: false
            - name: helm-emqxs-cm
              mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
              subPath: emqx_auth_mnesia.conf
              readOnly: false
            # - name: helm-emqxs-cm
            #   mountPath: "/opt/emqx/etc/acl.conf"
            #   subPath: "acl.conf"
            #   readOnly: false
            - name: helm-emqxs-cm
              mountPath: "/opt/emqx/data/loaded_plugins"
              subPath: loaded_plugins
              readOnly: false
      volumes:
        - name: emqx-data
          persistentVolumeClaim:
            claimName: helm-emqxs
        - name: helm-emqxs-cm
          configMap:
            name: helm-emqxs-cm
            items:
              - key: emqx_auth_mnesia.conf
                path: emqx_auth_mnesia.conf
              - key: acl.conf
                path: acl.conf
              - key: loaded_plugins
                path: loaded_plugins
---
# Role allowing the EMQX pods to list endpoints in their namespace —
# required by the k8s cluster-discovery mechanism configured in
# helm-emqxs-env.
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: helm-emqxs
  namespace: zhbf
rules:
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get
      - watch
      - list
---
# Binds the endpoint-discovery Role to the helm-emqxs ServiceAccount.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: helm-emqxs
  namespace: zhbf
subjects:
  - kind: ServiceAccount
    name: helm-emqxs
    namespace: zhbf
roleRef:
  kind: Role
  name: helm-emqxs
  apiGroup: rbac.authorization.k8s.io
---
# NodePort Service exposing MQTT (31883), the EMQX dashboard (38085) and
# MQTT-over-WebSocket (38083) outside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: helm-emqxs
  namespace: zhbf
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    helm.sh/chart: emqx-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 6.2.0
spec:
  type: NodePort
  selector:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
  ports:
    - port: 1883
      name: mqtt
      targetPort: 1883
      nodePort: 31883
    - port: 18083
      name: dashboard
      targetPort: 18083
      nodePort: 38085
    - port: 8083
      name: mqtt-websocket
      targetPort: 8083
      nodePort: 38083
---
# Headless Service (clusterIP: None) giving each EMQX pod a stable DNS
# identity; referenced by EMQX_CLUSTER__K8S__SERVICE_NAME for peer
# discovery. Exposes all broker ports cluster-internally.
apiVersion: v1
kind: Service
metadata:
  name: helm-emqxs-headless
  namespace: zhbf
  labels:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    helm.sh/chart: emqx-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 6.2.0
spec:
  type: ClusterIP
  clusterIP: None
  selector:
    cmii.type: middleware
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
  ports:
    - name: mqtt
      port: 1883
      protocol: TCP
      targetPort: 1883
    - name: mqttssl
      port: 8883
      protocol: TCP
      targetPort: 8883
    - name: mgmt
      port: 8081
      protocol: TCP
      targetPort: 8081
    - name: websocket
      port: 8083
      protocol: TCP
      targetPort: 8083
    - name: wss
      port: 8084
      protocol: TCP
      targetPort: 8084
    - name: dashboard
      port: 18083
      protocol: TCP
      targetPort: 18083
    - name: ekka
      port: 4370
      protocol: TCP
      targetPort: 4370

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,806 @@
---
# Ingress routing every frontend SPA under a path prefix of the shared
# fake-domain host. Each "/app" path is rewritten to "/" for its backend
# ("rewrite-target: /$1" with the capture group in each path), and each
# bare "/app" is first redirected to "/app/" by the configuration
# snippet so relative assets resolve.
# NOTE(review): networking.k8s.io/v1beta1 Ingress was removed in
# Kubernetes 1.22 — this manifest only works on clusters < 1.22.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: frontend-applications-ingress
  namespace: zhbf
  labels:
    type: frontend
    octopus.control: all-ingress-config-wdd
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 6.2.0
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    nginx.ingress.kubernetes.io/configuration-snippet: |
      rewrite ^(/supervision)$ $1/ redirect;
      rewrite ^(/supervisionh5)$ $1/ redirect;
      rewrite ^(/pangu)$ $1/ redirect;
      rewrite ^(/ai-brain)$ $1/ redirect;
      rewrite ^(/armypeople)$ $1/ redirect;
      rewrite ^(/awareness)$ $1/ redirect;
      rewrite ^(/base)$ $1/ redirect;
      rewrite ^(/blockchain)$ $1/ redirect;
      rewrite ^(/classification)$ $1/ redirect;
      rewrite ^(/cmsportal)$ $1/ redirect;
      rewrite ^(/detection)$ $1/ redirect;
      rewrite ^(/dikongzhixingh5)$ $1/ redirect;
      rewrite ^(/dispatchh5)$ $1/ redirect;
      rewrite ^(/emergency)$ $1/ redirect;
      rewrite ^(/eventsh5)$ $1/ redirect;
      rewrite ^(/flight-control)$ $1/ redirect;
      rewrite ^(/hljtt)$ $1/ redirect;
      rewrite ^(/hyper)$ $1/ redirect;
      rewrite ^(/iot)$ $1/ redirect;
      rewrite ^(/jiangsuwenlv)$ $1/ redirect;
      rewrite ^(/logistics)$ $1/ redirect;
      rewrite ^(/media)$ $1/ redirect;
      rewrite ^(/mianyangbackend)$ $1/ redirect;
      rewrite ^(/multiterminal)$ $1/ redirect;
      rewrite ^(/mws)$ $1/ redirect;
      rewrite ^(/oms)$ $1/ redirect;
      rewrite ^(/open)$ $1/ redirect;
      rewrite ^(/pilot2cloud)$ $1/ redirect;
      rewrite ^(/qingdao)$ $1/ redirect;
      rewrite ^(/qinghaitourism)$ $1/ redirect;
      rewrite ^(/renyike)$ $1/ redirect;
      rewrite ^(/scanner)$ $1/ redirect;
      rewrite ^(/security)$ $1/ redirect;
      rewrite ^(/securityh5)$ $1/ redirect;
      rewrite ^(/seniclive)$ $1/ redirect;
      rewrite ^(/share)$ $1/ redirect;
      rewrite ^(/smauth)$ $1/ redirect;
      rewrite ^(/smsecret)$ $1/ redirect;
      rewrite ^(/splice)$ $1/ redirect;
      rewrite ^(/threedsimulation)$ $1/ redirect;
      rewrite ^(/traffic)$ $1/ redirect;
      rewrite ^(/uas)$ $1/ redirect;
      rewrite ^(/uasms)$ $1/ redirect;
      rewrite ^(/visualization)$ $1/ redirect;
      rewrite ^(/uavmsmanager)$ $1/ redirect;
      rewrite ^(/secenter)$ $1/ redirect;
spec:
  rules:
    - host: fake-domain.zhbf.io
      http:
        paths:
          # Catch-all root app; all frontends serve on port 9528.
          - path: /?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform
              servicePort: 9528
          - path: /supervision/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-suav-platform-supervision
              servicePort: 9528
          - path: /supervisionh5/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-suav-platform-supervisionh5
              servicePort: 9528
          - path: /pangu/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform
              servicePort: 9528
          - path: /ai-brain/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-ai-brain
              servicePort: 9528
          - path: /armypeople/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-armypeople
              servicePort: 9528
          - path: /awareness/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-awareness
              servicePort: 9528
          - path: /base/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-base
              servicePort: 9528
          - path: /blockchain/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-blockchain
              servicePort: 9528
          - path: /classification/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-classification
              servicePort: 9528
          - path: /cmsportal/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-cms-portal
              servicePort: 9528
          - path: /detection/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-detection
              servicePort: 9528
          - path: /dikongzhixingh5/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-dikongzhixingh5
              servicePort: 9528
          - path: /dispatchh5/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-dispatchh5
              servicePort: 9528
          - path: /emergency/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-emergency-rescue
              servicePort: 9528
          - path: /eventsh5/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-eventsh5
              servicePort: 9528
          - path: /flight-control/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-flight-control
              servicePort: 9528
          - path: /hljtt/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-hljtt
              servicePort: 9528
          - path: /hyper/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-hyperspectral
              servicePort: 9528
          - path: /iot/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-iot-manager
              servicePort: 9528
          - path: /jiangsuwenlv/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-jiangsuwenlv
              servicePort: 9528
          - path: /logistics/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-logistics
              servicePort: 9528
          - path: /media/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-media
              servicePort: 9528
          - path: /mianyangbackend/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-mianyangbackend
              servicePort: 9528
          - path: /multiterminal/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-multiterminal
              servicePort: 9528
          - path: /mws/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-mws
              servicePort: 9528
          - path: /oms/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-oms
              servicePort: 9528
          - path: /open/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-open
              servicePort: 9528
          - path: /pilot2cloud/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-pilot2-to-cloud
              servicePort: 9528
          - path: /qingdao/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-qingdao
              servicePort: 9528
          - path: /qinghaitourism/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-qinghaitourism
              servicePort: 9528
          - path: /renyike/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-renyike
              servicePort: 9528
          - path: /scanner/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-scanner
              servicePort: 9528
          - path: /security/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-security
              servicePort: 9528
          - path: /securityh5/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-securityh5
              servicePort: 9528
          - path: /seniclive/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-seniclive
              servicePort: 9528
          - path: /share/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-share
              servicePort: 9528
          - path: /smauth/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-smauth
              servicePort: 9528
          - path: /smsecret/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-smsecret
              servicePort: 9528
          - path: /splice/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-splice
              servicePort: 9528
          - path: /threedsimulation/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-threedsimulation
              servicePort: 9528
          - path: /traffic/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-traffic
              servicePort: 9528
          - path: /uas/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-uas
              servicePort: 9528
          - path: /uasms/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-uasms
              servicePort: 9528
          - path: /visualization/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-platform-visualization
              servicePort: 9528
          - path: /uavmsmanager/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uavms-platform-manager
              servicePort: 9528
          - path: /secenter/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uavms-platform-security-center
              servicePort: 9528
---
# Ingress exposing every backend microservice on its own virtual host
# (<service>.uavcloud-zhbf.io -> <service>:8080). No path rewriting —
# each host simply forwards "/" to the matching ClusterIP Service.
# NOTE(review): networking.k8s.io/v1beta1 Ingress was removed in
# Kubernetes 1.22 — this manifest only works on clusters < 1.22.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: backend-applications-ingress
  namespace: zhbf
  labels:
    type: backend
    octopus.control: all-ingress-config-wdd
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 6.2.0
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
  rules:
    - host: cmii-admin-data.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-admin-data
              servicePort: 8080
    - host: cmii-admin-gateway.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-admin-gateway
              servicePort: 8080
    - host: cmii-admin-user.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-admin-user
              servicePort: 8080
    - host: cmii-app-release.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-app-release
              servicePort: 8080
    - host: cmii-open-gateway.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-open-gateway
              servicePort: 8080
    - host: cmii-sky-converge.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-sky-converge
              servicePort: 8080
    - host: cmii-suav-supervision.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-suav-supervision
              servicePort: 8080
    - host: cmii-uas-datahub.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uas-datahub
              servicePort: 8080
    - host: cmii-uas-gateway.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uas-gateway
              servicePort: 8080
    - host: cmii-uas-lifecycle.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uas-lifecycle
              servicePort: 8080
    - host: cmii-uav-advanced5g.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-advanced5g
              servicePort: 8080
    - host: cmii-uav-airspace.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-airspace
              servicePort: 8080
    - host: cmii-uav-alarm.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-alarm
              servicePort: 8080
    - host: cmii-uav-autowaypoint.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-autowaypoint
              servicePort: 8080
    - host: cmii-uav-brain.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-brain
              servicePort: 8080
    - host: cmii-uav-bridge.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-bridge
              servicePort: 8080
    - host: cmii-uav-cloud-live.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-cloud-live
              servicePort: 8080
    - host: cmii-uav-clusters.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-clusters
              servicePort: 8080
    - host: cmii-uav-cms.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-cms
              servicePort: 8080
    - host: cmii-uav-data-post-process.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-data-post-process
              servicePort: 8080
    - host: cmii-uav-depotautoreturn.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-depotautoreturn
              servicePort: 8080
    - host: cmii-uav-developer.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-developer
              servicePort: 8080
    - host: cmii-uav-device.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-device
              servicePort: 8080
    - host: cmii-uav-emergency.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-emergency
              servicePort: 8080
    - host: cmii-uav-fwdd.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-fwdd
              servicePort: 8080
    - host: cmii-uav-gateway.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-gateway
              servicePort: 8080
    - host: cmii-uav-gis-server.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-gis-server
              servicePort: 8080
    - host: cmii-uav-grid-datasource.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-grid-datasource
              servicePort: 8080
    - host: cmii-uav-grid-engine.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-grid-engine
              servicePort: 8080
    - host: cmii-uav-grid-manage.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-grid-manage
              servicePort: 8080
    - host: cmii-uav-industrial-portfolio.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-industrial-portfolio
              servicePort: 8080
    - host: cmii-uav-integration.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-integration
              servicePort: 8080
    - host: cmii-uav-iot-dispatcher.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-iot-dispatcher
              servicePort: 8080
    - host: cmii-uav-iot-manager.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-iot-manager
              servicePort: 8080
    - host: cmii-uav-kpi-monitor.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-kpi-monitor
              servicePort: 8080
    - host: cmii-uav-logger.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-logger
              servicePort: 8080
    - host: cmii-uav-material-warehouse.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-material-warehouse
              servicePort: 8080
    - host: cmii-uav-mission.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-mission
              servicePort: 8080
    - host: cmii-uav-mqtthandler.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-mqtthandler
              servicePort: 8080
    - host: cmii-uav-multilink.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-multilink
              servicePort: 8080
    - host: cmii-uav-notice.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-notice
              servicePort: 8080
    - host: cmii-uav-oauth.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-oauth
              servicePort: 8080
    - host: cmii-uav-process.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-process
              servicePort: 8080
    - host: cmii-uav-sec-awareness.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-sec-awareness
              servicePort: 8080
    - host: cmii-uav-security-trace.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-security-trace
              servicePort: 8080
    - host: cmii-uav-sense-adapter.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-sense-adapter
              servicePort: 8080
    - host: cmii-uav-surveillance.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-surveillance
              servicePort: 8080
    - host: cmii-uav-sync.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-sync
              servicePort: 8080
    - host: cmii-uav-threedsimulation.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-threedsimulation
              servicePort: 8080
    - host: cmii-uav-tower.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-tower
              servicePort: 8080
    - host: cmii-uav-user.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-user
              servicePort: 8080
    - host: cmii-uav-watchdog.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-watchdog
              servicePort: 8080
    - host: cmii-uav-waypoint.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-waypoint
              servicePort: 8080
    - host: cmii-uavms-pyfusion.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uavms-pyfusion
              servicePort: 8080
    - host: cmii-uavms-security-center.uavcloud-zhbf.io
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uavms-security-center
              servicePort: 8080
---
# Ingress for the API gateways on the shared fake-domain host. Each
# "/xxx/api" prefix is stripped down to the captured remainder via
# rewrite-target: /$1; the configuration snippet forwards WebSocket
# upgrade headers to the gateways.
# NOTE(review): networking.k8s.io/v1beta1 Ingress was removed in
# Kubernetes 1.22 — this manifest only works on clusters < 1.22.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: all-gateways-ingress
  namespace: zhbf
  labels:
    type: api-gateway
    octopus.control: all-ingress-config-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 6.2.0
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/enable-cors: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header upgradePrefix $http_upgrade;
      proxy_set_header Connection "upgradePrefix";
spec:
  rules:
    - host: fake-domain.zhbf.io
      http:
        paths:
          # More specific prefixes (/oms/api, /uas/api, …) must precede
          # the generic /api catch-all.
          - path: /oms/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-admin-gateway
              servicePort: 8080
          - path: /open/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-open-gateway
              servicePort: 8080
          - path: /api/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uav-gateway
              servicePort: 8080
          - path: /uas/api/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-uas-gateway
              servicePort: 8080
          - path: /converge/?(.*)
            pathType: ImplementationSpecific
            backend:
              serviceName: cmii-sky-converge
              servicePort: 8080

View File

@@ -0,0 +1,78 @@
---
# NodePort Service exposing MongoDB on node port 37017 (container 27017).
apiVersion: v1
kind: Service
metadata:
  name: helm-mongo
  namespace: zhbf
  labels:
    cmii.app: helm-mongo
    cmii.type: middleware
    helm.sh/chart: mongo-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 6.2.0
spec:
  type: NodePort
  selector:
    cmii.app: helm-mongo
    cmii.type: middleware
  ports:
    - port: 27017
      name: server-27017
      targetPort: 27017
      nodePort: 37017
---
# Single-replica MongoDB 5.0 StatefulSet with root credentials supplied
# via env vars and data persisted to the helm-mongo PVC.
# NOTE(review): credentials are plain-text env values here — consider a
# Secret, as done for MySQL below.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-mongo
  namespace: zhbf
  labels:
    cmii.app: helm-mongo
    cmii.type: middleware
    helm.sh/chart: mongo-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 6.2.0
spec:
  serviceName: helm-mongo
  replicas: 1
  selector:
    matchLabels:
      cmii.app: helm-mongo
      cmii.type: middleware
  template:
    metadata:
      labels:
        cmii.app: helm-mongo
        cmii.type: middleware
        helm.sh/chart: mongo-1.1.0
        app.kubernetes.io/managed-by: octopus-control
        app.kubernetes.io/version: 6.2.0
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      imagePullSecrets:
        - name: harborsecret
      affinity: {}
      containers:
        - name: helm-mongo
          image: 192.168.233.100:8033/cmii/mongo:5.0
          resources: {}
          ports:
            - containerPort: 27017
              name: mongo27017
              protocol: TCP
          env:
            - name: MONGO_INITDB_ROOT_USERNAME
              value: cmlc
            - name: MONGO_INITDB_ROOT_PASSWORD
              value: REdPza8#oVlt
          volumeMounts:
            # Data lives under a namespaced subPath on the shared volume.
            - name: mongo-data
              mountPath: /data/db
              readOnly: false
              subPath: default/helm-mongo/data/db
      volumes:
        - name: mongo-data
          persistentVolumeClaim:
            claimName: helm-mongo
---

View File

@@ -0,0 +1,410 @@
---
# ServiceAccount for the MySQL StatefulSet.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: helm-mysql
  namespace: zhbf
  labels:
    app.kubernetes.io/name: mysql-db
    octopus.control: mysql-db-wdd
    app.kubernetes.io/release: zhbf
    app.kubernetes.io/managed-by: octopus
  annotations: {}
secrets:
  - name: helm-mysql
---
# Base64-encoded root and application passwords consumed by the
# StatefulSet via secretKeyRef.
apiVersion: v1
kind: Secret
metadata:
  name: helm-mysql
  namespace: zhbf
  labels:
    app.kubernetes.io/name: mysql-db
    octopus.control: mysql-db-wdd
    app.kubernetes.io/release: zhbf
    app.kubernetes.io/managed-by: octopus
type: Opaque
data:
  mysql-root-password: "UXpmWFFoZDNiUQ=="
  mysql-password: "S0F0cm5PckFKNw=="
---
# MySQL server configuration (my.cnf) mounted into the StatefulSet.
# Fixes relative to the original:
#   * removed a duplicated "innodb_log_files_in_group = 4" line;
#   * removed the dead "max_allowed_packet=128M" — it was overridden by
#     the later "max_allowed_packet=32M" in the same [mysqld] section
#     (MySQL takes the last occurrence), so the effective value of 32M
#     is unchanged but no longer misleading.
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-mysql
  namespace: zhbf
  labels:
    app.kubernetes.io/name: mysql-db
    octopus.control: mysql-db-wdd
    app.kubernetes.io/release: zhbf
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/component: primary
data:
  my.cnf: |-
    [mysqld]
    port=3306
    basedir=/opt/bitnami/mysql
    datadir=/bitnami/mysql/data
    pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
    socket=/opt/bitnami/mysql/tmp/mysql.sock
    log-error=/bitnami/mysql/data/error.log
    general_log_file = /bitnami/mysql/data/general.log
    slow_query_log_file = /bitnami/mysql/data/slow.log
    innodb_data_file_path = ibdata1:512M:autoextend
    innodb_buffer_pool_size = 512M
    innodb_buffer_pool_instances = 2
    innodb_log_file_size = 512M
    innodb_log_files_in_group = 4
    log-bin = /bitnami/mysql/data/mysql-bin
    max_binlog_size=1G
    transaction_isolation = REPEATABLE-READ
    default_storage_engine = innodb
    character-set-server = utf8mb4
    collation-server=utf8mb4_bin
    binlog_format = ROW
    binlog_rows_query_log_events=on
    binlog_cache_size=4M
    binlog_expire_logs_seconds = 1296000
    max_binlog_cache_size=2G
    gtid_mode = on
    enforce_gtid_consistency = 1
    sync_binlog = 1
    innodb_flush_log_at_trx_commit = 1
    innodb_flush_method = O_DIRECT
    log_slave_updates=1
    relay_log_recovery = 1
    relay-log-purge = 1
    default_time_zone = '+08:00'
    lower_case_table_names=1
    log_bin_trust_function_creators=1
    group_concat_max_len=67108864
    innodb_io_capacity = 4000
    innodb_io_capacity_max = 8000
    innodb_flush_sync = 0
    innodb_flush_neighbors = 0
    innodb_write_io_threads = 8
    innodb_read_io_threads = 8
    innodb_purge_threads = 4
    innodb_page_cleaners = 4
    innodb_open_files = 65535
    innodb_max_dirty_pages_pct = 50
    innodb_lru_scan_depth = 4000
    innodb_checksum_algorithm = crc32
    innodb_lock_wait_timeout = 10
    innodb_rollback_on_timeout = 1
    innodb_print_all_deadlocks = 1
    innodb_file_per_table = 1
    innodb_online_alter_log_max_size = 4G
    innodb_stats_on_metadata = 0
    innodb_thread_concurrency = 0
    innodb_sync_spin_loops = 100
    innodb_spin_wait_delay = 30
    lock_wait_timeout = 3600
    slow_query_log = 1
    long_query_time = 10
    log_queries_not_using_indexes =1
    log_throttle_queries_not_using_indexes = 60
    min_examined_row_limit = 100
    log_slow_admin_statements = 1
    log_slow_slave_statements = 1
    default_authentication_plugin=mysql_native_password
    skip-name-resolve=1
    explicit_defaults_for_timestamp=1
    plugin_dir=/opt/bitnami/mysql/plugin
    max_connections = 2000
    max_connect_errors = 1000000
    table_definition_cache=2000
    table_open_cache_instances=64
    tablespace_definition_cache=1024
    thread_cache_size=256
    interactive_timeout = 600
    wait_timeout = 600
    tmpdir=/opt/bitnami/mysql/tmp
    max_allowed_packet=32M
    bind-address=0.0.0.0
    performance_schema = 1
    performance_schema_instrument = '%memory%=on'
    performance_schema_instrument = '%lock%=on'
    innodb_monitor_enable=ALL
    [mysql]
    no-auto-rehash
    [mysqldump]
    quick
    max_allowed_packet = 32M
    [client]
    port=3306
    socket=/opt/bitnami/mysql/tmp/mysql.sock
    default-character-set=UTF8
    plugin_dir=/opt/bitnami/mysql/plugin
    [manager]
    port=3306
    socket=/opt/bitnami/mysql/tmp/mysql.sock
    pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
# First-boot SQL executed by the MySQL image: creates operational user
# accounts (read-only, admin, backup, monitoring) with their grants.
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-mysql-init-scripts
  namespace: zhbf
  labels:
    app.kubernetes.io/name: mysql-db
    octopus.control: mysql-db-wdd
    app.kubernetes.io/release: zhbf
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/component: primary
data:
  create_users_grants_core.sql: |-
    create user zyly@'%' identified by 'Cmii@451315';
    grant select on *.* to zyly@'%';
    create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
    grant all on *.* to zyly_qc@'%';
    create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
    grant all on *.* to k8s_admin@'%';
    create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
    grant all on *.* to audit_dba@'%';
    create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
    GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
    create user monitor@'%' identified by 'PL3#nGtrWbf-';
    grant REPLICATION CLIENT on *.* to monitor@'%';
    flush privileges;
---
# In-cluster alias Service: cmii-mysql:13306 -> container port "mysql".
kind: Service
apiVersion: v1
metadata:
  name: cmii-mysql
  namespace: zhbf
  labels:
    app.kubernetes.io/component: primary
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/name: mysql-db
    app.kubernetes.io/release: zhbf
    cmii.app: mysql
    cmii.type: middleware
    octopus.control: mysql-db-wdd
spec:
  ports:
    - name: mysql
      protocol: TCP
      port: 13306
      targetPort: mysql
  selector:
    app.kubernetes.io/component: primary
    app.kubernetes.io/name: mysql-db
    app.kubernetes.io/release: zhbf
    cmii.app: mysql
    cmii.type: middleware
  type: ClusterIP
---
# Headless Service backing the StatefulSet (stable per-pod DNS);
# publishNotReadyAddresses lets peers resolve pods before readiness.
apiVersion: v1
kind: Service
metadata:
  name: helm-mysql-headless
  namespace: zhbf
  labels:
    app.kubernetes.io/name: mysql-db
    octopus.control: mysql-db-wdd
    app.kubernetes.io/release: zhbf
    app.kubernetes.io/managed-by: octopus
    cmii.type: middleware
    cmii.app: mysql
    app.kubernetes.io/component: primary
  annotations: {}
spec:
  type: ClusterIP
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: mysql
      port: 3306
      targetPort: mysql
  selector:
    app.kubernetes.io/name: mysql-db
    app.kubernetes.io/release: zhbf
    cmii.type: middleware
    cmii.app: mysql
    app.kubernetes.io/component: primary
---
# NodePort Service exposing MySQL externally on node port 33306.
apiVersion: v1
kind: Service
metadata:
  name: helm-mysql
  namespace: zhbf
  labels:
    app.kubernetes.io/name: mysql-db
    octopus.control: mysql-db-wdd
    app.kubernetes.io/release: zhbf
    app.kubernetes.io/managed-by: octopus
    cmii.type: middleware
    cmii.app: mysql
    app.kubernetes.io/component: primary
  annotations: {}
spec:
  type: NodePort
  ports:
    - name: mysql
      port: 3306
      protocol: TCP
      targetPort: mysql
      nodePort: 33306
  selector:
    app.kubernetes.io/name: mysql-db
    app.kubernetes.io/release: zhbf
    cmii.type: middleware
    cmii.app: mysql
    app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: zhbf
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: zhbf
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity: {}
nodeSelector:
mysql-deploy: "true"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 192.168.233.100:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 192.168.233.100:8033/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/zhbf/

View File

@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-nacos-cm
  namespace: zhbf
  labels:
    cmii.app: helm-nacos
    cmii.type: middleware
    octopus.control: nacos-wdd
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: 6.2.0
data:
  # MySQL connection settings consumed by the helm-nacos StatefulSet
  # through configMapKeyRef (MYSQL_SERVICE_* env vars).
  mysql.db.name: "cmii_nacos_config"
  mysql.db.host: "helm-mysql"
  mysql.port: "3306"
  mysql.user: "k8s_admin"
  # NOTE(review): database password stored in plaintext in a ConfigMap and
  # committed to the repo — should be moved to a Secret (secretKeyRef) and
  # rotated. Left in place because the StatefulSet currently reads it via
  # configMapKeyRef.
  mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
  name: helm-nacos
  namespace: zhbf
  labels:
    cmii.app: helm-nacos
    cmii.type: middleware
    octopus.control: nacos-wdd
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: 6.2.0
spec:
  # Console/API exposed outside the cluster on NodePort 38848.
  type: NodePort
  selector:
    cmii.app: helm-nacos
    cmii.type: middleware
  ports:
    # 8848: HTTP console / OpenAPI port.
    - port: 8848
      name: server
      targetPort: 8848
      nodePort: 38848
    # 9848: Nacos 2.x gRPC client port (fixed offset: server port + 1000).
    # NOTE(review): port names "server12"/"server23" are uninformative —
    # consider "client-rpc"/"raft-rpc" on a future revision.
    - port: 9848
      name: server12
      targetPort: 9848
    # 9849: Nacos 2.x server-to-server gRPC port (server port + 1001).
    - port: 9849
      name: server23
      targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: zhbf
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.2.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 6.2.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: 192.168.233.100:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---

View File

@@ -0,0 +1,38 @@
---
# Smoke-test PVC: verifies that dynamic provisioning via the
# nfs-prod-distribute StorageClass works before real workloads use it.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name of the StorageClass
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: nfs-prod-distribute
  resources:
    requests:
      storage: 1Mi
---
# Smoke-test Pod: mounts the test-claim PVC and writes a marker file.
# Pod completing successfully proves the NFS provisioner path is writable.
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  imagePullSecrets:
    - name: harborsecret
  containers:
    - name: test-pod
      image: 192.168.233.100:8033/cmii/busybox:latest
      command:
        - "/bin/sh"
      args:
        - "-c"
        - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" # create a SUCCESS marker file, then exit
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim # must match the PVC name

View File

@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system # set per environment; keep consistent with the RBAC objects below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: ClusterRole
  # name: nfs-client-provisioner-runner
  # NOTE(review): binds the full cluster-admin role instead of the
  # purpose-built nfs-client-provisioner-runner ClusterRole defined above —
  # a significant over-grant for this ServiceAccount. Consider reverting to
  # the commented-out role name.
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-prod-distribute
# Must match the PROVISIONER_NAME env var of the nfs-client-provisioner Deployment.
provisioner: cmlc-nfs-storage
# Fix: this parameters block had been accidentally folded into the trailing
# comment on the provisioner line, silently dropping the setting. With
# archiveOnDelete "false" the provisioner deletes (rather than archives)
# backing directories when a PVC is removed.
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system #与RBAC文件中的namespace保持一致
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 192.168.233.100:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 192.168.233.100
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 192.168.233.100
path: /var/lib/docker/nfs_data

View File

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: zhbf
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: zhbf
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: zhbf
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: zhbf
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.2.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi

View File

@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: zhbf
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
  name: helm-rabbitmq
  namespace: zhbf
  labels:
    app.kubernetes.io/name: helm-rabbitmq
    helm.sh/chart: rabbitmq-8.26.1
    app.kubernetes.io/release: zhbf
    app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
  # NOTE(review): static base64-encoded credentials committed to the repo;
  # the same password also appears in plaintext in the helm-rabbitmq-config
  # ConfigMap (default_pass). Rotate and manage via sealed/external secrets.
  rabbitmq-password: "blljUk45MXIuX2hq"
  rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: zhbf
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: zhbf
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: zhbf
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
  name: helm-rabbitmq-headless
  namespace: zhbf
  labels:
    app.kubernetes.io/name: helm-rabbitmq
    helm.sh/chart: rabbitmq-8.26.1
    app.kubernetes.io/release: zhbf
    app.kubernetes.io/managed-by: rabbitmq
spec:
  # Headless service backing the StatefulSet's stable DNS names
  # (also used for peer discovery, see K8S_SERVICE_NAME in the StatefulSet).
  clusterIP: None
  ports:
    - name: epmd
      port: 4369
      targetPort: epmd
    - name: amqp
      port: 5672
      targetPort: amqp
    - name: dist
      port: 25672
      targetPort: dist
    - name: dashboard
      port: 15672
      # Fix: was "targetPort: stats", but the helm-rabbitmq StatefulSet names
      # its container ports amqp/dist/dashboard/epmd — "stats" matches nothing,
      # so the management UI port never resolved through this service.
      targetPort: dashboard
  selector:
    app.kubernetes.io/name: helm-rabbitmq
    app.kubernetes.io/release: zhbf
  # Publish addresses before pods are Ready so peers can discover each other
  # during cluster formation.
  publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: zhbf
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: zhbf
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: zhbf
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: zhbf
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 192.168.233.100:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 192.168.233.100:8033/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq

View File

@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: zhbf
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: zhbf
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: zhbf
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: zhbf
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: zhbf
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: zhbf
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: zhbf
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: zhbf
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: zhbf
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: zhbf
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: zhbf
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: zhbf
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: zhbf
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: {}
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 192.168.233.100:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: zhbf
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: zhbf
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: zhbf
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 192.168.233.100:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.zhbf.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
subPath:
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}

View File

@@ -0,0 +1,496 @@
---
# ConfigMap carrying the SRS (Simple Realtime Server) configuration for the
# live-streaming stack. The single key is mounted into the srs-rtc StatefulSet
# as /usr/local/srs/conf/docker.conf.
kind: ConfigMap
apiVersion: v1
metadata:
  name: helm-live-srs-cm
  namespace: zhbf
  labels:
    cmii.app: live-srs
    cmii.type: live
    octopus.control: wdd
    app.kubernetes.io/managed-by: octopus
    helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
  # Rendered verbatim as the SRS config file (mounted under the name docker.conf).
  # NOTE(review): the listen ports below (31935 RTMP, 30556 SRT, 30090 WebRTC,
  # 1985 API, 8080 HTTP) must stay in sync with the helm-live-srs-svc-exporter
  # NodePort service — confirm both when changing either.
  srs.rtc.conf: |-
    listen 31935;
    max_connections 4096;
    srs_log_tank console;
    srs_log_level info;
    srs_log_file /home/srs.log;
    daemon off;
    http_api {
        enabled on;
        listen 1985;
        crossdomain on;
    }
    stats {
        network 0;
    }
    http_server {
        enabled on;
        listen 8080;
        dir /home/hls;
    }
    srt_server {
        enabled on;
        listen 30556;
        maxbw 1000000000;
        connect_timeout 4000;
        peerlatency 600;
        recvlatency 600;
    }
    rtc_server {
        enabled on;
        listen 30090;
        candidate $CANDIDATE;
    }
    vhost __defaultVhost__ {
        http_hooks {
            enabled on;
            on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
        }
        http_remux {
            enabled on;
        }
        rtc {
            enabled on;
            rtmp_to_rtc on;
            rtc_to_rtmp on;
            keep_bframe off;
        }
        tcp_nodelay on;
        min_latency on;
        play {
            gop_cache off;
            mw_latency 100;
            mw_msgs 10;
        }
        publish {
            firstpkt_timeout 8000;
            normal_timeout 4000;
            mr on;
        }
        dvr {
            enabled off;
            dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
            dvr_plan session;
        }
        hls {
            enabled on;
            hls_path /home/hls;
            hls_fragment 10;
            hls_window 60;
            hls_m3u8_file [app]/[stream].m3u8;
            hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
            hls_cleanup on;
            hls_entry_prefix http://36.141.69.163:8888;
        }
    }
---
# NodePort service exposing the SRS streaming ports to the outside world.
# Each entry pins nodePort == port so external clients can use the same
# port number the SRS config listens on.
kind: Service
apiVersion: v1
metadata:
  name: helm-live-srs-svc-exporter
  namespace: zhbf
  labels:
    octopus.control: wdd
    app.kubernetes.io/managed-by: octopus
spec:
  ports:
    # RTMP ingest/playback.
    - name: rtmp
      protocol: TCP
      port: 31935
      targetPort: 31935
      nodePort: 31935
    # WebRTC media — exposed on the same port over both UDP and TCP.
    - name: rtc
      protocol: UDP
      port: 30090
      targetPort: 30090
      nodePort: 30090
    - name: rtc-tcp
      protocol: TCP
      port: 30090
      targetPort: 30090
      nodePort: 30090
    # SRT ingest.
    - name: srt
      protocol: UDP
      port: 30556
      targetPort: 30556
      nodePort: 30556
    # SRS HTTP API (1985 in-cluster) published externally as 30080.
    - name: api
      protocol: TCP
      port: 1985
      targetPort: 1985
      nodePort: 30080
  selector:
    srs-role: rtc
  type: NodePort
  sessionAffinity: None
  externalTrafficPolicy: Cluster
---
# In-cluster (ClusterIP) service for the SRS HTTP endpoints: HLS/HTTP-FLV on
# 8080 and the SRS API on 1985. Referenced by the operator config as
# http://helm-live-srs-svc:1985.
kind: Service
apiVersion: v1
metadata:
  name: helm-live-srs-svc
  namespace: zhbf
  labels:
    octopus.control: wdd
    app.kubernetes.io/managed-by: octopus
spec:
  ports:
    - name: http
      protocol: TCP
      port: 8080
      targetPort: 8080
    - name: api
      protocol: TCP
      port: 1985
      targetPort: 1985
  selector:
    srs-role: rtc
  type: ClusterIP
  sessionAffinity: None
---
# Headless-style governing service for the srs-rtc StatefulSet (declared as
# its serviceName) — also exposes RTMP in-cluster on 31935.
kind: Service
apiVersion: v1
metadata:
  name: helm-live-srsrtc-svc
  namespace: zhbf
  labels:
    octopus.control: wdd
    app.kubernetes.io/managed-by: octopus
spec:
  ports:
    - name: rtmp
      protocol: TCP
      port: 31935
      targetPort: 31935
  selector:
    srs-role: rtc
  type: ClusterIP
  sessionAffinity: None
---
# StatefulSet running the SRS media server plus an OSS adaptor sidecar that
# uploads HLS segments to MinIO and records them in MySQL. Both containers
# share the srs-vol emptyDir for HLS/DVR files.
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: helm-live-srs-rtc
  namespace: zhbf
  labels:
    octopus.control: wdd
    app.kubernetes.io/managed-by: octopus
    cmii.app: live-srs
    cmii.type: live
    helm.sh/chart: cmlc-live-srs-rtc-2.0.0
    srs-role: rtc
spec:
  replicas: 1
  selector:
    matchLabels:
      srs-role: rtc
  template:
    metadata:
      labels:
        srs-role: rtc
    spec:
      volumes:
        # SRS config file projected from the helm-live-srs-cm ConfigMap.
        - name: srs-conf-file
          configMap:
            name: helm-live-srs-cm
            items:
              - key: srs.rtc.conf
                path: docker.conf
            defaultMode: 420
        # Node-local scratch space shared by SRS and the OSS adaptor;
        # contents are lost when the pod is rescheduled.
        - name: srs-vol
          emptyDir:
            sizeLimit: 8Gi
      containers:
        - name: srs-rtc
          image: 192.168.233.100:8033/cmii/srs:v5.0.195
          # Port numbers mirror the listen directives in srs.rtc.conf.
          ports:
            - name: srs-rtmp
              containerPort: 31935
              protocol: TCP
            - name: srs-api
              containerPort: 1985
              protocol: TCP
            - name: srs-flv
              containerPort: 8080
              protocol: TCP
            - name: srs-webrtc
              containerPort: 30090
              protocol: UDP
            - name: srs-webrtc-tcp
              containerPort: 30090
              protocol: TCP
            - name: srs-srt
              containerPort: 30556
              protocol: UDP
          env:
            # Public IP advertised in WebRTC ICE candidates (consumed by the
            # $CANDIDATE placeholder in srs.rtc.conf). Hard-coded per site.
            - name: CANDIDATE
              value: 36.141.69.163
          resources:
            limits:
              cpu: 2000m
              memory: 4Gi
            requests:
              cpu: 100m
              memory: 256Mi
          volumeMounts:
            - name: srs-conf-file
              mountPath: /usr/local/srs/conf/docker.conf
              subPath: docker.conf
            - name: srs-vol
              mountPath: /home/dvr
              subPath: zhbf/helm-live/dvr
            - name: srs-vol
              mountPath: /home/hls
              subPath: zhbf/helm-live/hls
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
        # Sidecar: watches the shared HLS directory and pushes segments to
        # MinIO, writing segment metadata to the live_segment MySQL table.
        - name: oss-adaptor
          image: 192.168.233.100:8033/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL
          # NOTE(review): OSS and MySQL credentials are plain-text env values;
          # consider moving them to a Secret.
          env:
            - name: OSS_ENDPOINT
              value: 'http://192.168.233.100:9000'
            - name: OSS_AK
              value: cmii
            - name: OSS_SK
              value: 'B#923fC7mk'
            - name: OSS_BUCKET
              value: live-cluster-hls
            - name: SRS_OP
              value: 'http://helm-live-op-svc-v2:8080'
            - name: MYSQL_ENDPOINT
              value: 'helm-mysql:3306'
            - name: MYSQL_USERNAME
              value: k8s_admin
            - name: MYSQL_PASSWORD
              value: fP#UaH6qQ3)8
            - name: MYSQL_DATABASE
              value: cmii_live_srs_op
            - name: MYSQL_TABLE
              value: live_segment
            - name: LOG_LEVEL
              value: info
            - name: OSS_META
              value: 'yes'
          resources:
            limits:
              cpu: 2000m
              memory: 4Gi
            requests:
              cpu: 100m
              memory: 256Mi
          volumeMounts:
            # Same subPath as the SRS /home/hls mount, so the adaptor sees
            # the segments SRS writes.
            - name: srs-vol
              mountPath: /cmii/share/hls
              subPath: zhbf/helm-live/hls
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      imagePullSecrets:
        - name: harborsecret
      affinity: {}
      schedulerName: default-scheduler
  serviceName: helm-live-srsrtc-svc
  podManagementPolicy: OrderedReady
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      partition: 0
  revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
# Live operator (control-plane) service: receives SRS webhooks (on_publish)
# and fronts the live-streaming API. Config is injected from the
# helm-live-op-cm-v2 ConfigMap as /cmii/bootstrap.yaml.
metadata:
  name: helm-live-op-v2
  namespace: zhbf
  labels:
    octopus.control: wdd
    app.kubernetes.io/managed-by: octopus
    cmii.app: live-engine
    cmii.type: live
    helm.sh/chart: cmlc-live-live-op-2.0.0
    live-role: op-v2
spec:
  replicas: 1
  selector:
    matchLabels:
      live-role: op-v2
  template:
    metadata:
      labels:
        live-role: op-v2
    spec:
      volumes:
        - name: srs-conf-file
          configMap:
            name: helm-live-op-cm-v2
            items:
              - key: live.op.conf
                path: bootstrap.yaml
            defaultMode: 420
      containers:
        - name: helm-live-op-v2
          image: 192.168.233.100:8033/cmii/cmii-live-operator:5.2.0
          ports:
            - name: operator
              containerPort: 8080
              protocol: TCP
          resources:
            limits:
              cpu: 4800m
              memory: 4Gi
            requests:
              cpu: 100m
              memory: 256Mi
          volumeMounts:
            - name: srs-conf-file
              mountPath: /cmii/bootstrap.yaml
              subPath: bootstrap.yaml
          # Both probes hit the same health endpoint; 60 s initial delay
          # covers the Spring Boot startup time.
          livenessProbe:
            httpGet:
              path: /cmii/health
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            periodSeconds: 20
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /cmii/health
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            periodSeconds: 20
            successThreshold: 1
            failureThreshold: 3
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      imagePullSecrets:
        - name: harborsecret
      affinity: {}
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: zhbf
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: zhbf
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: zhbf
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 6.2.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 6.2.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 6.2.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://36.141.69.163:31935'
rtsp: 'rtsp://36.141.69.163:30554'
srt: 'srt://36.141.69.163:30556'
flv: 'http://36.141.69.163:30500'
hls: 'http://36.141.69.163:30500'
rtc: 'webrtc://36.141.69.163:30080'
replay: 'https://36.141.69.163:30333'
minio:
endpoint: http://192.168.233.100:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls

View File

@@ -0,0 +1,9 @@
#!/bin/bash
# Wipe the data directories of MinIO PVs pv1..pv8 on this host.
i=1
while [ "${i}" -le 8 ]; do
    echo "${i}"
    # docker stop cmii_minio${i}_1 && docker rm cmii_minio${i}_1
    rm -rf "/data/minio-pv/pv${i}"
    i=$((i + 1))
done

View File

@@ -0,0 +1,84 @@
### 调查报告
#### 引言
本文详细探讨了如何在不影响 MongoDB 实际数据的情况下,通过删除文件来释放存储空间。基于用户提供的文件列表,我们分析了每个文件的用途,并提供了安全删除的建议。这份报告旨在为用户提供全面的指导,同时强调手动操作的风险,并推荐更安全的替代方法。
#### 文件分析与分类
用户提供的文件列表包括多种类型的文件,如 `WiredTiger.wt``collection-*``index-*` 等。这些文件是 MongoDB 使用 WiredTiger 存储引擎时的数据目录中的典型组成部分。以下是详细分析:
- **实际数据文件**
- `collection-*` 文件(如 `collection-0-1702428478038935225.wt`)存储集合中的文档数据。删除这些文件将导致数据丢失,因此绝对不能删除。
- 示例:`collection-0--1757014832397380742.wt``collection-4--3349146543354658687.wt`
- **索引文件**
- `index-*` 文件(如 `index-5-1702428478038935225.wt`)存储集合的索引。删除这些文件不会影响实际数据,但会移除索引,可能显著降低查询性能,尤其是对于复杂的查询。
- 示例:`index-0--3349146543354658687.wt``index-6--3597060289595503483.wt`
- 研究表明,索引文件的大小可能占数据目录的很大一部分,删除它们可以释放显著的空间,但需要权衡性能影响。
- **元数据和核心文件**
- `WiredTiger.wt` 是数据库的主要元数据文件,存储所有表的配置和检查点信息。删除它会导致数据库无法启动,绝对不能删除。
- `WiredTigerHS.wt` 是事务历史存储文件,用于支持事务完整性,删除可能导致事务相关问题。
- `_mdb_catalog.wt` 是目录文件,包含数据库的元数据,删除会导致数据库不可用。
- `sizeStorer.wt` 存储大小信息,删除可能会影响空间管理,启动时可能出错。
- **锁文件**
- `WiredTiger.lock``mongod.lock` 是锁文件,用于防止多个实例同时访问数据库。手动删除可能导致数据损坏,不建议删除。
- **日志和临时文件**
- `journal` 文件用于写前日志,确保崩溃恢复时的数据一致性。删除它们风险很高,可能导致数据丢失或不一致,不建议手动操作。
- `_tmp` 文件是临时文件,通常用于操作过程中。如果数据库当前未运行或没有正在进行的操作,删除它们可能是安全的。
- `diagnostic.data` 文件包含诊断信息(如服务器状态的二进制格式),用于故障排查。删除它不会影响数据,但可能会影响未来的问题诊断。
- **不明文件**
- `storage.bson` 在标准 MongoDB 数据目录中不常见,可能是用户自定义文件或备份文件。未经确认用途,不建议删除,以免影响数据。
#### 安全删除的建议
基于上述分析,以下是可以在不影响实际数据的情况下考虑删除的文件:
- **`diagnostic.data`**:这是诊断信息文件,不包含实际数据,删除是安全的。如果空间紧张且不需要诊断信息,可以删除。
- **`_tmp` 文件**:这些是临时文件,如果数据库未运行或确认没有正在进行的操作,可以删除。但需注意,删除过程中可能影响正在进行的操作。
#### 删除索引文件的权衡
如果需要更多空间,可以考虑删除 `index-*` 文件,但需注意以下几点:
- 删除索引文件会移除对应的索引,这不会影响实际数据(文档),但会显著降低查询性能,尤其是依赖索引的查询。
- 示例文件:`index-5--3349146543354658687.wt``index-8--3597060289595503483.wt`
- 建议:如果某些索引不再需要(例如,开发环境或不再使用的查询),可以删除对应的文件。但在生产环境中,建议先评估性能影响。
#### 不建议删除的文件
以下文件不建议删除,因为它们对数据库的正常运行至关重要:
- `collection-*` 文件:包含实际数据,删除会导致数据丢失。
- `WiredTiger.wt``WiredTigerHS.wt``sizeStorer.wt`:这些是核心元数据和历史文件,删除会导致数据库无法启动或数据不一致。
- `journal` 文件:用于崩溃恢复,删除可能导致数据丢失。
- `WiredTiger.lock``mongod.lock`:锁文件,删除可能导致多实例访问冲突。
#### 替代方法:使用 `compact` 命令
手动删除文件存在风险,推荐使用 MongoDB 的 `compact` 命令来安全回收空间。该命令会重写数据和索引,释放未使用的磁盘空间,尤其适合 WiredTiger 存储引擎。操作步骤如下:
1. 使用 `mongosh` 连接到 MongoDB 实例。
2. 对每个集合运行 `db.collection.compact()`,例如 `db.myCollection.compact()`
3. 注意:`compact` 操作需要额外的磁盘空间(约 2GB并会锁定数据库建议在维护窗口期间执行。
有关详细信息,请参考 [MongoDB 官方文档](https://www.mongodb.com/docs/manual/tutorial/compact-collections/)
#### 风险与注意事项
- **手动删除的风险**MongoDB 的文件系统高度集成,删除错误文件可能导致数据库损坏或数据丢失。建议在操作前备份所有文件。
- **诊断信息的重要性**`diagnostic.data` 文件虽然可以删除,但如果未来需要排查问题,可能会缺少关键信息。
- **性能影响**:删除索引文件会影响查询性能,建议在删除前评估业务需求。
#### 表格:文件类型与删除建议
| 文件类型 | 示例文件 | 包含内容 | 是否可以删除 | 影响 |
|-------------------|-----------------------------------|------------------------------|--------------------|--------------------------|
| 集合数据文件 | `collection-0-1702428478038935225.wt` | 实际文档数据 || 数据丢失 |
| 索引文件 | `index-5-1702428478038935225.wt` | 索引数据 | 是(谨慎) | 查询性能下降 |
| 元数据文件 | `WiredTiger.wt` | 数据库配置和检查点 || 数据库无法启动 |
| 历史存储文件 | `WiredTigerHS.wt` | 事务历史 || 事务完整性问题 |
| 临时文件 | `_tmp` | 临时操作数据 | 是(数据库未运行时)| 可能影响正在进行的操作 |
| 诊断文件 | `diagnostic.data` | 服务器状态信息 || 未来诊断可能困难 |
| 日志文件 | `journal` | 写前日志,用于崩溃恢复 || 数据不一致或丢失 |
| 锁文件 | `WiredTiger.lock` | 防止多实例访问 || 数据损坏风险 |
| 大小存储文件 | `sizeStorer.wt` | 空间大小信息 || 启动可能失败 |
| 不明文件 | `storage.bson` | 未知,可能是自定义文件 | 否(未经确认) | 可能影响数据 |
#### 结论
为了释放空间,建议首先删除 `diagnostic.data``_tmp` 文件,这些操作相对安全,不会影响实际数据。如果需要更多空间,可以考虑删除 `index-*` 文件,但需权衡性能影响。强烈建议避免手动删除 `journal` 文件和其他核心文件,并优先使用 `compact` 命令来安全回收空间。

View File

@@ -0,0 +1,9 @@
#!/bin/bash
# Print, for each host, any filesystem whose usage (column 6 of `df -TH`)
# exceeds 70%.
servers="192.168.37.132 192.168.37.133 192.168.37.134 192.168.37.135 192.168.37.136 192.168.37.137 192.168.37.138"
for server in ${servers}; do
    echo "server is ${server}"
    ssh root@"${server}" "df -TH | awk 'NR>1 {split(\$6, a, \"%\"); if (a[1] > 70) print \$0}'"
    echo ""
done

View File

@@ -0,0 +1,76 @@
---
# Shared RWX persistence claims for the xmyd namespace middleware, all backed
# by the nfs-prod-distribute StorageClass.
# Backend application log storage.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-backend-log-pvc
  namespace: xmyd
  labels:
    cmii.type: middleware-base
    cmii.app: nfs-backend-log-pvc
    helm.sh/chart: all-persistence-volume-claims-1.1.0
    app.kubernetes.io/version: 6.2.0
spec:
  storageClassName: nfs-prod-distribute
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 100Gi
---
# EMQX broker data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: helm-emqxs
  namespace: xmyd
  labels:
    cmii.type: middleware-base
    cmii.app: helm-emqxs
    helm.sh/chart: all-persistence-volume-claims-1.1.0
    app.kubernetes.io/version: 6.2.0
spec:
  storageClassName: nfs-prod-distribute
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 20Gi
---
# MongoDB data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: helm-mongo
  namespace: xmyd
  labels:
    cmii.type: middleware-base
    cmii.app: helm-mongo
    helm.sh/chart: all-persistence-volume-claims-1.1.0
    app.kubernetes.io/version: 6.2.0
spec:
  storageClassName: nfs-prod-distribute
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 30Gi
---
# RabbitMQ data.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: helm-rabbitmq
  namespace: xmyd
  labels:
    cmii.type: middleware-base
    cmii.app: helm-rabbitmq
    helm.sh/chart: all-persistence-volume-claims-1.1.0
    app.kubernetes.io/version: 6.2.0
spec:
  storageClassName: nfs-prod-distribute
  accessModes:
    - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 20Gi

View File

@@ -0,0 +1,67 @@
-- Schema migration: adds strike-capable ("察打一体") parent/child device
-- fields, defense-zone handling/decoy-strategy columns, UAV SIM/cert fields,
-- and flyer profile attributes; then normalizes several timestamp columns to
-- fractional-second precision (timestamp(3)/datetime(6)).

-- bridge_device / bridge_device_pool: parent id, device level and height for
-- strike-capable composite devices.
ALTER TABLE uav_lifecycle.bridge_device ADD COLUMN pid bigint NULL COMMENT '针对察打一体设备非察打设备子设备一律为null是察打子设备表示上级察打设备的id' AFTER `function`;
ALTER TABLE uav_lifecycle.bridge_device ADD COLUMN level int NULL COMMENT '设备的等级0表示主设备1表示子设备主要针对察打一体设备' AFTER pid;
ALTER TABLE uav_lifecycle.bridge_device ADD COLUMN device_height float NULL COMMENT '设备对地高度' AFTER update_by;
ALTER TABLE uav_lifecycle.bridge_device_pool ADD COLUMN device_local_altitude double NULL COMMENT '设备所在地面海拔高度' AFTER device_altitude;
ALTER TABLE uav_lifecycle.bridge_device_pool ADD COLUMN pid bigint NULL COMMENT '针对察打一体设备非察打设备子设备一律为null是察打子设备表示上级察打设备的id' AFTER `function`;
ALTER TABLE uav_lifecycle.bridge_device_pool ADD COLUMN level int NULL COMMENT '设备的等级0表示主设备1表示子设备主要针对察打一体设备' AFTER pid;
ALTER TABLE uav_lifecycle.bridge_device_pool ADD COLUMN device_height float NULL COMMENT '设备对地高度' AFTER update_by;
-- regulator_defense_zone: fusion flag, handling/warning geometry, centroid
-- coordinates, guard mode and ground altitude.
ALTER TABLE uav_lifecycle.regulator_defense_zone ADD COLUMN is_fusion bit(1) NULL COMMENT '是否融和' AFTER area_range;
ALTER TABLE uav_lifecycle.regulator_defense_zone ADD COLUMN handling_area text NULL COMMENT '处置区域' AFTER update_by;
ALTER TABLE uav_lifecycle.regulator_defense_zone ADD COLUMN warning_area text NULL COMMENT '预警区域' AFTER handling_area;
ALTER TABLE uav_lifecycle.regulator_defense_zone ADD COLUMN longitude double NULL COMMENT '经度' AFTER warning_area;
ALTER TABLE uav_lifecycle.regulator_defense_zone ADD COLUMN latitude double NULL COMMENT '纬度' AFTER longitude;
ALTER TABLE uav_lifecycle.regulator_defense_zone ADD COLUMN extension text NULL COMMENT '防区扩展字段' AFTER latitude;
ALTER TABLE uav_lifecycle.regulator_defense_zone ADD COLUMN guard_mode int NULL COMMENT '值守模式0-人工值守1-无人值守)' AFTER extension;
ALTER TABLE uav_lifecycle.regulator_defense_zone ADD COLUMN ground_altitude double NULL COMMENT '防区地面海拔高度' AFTER guard_mode;
-- regulator_defense_zone_device_mapping: decoy/jamming strategy parameters
-- per device (type, direction, mode, frequency bands, angles, coordinates,
-- duration).
ALTER TABLE uav_lifecycle.regulator_defense_zone_device_mapping ADD COLUMN induce_type int NULL COMMENT '诱骗类型(0-方向诱骗1-角度诱骗,2-轨迹诱骗3-位置诱骗,4-模式诱骗5-导航屏蔽, 6-停止诱骗)' AFTER create_by;
ALTER TABLE uav_lifecycle.regulator_defense_zone_device_mapping ADD COLUMN induce_direction int NULL COMMENT '诱骗策略参数0-正北、1-正东、2-正南、3-正西、4-东北、5-东南、6-西南、7-西北)' AFTER induce_type;
ALTER TABLE uav_lifecycle.regulator_defense_zone_device_mapping ADD COLUMN interfere_mode int NULL COMMENT '干扰模式(1-驱离模式、2-迫降模式)' AFTER induce_direction;
ALTER TABLE uav_lifecycle.regulator_defense_zone_device_mapping ADD COLUMN frequency text NULL COMMENT '频段设置0-1.5G1-2.4G, 2-5.8G, 3-840M' AFTER interfere_mode;
ALTER TABLE uav_lifecycle.regulator_defense_zone_device_mapping ADD COLUMN induce_angle int NULL COMMENT '诱骗角度(0-360)' AFTER frequency;
ALTER TABLE uav_lifecycle.regulator_defense_zone_device_mapping ADD COLUMN induce_track_type int NULL COMMENT '诱骗轨迹类型0-原地盘旋' AFTER induce_angle;
ALTER TABLE uav_lifecycle.regulator_defense_zone_device_mapping ADD COLUMN location_induce_type int NULL COMMENT '位置诱骗类型(0-迫降)' AFTER induce_track_type;
ALTER TABLE uav_lifecycle.regulator_defense_zone_device_mapping ADD COLUMN induce_latitude double NULL COMMENT '诱骗纬度' AFTER location_induce_type;
ALTER TABLE uav_lifecycle.regulator_defense_zone_device_mapping ADD COLUMN induce_longitude double NULL COMMENT '诱骗经度' AFTER induce_latitude;
ALTER TABLE uav_lifecycle.regulator_defense_zone_device_mapping ADD COLUMN mode_induce_type int NULL COMMENT '模式诱骗类型(1-向外驱离、2-向内拉近)' AFTER induce_longitude;
ALTER TABLE uav_lifecycle.regulator_defense_zone_device_mapping ADD COLUMN interfere_time int NULL COMMENT '干扰时长(10-300)' AFTER mode_induce_type;
-- uav_detail: terminal identity (IMEI/IMSI/SN) and certificate/SIM
-- activation status.
ALTER TABLE uav_lifecycle.uav_detail ADD COLUMN imei varchar(100) NULL COMMENT '国际移动设备识别码' AFTER code_type;
ALTER TABLE uav_lifecycle.uav_detail ADD COLUMN imsi varchar(100) NULL COMMENT '国际移动用户识别码' AFTER imei;
ALTER TABLE uav_lifecycle.uav_detail ADD COLUMN sn varchar(100) NULL COMMENT '终端设备号' AFTER imsi;
ALTER TABLE uav_lifecycle.uav_detail ADD COLUMN failed_type int NULL COMMENT '失败原因(0: SIM卡号错误 1: SN错误 2: 都错)' AFTER device_status;
ALTER TABLE uav_lifecycle.uav_detail ADD COLUMN device_cert_status int NULL COMMENT '设备证书状态(0: 未激活 1: 激活 2: 失败)' AFTER failed_type;
ALTER TABLE uav_lifecycle.uav_detail ADD COLUMN device_sim_status int NULL COMMENT '设备SIM状态(0: 未激活 1: 激活 2: 失败)' AFTER device_cert_status;
ALTER TABLE uav_lifecycle.uav_detail ADD COLUMN online_at datetime NULL COMMENT '最近激活时间' AFTER is_del;
-- flyer_info: pilot profile attributes and last-known position.
ALTER TABLE uav_user.flyer_info ADD COLUMN gender varchar(8) NULL COMMENT '性别' AFTER flyer_name;
ALTER TABLE uav_user.flyer_info ADD COLUMN job_cat varchar(64) NULL COMMENT '职务类别' AFTER gender;
ALTER TABLE uav_user.flyer_info ADD COLUMN job_level varchar(64) NULL COMMENT '职级' AFTER job_cat;
ALTER TABLE uav_user.flyer_info ADD COLUMN cert_cat varchar(64) NULL COMMENT '证件类型' AFTER job_level;
ALTER TABLE uav_user.flyer_info ADD COLUMN issue_auth varchar(64) NULL COMMENT '发证机关' AFTER cert_cat;
ALTER TABLE uav_user.flyer_info ADD COLUMN drive_lic_type varchar(64) NULL COMMENT '驾驶证类别' AFTER issue_auth;
ALTER TABLE uav_user.flyer_info ADD COLUMN longitude decimal(9,6) NULL COMMENT '经度' AFTER update_at;
ALTER TABLE uav_user.flyer_info ADD COLUMN latitude decimal(8,6) NULL COMMENT '纬度' AFTER longitude;
-- Precision/type normalization of existing columns (no new columns below).
ALTER TABLE admin_data.uav_plane_model CHANGE COLUMN def_date def_date timestamp(3) NOT NULL COMMENT '创建时间';
ALTER TABLE admin_data.uav_plane_model CHANGE COLUMN upd_date upd_date timestamp(3) NOT NULL COMMENT '更新时间';
ALTER TABLE uav_alarm.uav_alarm_event CHANGE COLUMN event_start_time event_start_time timestamp(3) NOT NULL COMMENT '告警时间';
ALTER TABLE uav_alarm.uav_alarm_event CHANGE COLUMN event_end_time event_end_time timestamp(3) NULL COMMENT '告警结束时间';
ALTER TABLE uav_alarm.uav_alarm_event CHANGE COLUMN def_date def_date timestamp(3) NOT NULL COMMENT '创建时间';
ALTER TABLE uav_alarm.uav_alarm_event CHANGE COLUMN upd_date upd_date timestamp(3) NOT NULL COMMENT '更新时间';
ALTER TABLE uav_alarm.uav_sec_alarm_event CHANGE COLUMN alarm_content alarm_content varchar(200) NOT NULL COMMENT '告警内容【指令名称、对象名称】';
ALTER TABLE uav_alarm.uav_sec_alarm_event CHANGE COLUMN alarm_start_time alarm_start_time timestamp(3) NOT NULL COMMENT '告警时间';
ALTER TABLE uav_alarm.uav_sec_alarm_event CHANGE COLUMN report_time report_time timestamp(3) NULL COMMENT '上报时间';
ALTER TABLE uav_alarm.uav_sec_alarm_event CHANGE COLUMN alarm_end_time alarm_end_time timestamp(3) NULL COMMENT '告警结束时间';
ALTER TABLE uav_alarm.uav_sec_alarm_event CHANGE COLUMN def_date def_date timestamp(3) NOT NULL COMMENT '创建时间';
ALTER TABLE uav_alarm.uav_sec_alarm_event CHANGE COLUMN upd_date upd_date timestamp(3) NOT NULL COMMENT '更新时间';
ALTER TABLE uav_lifecycle.ky_flight_act_geo_detail CHANGE COLUMN geo_num4s geo_num4s mediumtext NULL COMMENT '二维网格码列表,将区域栅格化为网格编码列表';
ALTER TABLE uav_industrial_portfolio.civil_military_alarm CHANGE COLUMN end_time end_time datetime(6) NULL COMMENT '结束时间';
ALTER TABLE uav_lifecycle.uav_sec_alarm_event CHANGE COLUMN report_time report_time timestamp(3) NULL COMMENT '上报时间';
ALTER TABLE uav_industrial_portfolio.civil_military_bts_info CHANGE COLUMN bts_radius bts_radius decimal(8,1) NULL COMMENT '探测器探测半径';
ALTER TABLE uav_industrial_portfolio.civil_military_bts_record_info CHANGE COLUMN bts_radius bts_radius decimal(8,1) NULL COMMENT '探测器探测半径';
ALTER TABLE uav_lifecycle.regulator_defense_zone CHANGE COLUMN upper_height upper_height decimal(10,1) NULL COMMENT '防区最高高度';
ALTER TABLE uav_lifecycle.uav_sec_alarm_event CHANGE COLUMN alarm_content alarm_content varchar(200) NOT NULL COMMENT '告警内容【指令名称、对象名称】';
ALTER TABLE uav_lifecycle.uav_sec_alarm_event CHANGE COLUMN alarm_start_time alarm_start_time timestamp(3) NOT NULL COMMENT '告警时间';
ALTER TABLE uav_industrial_portfolio.civil_military_alarm CHANGE COLUMN begin_time begin_time datetime(6) NULL COMMENT '开始时间';
ALTER TABLE uav_lifecycle.uav_sec_alarm_event CHANGE COLUMN alarm_end_time alarm_end_time timestamp(3) NULL COMMENT '告警结束时间';
ALTER TABLE uav_iot_dispatcher.cmii_uav_servo_device CHANGE COLUMN camera_project_id camera_project_id varchar(32) NOT NULL COMMENT '摄像头的项目所属id';
ALTER TABLE uav_lifecycle.warn_item_his CHANGE COLUMN uas_id uas_id varchar(128) NOT NULL COMMENT '无人机识别码或探测目标id';
ALTER TABLE uav_iot_dispatcher.cmii_uav_perception_task CHANGE COLUMN project_id project_id varchar(32) NOT NULL COMMENT '项目编号';
ALTER TABLE uav_lifecycle.warn_item CHANGE COLUMN uas_id uas_id varchar(128) NOT NULL COMMENT '无人机识别码或探测目标id';

View File

@@ -0,0 +1,2 @@
-- Composite indexes to speed up looking up child devices by parent id (pid)
-- while filtering out soft-deleted rows (is_del).
ALTER TABLE uav_lifecycle.bridge_device ADD INDEX idx_device_pid(pid,is_del);
ALTER TABLE uav_lifecycle.bridge_device_pool ADD INDEX idx_device_pid(pid,is_del);

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,315 @@
-- New-table DDL for the UAV platform. Foreign key checks are disabled so the
-- tables can be created in any order; each CREATE is preceded by a `use` that
-- selects its target schema.
SET FOREIGN_KEY_CHECKS = 0;
use uav_dev;
-- Custom display ordering of companies.
CREATE TABLE `company_sort_order` (
  `company_id` int NOT NULL COMMENT '公司id',
  `company_name` varchar(254) DEFAULT NULL COMMENT '公司名称',
  `sort` int NOT NULL COMMENT '排序顺序 ,从小到大',
  PRIMARY KEY (`company_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='定制化公司排序顺序 ';
use uav_dev;
-- Maps a UAV code to its 5G core-network identity (GPSI) for authentication.
CREATE TABLE `uav_auth_5g` (
  `id` bigint NOT NULL COMMENT '主键id',
  `uav_code` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL COMMENT '中移凌云无人机编码',
  `gpsi` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL COMMENT 'gpsi',
  `is_del` bit(1) DEFAULT b'0' COMMENT '是否删除',
  `create_at` datetime DEFAULT NULL COMMENT '创建时间',
  `create_by` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '创建人',
  `update_at` datetime DEFAULT NULL COMMENT '更新时间',
  `update_by` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '更新人',
  `company_id` varchar(100) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '公司id',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC COMMENT='无人机认证表';
use uav_dev;
-- Audit log of UAV 5G authentication attempts and outcomes.
CREATE TABLE `uav_auth_log` (
  `id` bigint NOT NULL COMMENT '主键id',
  `uav_code` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '认证无人id',
  `uav_id_5g` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '认证核心网无人机id',
  `auth_result` varchar(16) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '认证结果',
  `auth_time` datetime DEFAULT NULL COMMENT '认证时间',
  `gpsi` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT 'gpsi',
  `location_area_5g` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin COMMENT '5G的基站地址',
  `company_id` varchar(100) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '公司id',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC COMMENT='无人机认证日志表';
use uav_industrial_portfolio;
-- Historical archive of sensing/detection alarms (mirrors civil_military_alarm).
CREATE TABLE `civil_military_alarm_his` (
  `id` bigint NOT NULL COMMENT '告警id',
  `aerocraft_type` int DEFAULT NULL COMMENT '飞行器类型0云平台无人机1基站探测到的飞行器',
  `aerocraft_id` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '飞行器id',
  `area_type` int DEFAULT NULL COMMENT '告警的区域类型0规划区域告警1基站探测区域告警',
  `order_id` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '飞行编号,仅无人机使用',
  `radar_id` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '基站雷达id基站告警使用',
  `target_type` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '探测到的目标类型',
  `event` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '入侵事件闯入INTRUDE',
  `status` int DEFAULT NULL COMMENT '告警状态0告警中1:告警结束',
  `company_id` bigint DEFAULT NULL COMMENT '公司id',
  `begin_time` datetime(6) DEFAULT NULL COMMENT '开始时间',
  `end_time` datetime(6) DEFAULT NULL COMMENT '结束时间',
  `end_type` int DEFAULT NULL COMMENT '结束类型0自动结束1手动结束',
  `create_at` datetime DEFAULT NULL COMMENT '创建时间',
  `update_at` datetime DEFAULT NULL COMMENT '更新时间',
  `create_by` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '创建用户id如果是触发则为空',
  `update_by` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '更新用户id',
  `is_del` bit(1) DEFAULT b'0' COMMENT '是否删除',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='通感告警历史记录';
use uav_industrial_portfolio;
-- Base-station detection statistics rollup (by year/month/week/day dimension).
CREATE TABLE `civil_military_statisic_bak` (
  `id` bigint NOT NULL COMMENT '主键id',
  `bts_id` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '基站id',
  `bts_code` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '基站编码',
  `dimension` int DEFAULT '0' COMMENT '维度0:全部 1:年 2:月 3:周 4:日 ',
  `dimension_value` int DEFAULT NULL COMMENT '2024月202401日20240101',
  `total` int DEFAULT NULL COMMENT '探测总架次',
  `create_at` datetime DEFAULT NULL COMMENT '创建时间',
  `update_at` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
  PRIMARY KEY (`id`),
  KEY `idx_bts_statistic_dim` (`bts_code`,`dimension`,`dimension_value`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='基站探测统计表';
use uav_industrial_portfolio;
-- Police-use device registry with alert contact and location details.
CREATE TABLE `police_device_info` (
  `id` bigint NOT NULL AUTO_INCREMENT COMMENT '主键ID',
  `device_id` varchar(225) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT '设备id',
  `device_name` varchar(225) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT '设备名称',
  `deleted` int DEFAULT NULL COMMENT '删除标记',
  `company_id` bigint DEFAULT NULL COMMENT '公司ID',
  `wgs84_lon` bigint DEFAULT NULL COMMENT 'WGS84坐标系经度',
  `wgs84_lat` bigint DEFAULT NULL COMMENT 'WGS84坐标系纬度',
  `alerter` varchar(225) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT '告警人',
  `alerter_telephone` varchar(225) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT '告警电话',
  `police_station` varchar(225) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT '所属派出所',
  `create_timestamp` bigint DEFAULT NULL COMMENT '创建信息时间戳',
  `update_timestamp` bigint DEFAULT NULL COMMENT '最后更新时间戳',
  `crime_address` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT NULL COMMENT '案发地址',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1907599916368584706 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci COMMENT='设备信息表';
use uav_industrial_portfolio;
-- Violation records captured from video snapshots (litter, illegal parking,
-- intrusion), with both GCJ-02 and WGS-84 coordinates.
CREATE TABLE `violation_record` (
  `id` bigint NOT NULL AUTO_INCREMENT COMMENT '记录id',
  `label` int DEFAULT NULL COMMENT '1 发现垃圾, 2 违停占用 3 人员入侵',
  `address` varchar(254) DEFAULT NULL COMMENT '记录地址,用户输入',
  `dealt` int DEFAULT '0' COMMENT '是否处理 0 为处理,1 已处理,null 是没有处理',
  `image_file_id` varchar(255) DEFAULT NULL COMMENT '视频截图文件id',
  `latitude02` double DEFAULT NULL COMMENT '截图的纬度02',
  `longitude02` double DEFAULT NULL COMMENT '截图的经度02',
  `altitude` double DEFAULT NULL COMMENT '截图的海拔高度',
  `latitude84` double DEFAULT NULL COMMENT '截图的纬度84',
  `longitude84` double DEFAULT NULL COMMENT '截图的经度84',
  `company_id` int NOT NULL COMMENT '公司id',
  `deleted` int NOT NULL DEFAULT '0' COMMENT '0 未删除 1 删除',
  `create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=73 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='违规记录表';
use uav_lifecycle;
-- MQTT credentials and topic layout per device provider/type for the bridge.
CREATE TABLE `bridge_device_mqtt_info` (
  `id` bigint NOT NULL COMMENT '序列',
  `provider_id` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL COMMENT '设备提供商id',
  `device_type` int DEFAULT NULL COMMENT '设备类型;0、5G-A基站1、合作无人机2、监管无人机3、RemoteID无人机4、雷达设备5、频谱设备6、光电设备7、反制设备8、诱骗设备9、干扰设备10、指挥车11、察打一体999、其他设备',
  `mqtt_user` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT 'MQTT账号名',
  `mqtt_password` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT 'MQTT密码',
  `topic` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT 'MQTT的topic;设备的topic 为:\r\n设备本身参数数据的Topic,bridge/{provider_code}/device/${device_type}/{deviceId},\r\n设备实时数据的Topicbridge/{provider_code}/device_data/${device_type}/{deviceId}',
  `create_by` bigint DEFAULT NULL COMMENT '创建人',
  `create_at` datetime DEFAULT NULL COMMENT '创建时间',
  `update_by` bigint DEFAULT NULL COMMENT '更新人',
  `update_at` datetime DEFAULT NULL COMMENT '更新时间',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='设备topic相关信息';
use uav_lifecycle;
-- Bridge message-flow metrics: counts, delays and success/failure totals per
-- topic/point/dimension.
CREATE TABLE `bridge_statistic` (
  `id` bigint NOT NULL COMMENT 'id',
  `topic` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '数据的来源或去处',
  `point` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '统计指标点',
  `dimension` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '统计维度deviceId/objectId...',
  `dimension_value` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '统计维度值:{deviceId}/{objectId}',
  `count` bigint DEFAULT NULL COMMENT '总数',
  `success_delay` bigint DEFAULT NULL COMMENT '总延迟',
  `min_delay` bigint DEFAULT NULL COMMENT '最低延迟',
  `max_delay` bigint DEFAULT NULL COMMENT '最大延迟',
  `success_count` bigint DEFAULT NULL COMMENT '成功总数,发布有成功与失败,订阅只有成功',
  `failure_count` bigint DEFAULT NULL COMMENT '失败总数',
  `time` datetime(6) DEFAULT NULL COMMENT '统计时间',
  PRIMARY KEY (`id`),
  KEY `idx_statistic_comp_id` (`topic`,`point`,`dimension`,`dimension_value`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='bridge统计表';
use uav_lifecycle;
-- Raw external UAV detection payloads (track or no-fly warning), stored as JSON.
CREATE TABLE `external_uav_detect_data` (
  `id` bigint NOT NULL COMMENT '自增ID',
  `type` varchar(100) NOT NULL COMMENT '数据类型detect-轨迹nofly-预警")',
  `param` json DEFAULT NULL COMMENT '参数体',
  `def_date` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建日期',
  PRIMARY KEY (`id`),
  KEY `idx_external_uav_detect_data_type` (`type`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='无人机轨迹预测历史表';
use uav_lifecycle;
-- Camera (photoelectric) AI alarms: detected birds/drones with snapshot files.
CREATE TABLE `photoelectric_alarm` (
  `id` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL COMMENT '告警ID',
  `camera_id` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL COMMENT '摄像头ID',
  `thumbnail_file_id` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '缩略图文件ID',
  `alarm_time` datetime DEFAULT NULL COMMENT '告警时间',
  `ai_type` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '目标类型bird、drone',
  `bird` int DEFAULT NULL COMMENT '探测目标数量',
  `drone` int DEFAULT NULL COMMENT '探测目标数量',
  `file_id` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '文件ID',
  `deleted` tinyint(1) DEFAULT '0' COMMENT '是否删除,0:否1:是',
  `company_id` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '公司ID',
  `create_at` datetime DEFAULT NULL COMMENT '创建时间',
  `create_by` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '创建者',
  `update_at` datetime DEFAULT NULL COMMENT '更新时间',
  `update_by` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '更新人',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ROW_FORMAT=DYNAMIC COMMENT='摄像头告警表';
use uav_lifecycle;
-- Per-regulator selection of defense zones (ids stored as a delimited text set).
CREATE TABLE `regulator_defense_zone_choose` (
  `id` bigint NOT NULL COMMENT '主键',
  `defense_zone_ids` text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL COMMENT '防区id集合',
  `regulator_id` bigint NOT NULL COMMENT '监管员id',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='用户防区选择映射表';
use uav_lifecycle;
CREATE TABLE `regulator_defense_zone_strategy` (
`id` bigint NOT NULL COMMENT '主键id',
`defense_zone_id` bigint NOT NULL COMMENT '防区id',
`create_at` datetime(6) NOT NULL COMMENT '创建时间',
`create_by` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL COMMENT '创建用户id',
`defense_type` int DEFAULT NULL COMMENT '防区类型0核心区1处置区2预警区',
`sms_notice` text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin COMMENT '闯入短信通知:接收人',
`notice_time` int DEFAULT '10' COMMENT '系统提示音(1-10000)',
`device_ids` text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin COMMENT '无人值守设备id集合',
PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='防区策略表';
use uav_lifecycle;
CREATE TABLE `regulator_detection_configure_info` (
`id` bigint NOT NULL COMMENT '主键id',
`ref_lat` decimal(20,7) DEFAULT NULL COMMENT '参考点02纬度',
`ref_lng` decimal(20,7) DEFAULT NULL COMMENT '参考点02经度',
`ref_wgs_lat` decimal(20,7) DEFAULT NULL COMMENT '参考点84纬度',
`ref_wgs_lng` decimal(20,7) DEFAULT NULL COMMENT '参考点84经度',
`configs` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci COMMENT '配置',
`keep_duration` bigint DEFAULT NULL COMMENT '保存时间',
`suppression_num` int DEFAULT NULL COMMENT '压制点数',
`user_id` bigint DEFAULT NULL COMMENT '用户id',
`user_name` varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT NULL COMMENT '用户名称',
`create_time` datetime DEFAULT NULL COMMENT '创建时间',
`modify_time` datetime DEFAULT NULL COMMENT '更新时间',
`deleted` bit(1) DEFAULT b'0' COMMENT '是否删除',
PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ROW_FORMAT=DYNAMIC COMMENT='参考点等信息的配置';
use uav_lifecycle;
CREATE TABLE `regulator_detection_statistic` (
`id` bigint NOT NULL COMMENT '主键id',
`device_comp_id` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci NOT NULL COMMENT '设备组合id',
`provider_code` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT NULL COMMENT '提供商编码',
`device_type` int DEFAULT NULL COMMENT '设备类型',
`device_id` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT NULL COMMENT '设备id',
`dimension` int DEFAULT NULL COMMENT '统计纬度0全部1234',
`dimension_value` int DEFAULT NULL COMMENT '统计纬度值',
`total` int DEFAULT NULL COMMENT '统计值',
`create_at` datetime DEFAULT NULL COMMENT '创建时间',
`update_at` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`) USING BTREE,
KEY `idx_device_comp_id` (`device_comp_id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ROW_FORMAT=DYNAMIC COMMENT='设备探测统计表';
use uav_lifecycle;
CREATE TABLE `regulator_detection_track` (
`id` bigint NOT NULL COMMENT '主键id',
`device_id` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci NOT NULL COMMENT '设备id',
`provider_code` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT NULL COMMENT '提供商编码',
`device_type` int DEFAULT NULL COMMENT '设备类型',
`track_id` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci NOT NULL COMMENT '目标id',
`create_at` datetime DEFAULT NULL COMMENT '创建时间',
`update_at` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci ROW_FORMAT=DYNAMIC COMMENT='设备探测统计表';
use uav_lifecycle;
CREATE TABLE `regulator_radiomap` (
`id` bigint NOT NULL AUTO_INCREMENT COMMENT '主键id',
`create_at` date DEFAULT NULL COMMENT '创建时间',
`Longitude` double DEFAULT NULL COMMENT '经度',
`Latitude` double DEFAULT NULL COMMENT '纬度',
`height` double DEFAULT NULL COMMENT '高度',
`value` double DEFAULT NULL COMMENT 'RSRP信号值',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=70197 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='Radiomap数据';
use uav_lifecycle;
CREATE TABLE `regulator_warning_instance` (
`id` bigint NOT NULL COMMENT '监管警情表',
`regulator_defense_zone_id` bigint DEFAULT NULL COMMENT '监管防区id',
`uav_model` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '发现无人机型号',
`uav_sn` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '发现无人机SN',
`uav_address` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '无人机地址',
`identity` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '产品唯一识别码',
`longitude` double DEFAULT NULL COMMENT '经度',
`latitude` double DEFAULT NULL COMMENT '纬度',
`pilot_lon` double DEFAULT NULL COMMENT '飞手经度',
`pilot_lat` double DEFAULT NULL COMMENT '飞手纬度',
`pilot_address` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '飞手地址',
`height` double DEFAULT NULL COMMENT '高度',
`telephone` varchar(128) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '通知手机号',
`is_del` int NOT NULL DEFAULT '0' COMMENT '用户是否被删除01删除',
`send_flag` int DEFAULT NULL COMMENT '是否发送成功0失败1:成功',
`create_at` datetime(6) DEFAULT NULL COMMENT '创建时间',
`create_by` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '创建用户',
`update_at` datetime(6) DEFAULT NULL COMMENT '更新时间',
`update_by` varchar(64) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL COMMENT '更新用户',
PRIMARY KEY (`id`),
KEY `idx_warning_instance_createBy` (`create_by`) /*!80000 INVISIBLE */
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin COMMENT='上报遥控器警情表';
-- ---------------------------------------------------------------------------
-- Schemas: uav_mission (multi-UAV missions, police cases) and uav_user
-- (middle-platform account mapping). Comments added for review only.
-- ---------------------------------------------------------------------------
use uav_mission;
-- UAVs participating in a multi-UAV mission (`link_id` groups them).
CREATE TABLE `drone_info` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT '主键id,自增',
`uav_name` varchar(100) NOT NULL COMMENT '无人机名称',
`uav_code` varchar(20) NOT NULL COMMENT '无人机编号',
`link_id` varchar(36) NOT NULL COMMENT '多机飞行任务id',
`create_id` varchar(100) DEFAULT NULL COMMENT '创建人',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`deleted` int(1) unsigned zerofill DEFAULT '0' COMMENT '0保留,1假删除',
`sorted` int NOT NULL COMMENT '排序',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=589 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='多机任务-无人机信息';
use uav_mission;
-- Per-UAV command log for a police case. NOTE(review): `uniq_id` duplicates
-- the primary key on `id` — likely redundant.
CREATE TABLE `police_case_action` (
`id` bigint NOT NULL AUTO_INCREMENT COMMENT '主键自增id',
`uas_id` varchar(254) DEFAULT NULL COMMENT '无人机id',
`action` varchar(32) DEFAULT NULL COMMENT '操作指令',
`action_time` datetime DEFAULT NULL COMMENT '操作时间',
`action_detail` varchar(254) DEFAULT NULL COMMENT '操作指令详情,补充信息',
`police_case_code` varchar(64) DEFAULT NULL COMMENT '警情编号',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`deleted` int NOT NULL DEFAULT '0' COMMENT '是否删除',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_id` (`id`),
KEY `idx_uas_id` (`uas_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='警情信息操作记录表';
use uav_mission;
-- Smart-link (networked) mission records, soft-deleted via `deleted`.
CREATE TABLE `smart_link_mission` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT '主键id,自增',
`mission_name` varchar(100) NOT NULL COMMENT '作业名称',
`user_id` varchar(100) NOT NULL COMMENT '用户Id',
`company_id` varchar(100) NOT NULL COMMENT '公司Id',
`create_id` varchar(100) DEFAULT NULL COMMENT '创建人',
`update_id` varchar(100) DEFAULT NULL COMMENT '更新人',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`update_time` datetime DEFAULT NULL COMMENT '更新时间',
`deleted` int(1) unsigned zerofill DEFAULT '0' COMMENT '0保留,1假删除',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=117 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='智能网联作业信息关联表';
use uav_user;
-- Maps local users to middle-platform (中台) open accounts and orders.
-- NOTE(review): column names mix snake_case and camelCase
-- (`open_userId_aaaS`, `accountDesc`) — kept exactly as dumped.
CREATE TABLE `sys_zhongtai_user_mapping` (
`id` int NOT NULL AUTO_INCREMENT COMMENT '主键',
`user_id` int DEFAULT NULL COMMENT '用户id',
`app_code` varchar(100) DEFAULT NULL COMMENT '待开通用户的能力应用编码。该编码为中台分配的应用编码',
`open_userId_aaaS` varchar(100) DEFAULT NULL COMMENT '订单用户的中台账号Open user ID',
`account_type_aaaS` varchar(10) DEFAULT NULL COMMENT '需要开通的账号类型',
`open_userId_sub_account` varchar(100) DEFAULT NULL COMMENT '中台子账号Open user ID。当accountTypeAaaS=1时必填',
`open_userId_app_account` varchar(100) DEFAULT NULL COMMENT '中台应用从账号Open user ID当accountTypeAaaS=2时必填',
`user_name` varchar(100) DEFAULT NULL COMMENT '帐号姓名',
`org_code` varchar(100) DEFAULT NULL COMMENT '帐号归属的组织编码。如果是平台或平台类订购utilityCode不为空abilityCode为空本编码orgCode可以为空。\r\n" + "如果是能力订购abilityCode不为空 utilityCode为空本编码orgCode不可以为空。',
`service_org_code` varchar(100) DEFAULT NULL COMMENT '帐号归属的服务机构编码',
`mobile` varchar(100) DEFAULT NULL COMMENT '手机号',
`accountDesc` varchar(100) DEFAULT NULL COMMENT '帐号描述',
`sub_order_item_list` varchar(500) DEFAULT NULL COMMENT '账号的订购集合',
`create_at` datetime DEFAULT NULL COMMENT '创建事件',
`update_at` datetime DEFAULT NULL COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci COMMENT='中台账号映射表';
-- Re-enable FK checks (presumably disabled at the top of the dump — the
-- matching SET ... = 0 is outside this view).
SET FOREIGN_KEY_CHECKS = 1;

View File

@@ -0,0 +1,7 @@
harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:6.2.0-36027-040202
harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:6.2.0-250331
harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:6.2.0-34673-040101
harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:6.2.0-hbgs

View File

@@ -0,0 +1,51 @@
#!/bin/bash
# Database table backup + 6.0.0 -> 6.2.0 DDL upgrade helper.
# NOTE(review): the MySQL root password is hard-coded in the commands below —
# consider --login-path or a protected option file instead.
# Location of the standalone MySQL client/dump binaries.
export mysql_exec_file_prefix=/root/mysql-8.0.27-linux-glibc2.17-x86_64-minimal/bin
# Target MySQL server port.
export mysql_port=33306
# Output file for the schema-only (no data) dump.
export SQL_DUMP_FILE=/root/wdd/update_6.0.0_6.2.0/all_tables_6.0.0.sql
# Output file for the full data dump.
export SQL_FULL_BACK_UP_FILE=/root/wdd/update_6.0.0_6.2.0/all_tables_6.0.0_20250403_fullback.sql
# Directory holding the ordered DDL upgrade scripts.
export SQL_DDL_FILE="/root/update_6.0.0_6.2.0/厦门5.7.0~6.0.0"
# Dump the structure (no rows) of every non-system database into
# $SQL_DUMP_FILE: list databases, drop MySQL's built-in schemas via grep,
# then feed the names to mysqldump --no-data (triggers/routines/events kept).
backup_all_structure() {
echo ""
echo ""
echo "start to backup all-table-structure!"
$mysql_exec_file_prefix/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P$mysql_port -e 'show databases;' | grep -Ev 'Database|information_schema|mysql|sys|performance_schema' | xargs $mysql_exec_file_prefix/mysqldump -uroot -pQzfXQhd3bQ -h127.0.0.1 -P$mysql_port --single-transaction --source-data=2 --hex-blob --triggers --routines --events --no-data --set-gtid-purged=OFF --databases >"${SQL_DUMP_FILE}"
echo ""
}
# Full data dump of every non-system database into $SQL_FULL_BACK_UP_FILE.
# Same pipeline as backup_all_structure, but without --no-data, so rows are
# included (--single-transaction keeps the dump consistent for InnoDB).
backup_all_dump() {
echo ""
echo ""
echo ""
echo "start to backup all-table-database-data!"
$mysql_exec_file_prefix/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P$mysql_port -e 'show databases;' | grep -Ev 'Database|information_schema|mysql|sys|performance_schema' | xargs $mysql_exec_file_prefix/mysqldump -uroot -pQzfXQhd3bQ -h127.0.0.1 -P$mysql_port --single-transaction --source-data=2 --hex-blob --triggers --routines --events --set-gtid-purged=OFF --databases >"${SQL_FULL_BACK_UP_FILE}"
echo ""
}
# Apply every DDL script in $SQL_DDL_FILE against the local server, in the
# numeric order of each file name's two leading characters.
# Fix: the original `for f in $(ls ... | sort ...)` word-split file names on
# whitespace (ShellCheck SC2045/SC2012); a while-read loop over the same
# sorted listing preserves names containing spaces and keeps the ordering.
import_ddl_sql(){
echo ""
echo ""
echo "start to import ddl sql !"
while IFS= read -r sql_file; do
  # Skip blank lines that an empty directory listing could produce.
  [ -n "$sql_file" ] || continue
  echo "current file is ${sql_file}"
  sudo "$mysql_exec_file_prefix/mysql" -uroot -pQzfXQhd3bQ -h127.0.0.1 -P"$mysql_port" <"$SQL_DDL_FILE/${sql_file}"
  echo "------------------"
  echo ""
done < <(ls "$SQL_DDL_FILE" | sort -n -k1.1,1.2)
echo ""
}
# --- main sequence -----------------------------------------------------------
# 1) schema-only snapshot, 2) full data backup, 3) apply the 6.2.0 new-object
#    scripts (tables, schemas, indexes, columns) directly.
# NOTE(review): import_ddl_sql() is defined above but never invoked, and the
# new_*.sql files are resolved against the current working directory —
# confirm both are intentional.
backup_all_structure
backup_all_dump
/root/mysql-8.0.27-linux-glibc2.17-x86_64-minimal/bin/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 < new_table.sql
/root/mysql-8.0.27-linux-glibc2.17-x86_64-minimal/bin/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 < new_schema.sql
/root/mysql-8.0.27-linux-glibc2.17-x86_64-minimal/bin/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 < new_table_index.sql
/root/mysql-8.0.27-linux-glibc2.17-x86_64-minimal/bin/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 < new_table_column.sql

View File

@@ -0,0 +1,2 @@
OpenSSH 安全漏洞(CVE-2023-51767)

View File

@@ -0,0 +1,69 @@
### 🤖 Assistant
以下是升级 Ubuntu 20.04.4 中 OpenSSH 版本的详细步骤,结合了公开资源和核心技术要点:
---
### **核心步骤概览**
1. **依赖准备**
- 升级前需同步更新 OpenSSL(OpenSSH 的加密依赖):
```bash
wget https://github.com/openssl/openssl/releases/download/openssl-3.0.16/openssl-3.0.16.tar.gz
tar -zxvf openssl-3.0.16.tar.gz
cd openssl-3.0.16
./config --prefix=/usr/local/openssl --shared # 关键参数 `--shared`
make && make install
```
需手动链接库文件(解决动态库路径问题):
```bash
ln -s /usr/local/include/openssl /usr/include/openssl
ln -sf /usr/local/lib64/libssl.so /usr/local/openssl/lib64/libssl.so
echo "/usr/local/openssl/lib64" > /etc/ld.so.conf.d/openssl.conf
ldconfig # 更新库缓存
```
来源:[<sup>1</sup>](https://blog.csdn.net/weixin_44293618/article/details/128096741)、[<sup>2</sup>](https://www.cnblogs.com/subsea/p/17682962.html)
链接openssl的二进制文件
- ln -sf /usr/local/openssl/bin/openssl /usr/bin/openssl
2. **升级 OpenSSH**
- 下载并编译最新版 OpenSSH(如 9.3p2):
```bash
wget https://cloudflare.cdn.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-9.3p2.tar.gz
tar -zxvf openssh-9.3p2.tar.gz
cd openssh-9.3p2
./configure --prefix=/usr/local/openssh --with-ssl-dir=/usr/local/openssl
make && make install
```
- 替换系统默认二进制文件:
```bash
mv /usr/bin/ssh /usr/bin/ssh.bak
ln -s /usr/local/openssh/bin/ssh /usr/bin/ssh
```
3. **服务配置与测试**
- 重启 SSH 服务并验证版本:
```bash
systemctl restart sshd
ssh -V # 应显示新版本号
```
- **注意事项**
- 操作前确保通过 `telnet` 或本地终端保持会话,避免升级失败导致无法远程连接(参考 [<sup>3</sup>](https://blog.csdn.net/lqh_linux/article/details/128796892))。
- 若遇到 `known_hosts` 报错,可清除 `~/.ssh/known_hosts` 中对应 IP 的记录([深信服社区](https://bbs.sangfor.com.cn/forum.php?mod=viewthread&tid=128631))。
---
### **常见问题解决**
- **符号链接错误**
若报错 `relocation error: symbol EVP_mdc2 not defined`,检查 OpenSSL 库路径是否正确链接,并重新执行 `ldconfig`[<sup>2</sup>](https://www.cnblogs.com/subsea/p/17682962.html))。
- **权限问题**
升级后若无法 root 登录,需修改 `/etc/ssh/sshd_config` 中的 `PermitRootLogin yes` 并重启服务([<sup>4</sup>](https://www.cnblogs.com/miaojx/p/14713244.html))。
---
### **总结**
- 核心依赖:升级 OpenSSL 时务必使用 `--shared` 参数并正确配置库路径。
- 风险控制:保留备份会话,避免升级中断导致失联。
- 验证方式:通过 `ssh -V` 和实际连接测试确认版本更新。
如需更详细的配置参数或故障排查,可参考上述来源链接。

View File

@@ -0,0 +1,89 @@
### 🤖 Assistant
在Ubuntu系统中可以通过以下方法查找OpenSSL的库文件(`.so`文件)和安装路径:
---
### **1. 查找OpenSSL可执行文件路径**
```bash
which openssl
```
输出示例:
`/usr/bin/openssl`
---
### **2. 查找OpenSSL动态库文件路径(`.so`文件)**
#### 方法1通过 `ldconfig` 查询
```bash
ldconfig -p | grep libssl.so
```
输出示例:
```
libssl.so.3 (libc6,x86-64) => /usr/lib/x86_64-linux-gnu/libssl.so.3
libssl.so (libc6,x86-64) => /usr/lib/x86_64-linux-gnu/libssl.so
```
#### 方法2全局搜索文件
```bash
sudo find / -name "libssl.so*" 2>/dev/null
```
输出示例:
```
/usr/lib/x86_64-linux-gnu/libssl.so.3
/usr/local/openssl/lib/libssl.so
```
---
### **3. 查找openssl头文件位置(`.h`文件)**
```bash
find /usr -name "openssl" -type d 2>/dev/null
```
输出示例:
```
/usr/include/openssl # 系统默认头文件路径
/usr/local/include/openssl # 手动安装的可能路径
```
---
### **4. 检查OpenSSL版本及安装路径**
```bash
openssl version -a
```
输出中会包含 `OPENSSLDIR`(即OpenSSL的配置目录):
```
OPENSSLDIR: "/usr/local/ssl"
```
---
### **常见路径说明**
| 文件类型 | 典型路径Ubuntu 20.04 |
|----------------|---------------------------------------------|
| **可执行文件** | `/usr/bin/openssl` |
| **动态库** | `/usr/lib/x86_64-linux-gnu/libssl.so.3` |
| **头文件** | `/usr/include/openssl` |
| **自定义安装** | `/usr/local/openssl/lib`(手动编译时指定) |
---
### **特殊情况处理**
- **手动编译的OpenSSL**
如果通过源码安装(例如指定了 `--prefix=/usr/local/openssl`),库文件通常位于 `/usr/local/openssl/lib`
需要通过 `ldconfig` 注册路径:
```bash
echo "/usr/local/openssl/lib" > /etc/ld.so.conf.d/openssl.conf
ldconfig
```
- **多版本共存问题**
使用 `update-alternatives` 管理多版本(参考 [<sup>1</sup>](https://blog.csdn.net/newbeixue/article/details/103029704))。
---
### **总结**
- **最快方法**:直接运行 `ldconfig -p | grep libssl.so`。
- **检查依赖**:编译报错时,需确认库路径是否在 `LD_LIBRARY_PATH` 中。
- **源码安装**:注意 `--prefix` 参数指定的路径(参考 [<sup>2</sup>](https://www.henghost.com/jishu/25183/))。

View File

@@ -0,0 +1,42 @@
# --- Jenkins-in-Docker setup notes (command log, not a runnable script) ------
# Dedicated bridge network shared by the dind daemon and the Jenkins server.
docker network create jenkins
# Docker-in-Docker daemon Jenkins builds will talk to over TLS on :2376.
docker run --name jenkins-docker --rm --detach \
--privileged --network jenkins --network-alias docker \
--env DOCKER_TLS_CERTDIR=/certs \
--volume jenkins-docker-certs:/certs/client \
--volume jenkins-data:/var/jenkins_home \
--publish 2376:2376 \
docker:dind --storage-driver overlay2
# build jenkins
cd /root/wdd/jenkins-build
docker build -t harbor.cdcyy.com.cn/cmii/jenkins-wdd-blueocean:2.492.3-lts-jdk21 .
# image built successfully; resulting tag recorded below
harbor.cdcyy.com.cn/cmii/jenkins-wdd-blueocean:2.492.3-lts-jdk21
# Jenkins controller, wired to the dind daemon via DOCKER_HOST/TLS env vars.
docker run \
--name jenkins-server \
--restart=on-failure \
--detach \
--network jenkins \
--env DOCKER_HOST=tcp://docker:2376 \
--env DOCKER_CERT_PATH=/certs/client \
--env DOCKER_TLS_VERIFY=1 \
--volume jenkins-data:/var/jenkins_home \
--volume jenkins-docker-certs:/certs/client:ro \
--publish 8080:8080 \
--publish 50000:50000 \
harbor.cdcyy.com.cn/cmii/jenkins-wdd-blueocean:2.492.3-lts-jdk21
# Read the one-time initial admin password from inside the container.
docker exec -it jenkins-server /bin/bash
cat /var/jenkins_home/secrets/initialAdminPassword
# recorded output (setup password) — rotate/remove before sharing this note
c99b712eac254e89a90f83d0f280e35a

View File

@@ -0,0 +1,24 @@
# Jenkins LTS image extended with the Docker CLI and Blue Ocean plugins.
FROM jenkins/jenkins:2.492.3-lts-jdk21
USER root
# Use a domestic (Aliyun) Debian mirror to speed up apt downloads.
RUN rm -rf /etc/apt/sources.list.d/* \
&& echo "deb http://mirrors.aliyun.com/debian/ bookworm main non-free contrib" > /etc/apt/sources.list \
&& echo "deb http://mirrors.aliyun.com/debian-security bookworm-security main" >> /etc/apt/sources.list \
&& echo "deb http://mirrors.aliyun.com/debian/ bookworm-updates main non-free contrib" >> /etc/apt/sources.list
# Install docker-ce-cli from the Tsinghua Docker CE mirror: fetch + dearmor
# the GPG key, register the apt source (with debug echoes of both), then
# clean apt caches to keep the layer small.
RUN apt-get update \
&& apt-get install -y lsb-release ca-certificates curl apt-utils && \
install -m 0755 -d /etc/apt/keyrings && \
curl -fsSL https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg && \
chmod a+r /etc/apt/keyrings/docker.gpg && \
echo "docker key add => $(cat /etc/apt/keyrings/docker.gpg)" && \
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/debian "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null && \
echo "docker apt source add => $(cat /etc/apt/sources.list.d/docker.list) \n\n" && \
apt-get update && apt-get install -y docker-ce-cli && \
apt-get clean && rm -rf /var/lib/apt/lists/*
# Drop back to the unprivileged jenkins user for plugin install and runtime.
USER jenkins
# Pre-install the Blue Ocean UI and docker-workflow (Docker Pipeline) plugins.
RUN jenkins-plugin-cli --plugins "blueocean docker-workflow"

View File

@@ -0,0 +1,138 @@
---
# MQTT connection settings consumed by cmii-uavms-pyfusion (mounted as
# /app/config.yaml by the Deployment in this file).
# Fix: leading indentation was stripped in the dumped copy, making the YAML
# invalid; re-indented to conventional 2-space layout with tokens unchanged.
# NOTE(review): the nesting of the literal config.yaml payload is
# reconstructed — confirm against the deployed ConfigMap. The broker
# password is stored in plain text; consider a Secret instead.
kind: ConfigMap
apiVersion: v1
metadata:
  name: pyfusion-configmap
  namespace: ynydapp
data:
  config.yaml: |-
    mqtt:
      broker: "helm-emqxs"
      port: 1883
      username: "cmlc"
      password: "odD8#Ve7.B"
      topics:
        mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
        sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
---
# Deployment for the pyfusion backend: one replica, NFS-backed log volume,
# config.yaml mounted from pyfusion-configmap.
# Fix: leading indentation was stripped in the dumped copy, making the YAML
# invalid; re-indented to conventional 2-space layout with tokens unchanged.
# NOTE(review): image tag is 6.3.1 while all version labels/envs say 6.2.0 —
# confirm which is intended.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: cmii-uavms-pyfusion
  namespace: ynydapp
  labels:
    app.kubernetes.io/app-version: 6.2.0
    app.kubernetes.io/managed-by: octopus-control
    cmii.app: cmii-uavms-pyfusion
    cmii.type: backend
    octopus/control: backend-app-1.0.0
spec:
  replicas: 1
  selector:
    matchLabels:
      cmii.app: cmii-uavms-pyfusion
      cmii.type: backend
  template:
    metadata:
      creationTimestamp: null
      labels:
        cmii.app: cmii-uavms-pyfusion
        cmii.type: backend
    spec:
      volumes:
        # Shared NFS log PVC; pod logs land under a namespace/app subPath.
        - name: nfs-backend-log-volume
          persistentVolumeClaim:
            claimName: nfs-backend-log-pvc
        - name: pyfusion-conf
          configMap:
            name: pyfusion-configmap
            items:
              - key: config.yaml
                path: config.yaml
      containers:
        - name: cmii-uavms-pyfusion
          image: 'harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:6.3.1'
          ports:
            - name: pod-port
              containerPort: 8080
              protocol: TCP
          env:
            - name: VERSION
              value: 6.2.0
            - name: NACOS_SYSTEM_CONFIG_NAME
              value: cmii-backend-system
            - name: NACOS_SERVICE_CONFIG_NAME
              value: cmii-uavms-pyfusion
            - name: NACOS_SERVER_ADDRESS
              value: 'helm-nacos:8848'
            - name: K8S_NAMESPACE
              value: ynydapp
            - name: APPLICATION_NAME
              value: cmii-uavms-pyfusion
            - name: NACOS_DISCOVERY_PORT
              value: '8080'
            - name: BIZ_CONFIG_GROUP
              value: 6.2.0
            - name: SYS_CONFIG_GROUP
              value: 6.2.0
            - name: IMAGE_VERSION
              value: 6.2.0
          resources:
            limits:
              cpu: '2'
              memory: 3Gi
            requests:
              cpu: 200m
              memory: 500Mi
          volumeMounts:
            - name: nfs-backend-log-volume
              mountPath: /cmii/logs
              subPath: ynydapp/cmii-uavms-pyfusion
            # Mount only config.yaml over the container's /app/config.yaml.
            - name: pyfusion-conf
              mountPath: /app/config.yaml
              subPath: config.yaml
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: { }
      imagePullSecrets:
        - name: harborsecret
      affinity: { }
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
---
# ClusterIP Service exposing the pyfusion Deployment on port 8080.
# Fix: leading indentation was stripped in the dumped copy, making the YAML
# invalid; re-indented to conventional 2-space layout with tokens unchanged.
kind: Service
apiVersion: v1
metadata:
  name: cmii-uavms-pyfusion
  namespace: ynydapp
  labels:
    app.kubernetes.io/app-version: 6.2.0
    app.kubernetes.io/managed-by: octopus-control
    cmii.app: cmii-uavms-pyfusion
    cmii.type: backend
    octopus/control: backend-app-1.0.0
spec:
  ports:
    - name: backend-tcp
      protocol: TCP
      port: 8080
      targetPort: 8080
  selector:
    cmii.app: cmii-uavms-pyfusion
    cmii.type: backend
  type: ClusterIP
  sessionAffinity: None

View File

@@ -0,0 +1,48 @@
# docker-compose for a single-FE / single-BE Apache Doris 2.1.6 cluster on a
# fixed-address bridge network (FE at 172.21.80.2, BE at 172.21.80.3).
# Fix: leading indentation was stripped in the dumped copy, making the YAML
# invalid; re-indented to conventional 2-space layout with tokens unchanged.
version: "3"
services:
  fe:
    image: 192.168.186.11:8033/cmii/doris.fe-amd64:2.1.6
    hostname: fe
    logging:
      driver: "json-file"
      options:
        max-size: "100m"
        max-file: "2"
    environment:
      - FE_SERVERS=fe1:172.21.80.2:9010
      - FE_ID=1
    ports:
      # 8030: FE HTTP UI, 9030: MySQL-protocol query port.
      - 8030:8030
      - 9030:9030
    volumes:
      - /var/lib/docker/doris/doris-meta:/opt/apache-doris/fe/doris-meta
      - /var/lib/docker/doris/fe-log:/opt/apache-doris/fe/log
    networks:
      doris_net:
        ipv4_address: 172.21.80.2
  be:
    image: 192.168.186.11:8033/cmii/doris.be-amd64:2.1.6
    hostname: be
    logging:
      driver: "json-file"
      options:
        max-size: "100m"
        max-file: "2"
    environment:
      - FE_SERVERS=fe1:172.21.80.2:9010
      - BE_ADDR=172.21.80.3:9050
    ports:
      # 8040: BE HTTP port.
      - 8040:8040
    volumes:
      - /var/lib/docker/doris/storage:/opt/apache-doris/be/storage
      - /var/lib/docker/doris/be-log:/opt/apache-doris/be/log
    depends_on:
      - fe
    networks:
      doris_net:
        ipv4_address: 172.21.80.3
networks:
  doris_net:
    ipam:
      config:
        - subnet: 172.21.80.0/24

View File

@@ -0,0 +1,573 @@
-- ---------------------------------------------------------------------------
-- Apache Doris DDL — cmii warehouse, DWD layer (grid detail tables).
-- All tables: 3 replicas, HDD, auto buckets; comments added for review only.
-- ---------------------------------------------------------------------------
CREATE DATABASE IF NOT EXISTS cmii;
USE cmii;
-- Airspace-to-grid membership (merge-on-write unique key per grid+airspace).
CREATE TABLE IF NOT EXISTS `dwd_reg_airspace_grid_detail_dd` (
`geoCode` varchar(32) NOT NULL COMMENT '网格码',
`areaCode` varchar(32) NOT NULL COMMENT '空域标识码',
`areaCategory` tinyint NOT NULL COMMENT '空域类型1飞行活动空域 2划设空域',
`geoType` varchar(32) NULL COMMENT ' 网格类型2二维网格3三维网格'
) ENGINE=OLAP
UNIQUE KEY(`geoCode`, `areaCode`)
COMMENT '空域网格明细表'
DISTRIBUTED BY HASH(`areaCode`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
-- Per-day flight-activity / 2D-grid membership, auto month partitions.
CREATE TABLE IF NOT EXISTS `dwd_reg_flight_act_grid_detail_dd` (
`report_date` date NOT NULL COMMENT '活动日期',
`geo_num4` varchar(32) NOT NULL COMMENT '二维网格码',
`flight_code` varchar(32) NOT NULL COMMENT '飞行活动标识码'
) ENGINE=OLAP
UNIQUE KEY(`report_date`, `geo_num4`, `flight_code`)
COMMENT '飞行活动网格明细表'
AUTO PARTITION BY RANGE (date_trunc(`report_date`, 'month'))
()
DISTRIBUTED BY HASH(`report_date`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
-- 2D grid code -> bounding box + decimal code range of child grids.
CREATE TABLE IF NOT EXISTS `dwd_reg_grid_2dcode_dd` (
`geo2DCode` varchar(32) NOT NULL COMMENT '二维网格码',
`maxLat` double NULL COMMENT '最大纬度',
`maxLng` double NULL COMMENT '最大经度',
`minLat` double NULL COMMENT '最小纬度',
`minLng` double NULL COMMENT '最小经度',
`geo2DCodeInteger` bigint NULL COMMENT '二维网格十进制整型',
`min2DCodeInteger` bigint NULL COMMENT '子网格十进制整型',
`max2DCodeInteger` bigint NULL COMMENT '子网格十进制整型'
) ENGINE=OLAP
UNIQUE KEY(`geo2DCode`)
COMMENT '二维网格位置明细表'
DISTRIBUTED BY HASH(`geo2DCode`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
-- 3D grid code -> bounding box (incl. height) + octal child-code range.
-- NOTE(review): `maxLat` is NOT NULL here but nullable in the 2D table —
-- confirm intended.
CREATE TABLE IF NOT EXISTS `dwd_reg_grid_3dcode_dd` (
`geo3DCode` varchar(32) NOT NULL COMMENT '三维网格码',
`maxLat` double NOT NULL COMMENT '最大纬度',
`maxLng` double NULL COMMENT '最大经度',
`minLat` double NULL COMMENT '最小纬度',
`minLng` double NULL COMMENT '最小经度',
`minHeight` double NULL COMMENT '最小高度',
`maxHeight` double NULL COMMENT '最大高度',
`min3DCodeOx` varchar(32) NULL COMMENT '最小网格码八进制',
`max3DCodeOx` varchar(32) NULL COMMENT '最大网格码八进制'
) ENGINE=OLAP
UNIQUE KEY(`geo3DCode`)
COMMENT '三维网格明细表'
DISTRIBUTED BY HASH(`geo3DCode`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
-- Latest network-signal measurement per 3D grid (SNR/RSRP/RSRQ/PCI).
CREATE TABLE IF NOT EXISTS `dwd_reg_grid_network_signal_detail_dd` (
`geo_3d_code` varchar(32) NOT NULL COMMENT '三维网格码',
`snr` double NULL COMMENT '信噪比',
`rsrp` double NULL COMMENT '参考信号接收功率,单位dBm',
`rsrq` double NULL COMMENT '参考信号接收质量',
`pci` varchar(320) NULL COMMENT '网络小区编号',
`latest_lat84` double NULL COMMENT '最近一次测量点纬度',
`latest_lng84` double NULL COMMENT '最近一次测量点经度',
`latest_height` double NULL COMMENT '最近一次测量点相对高度',
`latest_time` datetime NULL COMMENT '最近一次测量时间'
) ENGINE=OLAP
UNIQUE KEY(`geo_3d_code`)
COMMENT '网格信号强度明细表'
DISTRIBUTED BY HASH(`geo_3d_code`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
-- Risk level per (2D grid, relative height) slice.
CREATE TABLE IF NOT EXISTS `dwd_reg_grid_risk_detail_dd` (
`geo_2d_code` varchar(32) NOT NULL COMMENT '二维网格码',
`height` smallint NOT NULL COMMENT '相对高度',
`risk_level` tinyint NULL COMMENT '风险等级,-1:缺数据无法计算,1:低风险,2:中风险,3:高风险',
`calculation_time` datetime NULL COMMENT '计算时间'
) ENGINE=OLAP
UNIQUE KEY(`geo_2d_code`, `height`)
COMMENT '网格风险等级明细表'
DISTRIBUTED BY HASH(`geo_2d_code`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
-- 3D-grid decomposition of models. NOTE(review): `geo_3d_code`'s column
-- comment says "二维网格码" (2D) — likely a copy-paste slip in the dump.
CREATE TABLE IF NOT EXISTS `dwd_reg_model_grid_detail_dd` (
`model_id` varchar(32) NOT NULL COMMENT '模型ID',
`geo_3d_code` varchar(32) NOT NULL COMMENT '二维网格码',
`min_lng` double NOT NULL COMMENT '最小经度',
`min_lat` double NULL COMMENT '最小纬度',
`max_lng` double NULL COMMENT '最大经度',
`max_lat` double NULL COMMENT '最大纬度',
`min_height` double NULL COMMENT '最小高度',
`max_height` double NULL COMMENT '最大高度'
) ENGINE=OLAP
UNIQUE KEY(`model_id`, `geo_3d_code`)
COMMENT '模型网格明细表'
DISTRIBUTED BY HASH(`model_id`, `geo_3d_code`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
-- DWS: daily flight-activity counts per 2D grid, auto month partitions.
CREATE TABLE IF NOT EXISTS `dws_reg_flight_act_grid_1d` (
`report_date` date NOT NULL COMMENT '活动日期',
`geo_num4` varchar(32) NOT NULL COMMENT '二维网格码',
`flight_total` bigint NOT NULL COMMENT '飞行活动统计数量'
) ENGINE=OLAP
UNIQUE KEY(`report_date`, `geo_num4`)
COMMENT '飞行活动网格流量聚合表'
AUTO PARTITION BY RANGE (date_trunc(`report_date`, 'month'))
()
DISTRIBUTED BY HASH(`report_date`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
CREATE TABLE IF NOT EXISTS `dws_reg_grid_population_1d` (
`geo_2d_code` varchar(32) NOT NULL COMMENT '(二维网格码17级)',
`time` datetime NOT NULL COMMENT '统计时间',
`population` int NULL COMMENT '人口数',
`population_density` double NULL COMMENT '人口密度'
) ENGINE=OLAP
UNIQUE KEY(`geo_2d_code`, `time`)
COMMENT '网格人口统计日表'
AUTO PARTITION BY RANGE (date_trunc(`time`, 'month'))
()
DISTRIBUTED BY HASH(`geo_2d_code`) BUCKETS AUTO
PROPERTIES (
"replication_allocation" = "tag.location.default: 3",
"min_load_replica_num" = "-1",
"is_being_synced" = "false",
"storage_medium" = "hdd",
"storage_format" = "V2",
"inverted_index_storage_format" = "V1",
"enable_unique_key_merge_on_write" = "true",
"light_schema_change" = "true",
"disable_auto_compaction" = "false",
"enable_single_replica_compaction" = "false",
"group_commit_interval_ms" = "10000",
"group_commit_data_bytes" = "134217728",
"enable_mow_light_delete" = "false"
);
-- Raw sensed-object track points reported by detection devices.
-- DUPLICATE KEY model (append-only); auto monthly partitions on detection `time`.
CREATE TABLE IF NOT EXISTS `ods_bridge_object_track_sense` (
  `object_id` varchar(64) NOT NULL COMMENT '目标唯一编号',
  `provider_code` varchar(32) NOT NULL COMMENT '设备提供方编码',
  `device_type` int NOT NULL COMMENT '设备类型',
  `device_id` varchar(128) NOT NULL COMMENT '设备id',
  `pt_time` bigint NOT NULL COMMENT '上报时间戳',
  `msg_cnt` bigint NULL COMMENT '消息编号',
  `longitude` double NOT NULL COMMENT '经度',
  `latitude` double NOT NULL COMMENT '纬度',
  `altitude` float NULL,
  `height` float NULL COMMENT '距地高度',
  `speed` float NOT NULL COMMENT '目标速度',
  `time` datetime(3) NOT NULL COMMENT '探测时间',
  `extension` variant NULL COMMENT '扩展信息'
) ENGINE=OLAP
DUPLICATE KEY(`object_id`)
COMMENT '设备目标探测数据表'
AUTO PARTITION BY RANGE (date_trunc(`time`, 'month'))()
DISTRIBUTED BY HASH(`object_id`) BUCKETS AUTO
PROPERTIES (
  "replication_allocation" = "tag.location.default: 3",
  "min_load_replica_num" = "-1",
  "is_being_synced" = "false",
  "storage_medium" = "hdd",
  "storage_format" = "V2",
  "inverted_index_storage_format" = "V1",
  "light_schema_change" = "true",
  "disable_auto_compaction" = "false",
  "enable_single_replica_compaction" = "false",
  "group_commit_interval_ms" = "10000",
  "group_commit_data_bytes" = "134217728"
);
-- Track points reported by cooperative UAS via third-party platforms.
-- DUPLICATE KEY model; auto monthly partitions on report `time`.
CREATE TABLE IF NOT EXISTS `ods_bridge_uas_track_report` (
  `uas_id` varchar(64) NOT NULL COMMENT '无人机识别码',
  `third_code` varchar(32) NOT NULL COMMENT '三方平台编码',
  `angle` float NOT NULL COMMENT '航迹角',
  `speed` float NOT NULL COMMENT '地速',
  `latitude` double NOT NULL COMMENT '纬度',
  `longitude` double NOT NULL COMMENT '经度',
  `altitude` double NOT NULL COMMENT '海拔高度',
  `height` float NULL COMMENT '距地高度',
  `time` datetime(3) NOT NULL COMMENT '上报时间',
  `extension` variant NULL COMMENT '扩展信息'
) ENGINE=OLAP
DUPLICATE KEY(`uas_id`)
COMMENT '合作无人机上报数据表'
AUTO PARTITION BY RANGE (date_trunc(`time`, 'month'))()
DISTRIBUTED BY HASH(`uas_id`) BUCKETS AUTO
PROPERTIES (
  "replication_allocation" = "tag.location.default: 3",
  "min_load_replica_num" = "-1",
  "is_being_synced" = "false",
  "storage_medium" = "hdd",
  "storage_format" = "V2",
  "inverted_index_storage_format" = "V1",
  "light_schema_change" = "true",
  "disable_auto_compaction" = "false",
  "enable_single_replica_compaction" = "false",
  "group_commit_interval_ms" = "10000",
  "group_commit_data_bytes" = "134217728"
);
-- Regulatory warning items (one row per warning event for a UAS / sensed target).
-- FIX: table COMMENT previously read '设备目标探测数据表' — copy-pasted from the
-- track-sense table; this table stores warning/alert records, not track points.
-- DUPLICATE KEY model; auto monthly partitions on `start_time`.
CREATE TABLE IF NOT EXISTS `ods_reg_warn_item` (
  `uas_id` varchar(256) NOT NULL COMMENT '无人机识别码或探测目标id',
  `warn_code` varchar(32) NOT NULL COMMENT '告警唯一标识',
  `id` bigint NOT NULL COMMENT 'mysql自增ID',
  `id_type` tinyint NULL COMMENT '无人机的id类型,1、三方平台 2、监管无人机 3、RemoteID 4.探测轨迹',
  `cooperate` tinyint NOT NULL COMMENT '是否为合作类 1.合作; 0.非合作',
  `provider_code` varchar(64) NULL COMMENT '设备来源厂家编号',
  `device_id` varchar(64) NULL COMMENT '数据来源设备id设备方定义id)',
  `device_type` int NULL COMMENT '来源设备类型(0、5G-A基站5ga 1、雷达设备(radar)2、频谱设备 (spec)3、光电设备(oe)4、反制设备,cm5、诱骗设备(dec)6、干扰设备(ifr) 7、指挥车(cv)8、察打一体 (isrs)999、其他设备(other))',
  `event` tinyint NOT NULL COMMENT '告警类型(1 偏航预警2 闯入管制区3 闯入临时空域4.非合飞行)',
  `warn_key` varchar(32) NULL COMMENT '告警特征值(uasId、event、refcode等产生',
  `warning_content` varchar(256) NULL COMMENT '告警信息,主要是存放各种告警说明',
  `start_time` datetime NOT NULL COMMENT '开始时间',
  `end_time` datetime NULL COMMENT '结束时间',
  `status` tinyint NOT NULL COMMENT '告警状态0告警中1:告警结束',
  `longitude84` double NULL COMMENT '产生告警时经度(WGS-84)',
  `latitude84` double NULL COMMENT '产生告警时纬度(WGS-84)',
  `height` double NULL COMMENT '产生告警时高度',
  `altitude` double NULL COMMENT '拔高度,单位米',
  `speed` double NULL COMMENT '产生告警时的地速度',
  `end_type` tinyint NULL COMMENT '结束类型0自动结束1手动结束',
  `ref_area_code` varchar(32) NULL COMMENT 'event为23时首次产生告警时关联空域编号 event为3时飞行活动编号',
  `ref_area_name` varchar(40) NULL COMMENT 'event为23时首次产生告警时关联空域名称.只存第一个空域名称',
  `uav_update_time` datetime NULL COMMENT '无人机信息更新时间(以下为告警最新对应无人机信息)',
  `uav_lng` double NULL COMMENT '最近1次告警时无人机经度',
  `uav_lat` double NULL COMMENT '最近1次告警时无人机纬度',
  `uav_height` double NULL COMMENT '最近1次告警时无人机高度',
  `uav_altitude` double NULL COMMENT '最近1次告警时无人机的拔高度单位米',
  `uav_speed` double NULL COMMENT '最近1次告警时无人机地速度',
  `is_deal` tinyint NULL COMMENT '是否已经处置。 0.未处置 1.已处置',
  `update_at` datetime NULL COMMENT '修改时间',
  `create_at` datetime NULL COMMENT '创建时间'
) ENGINE=OLAP
DUPLICATE KEY(`uas_id`)
COMMENT '监管告警信息表'
AUTO PARTITION BY RANGE (date_trunc(`start_time`, 'month'))()
DISTRIBUTED BY HASH(`uas_id`) BUCKETS AUTO
PROPERTIES (
  "replication_allocation" = "tag.location.default: 3",
  "min_load_replica_num" = "-1",
  "is_being_synced" = "false",
  "storage_medium" = "hdd",
  "storage_format" = "V2",
  "inverted_index_storage_format" = "V1",
  "light_schema_change" = "true",
  "disable_auto_compaction" = "false",
  "enable_single_replica_compaction" = "false",
  "group_commit_interval_ms" = "10000",
  "group_commit_data_bytes" = "134217728"
);
-- Estimated network path loss per 2D grid cell at a given relative height.
-- `path_loss` is negative; larger values mean better signal (per column comment).
CREATE TABLE IF NOT EXISTS `ods_reg_grid_network_signal_loss` (
  `geo_2d_code` varchar(32) NOT NULL COMMENT '二维网格编码',
  `flight_height` smallint NOT NULL COMMENT '相对空域高度,单位:m',
  `path_loss` double NOT NULL COMMENT '预估信号损耗,负值,数值越大表示信号质量越好',
  `path_loss_normalized` double NOT NULL COMMENT '预估信号损耗标准化计算值'
) ENGINE=OLAP
DUPLICATE KEY(`geo_2d_code`)
COMMENT '二维网格网络信号损耗表'
DISTRIBUTED BY HASH(`geo_2d_code`) BUCKETS AUTO
PROPERTIES (
  "replication_allocation" = "tag.location.default: 3",
  "min_load_replica_num" = "-1",
  "is_being_synced" = "false",
  "storage_medium" = "hdd",
  "storage_format" = "V2",
  "inverted_index_storage_format" = "V1",
  "light_schema_change" = "true",
  "disable_auto_compaction" = "false",
  "enable_single_replica_compaction" = "false",
  "group_commit_interval_ms" = "10000",
  "group_commit_data_bytes" = "134217728"
);
-- POI density per 2D grid cell (raw value plus normalized score).
CREATE TABLE IF NOT EXISTS `ods_reg_grid_poi_density` (
  `geo_2d_code` varchar(32) NOT NULL COMMENT '二维网格编码',
  `poi_density` double NOT NULL COMMENT 'poi密度',
  `poi_density_normalized` double NOT NULL COMMENT 'poi密度标准化计算值'
) ENGINE=OLAP
DUPLICATE KEY(`geo_2d_code`)
COMMENT '二维网格poi密度表'
DISTRIBUTED BY HASH(`geo_2d_code`) BUCKETS AUTO
PROPERTIES (
  "replication_allocation" = "tag.location.default: 3",
  "min_load_replica_num" = "-1",
  "is_being_synced" = "false",
  "storage_medium" = "hdd",
  "storage_format" = "V2",
  "inverted_index_storage_format" = "V1",
  "light_schema_change" = "true",
  "disable_auto_compaction" = "false",
  "enable_single_replica_compaction" = "false",
  "group_commit_interval_ms" = "10000",
  "group_commit_data_bytes" = "134217728"
);
-- Population exposure risk per 2D grid cell (raw value plus normalized score).
CREATE TABLE IF NOT EXISTS `ods_reg_grid_pop_exposure_risk` (
  `geo_2d_code` varchar(32) NOT NULL COMMENT '二维网格编码',
  `pop_exposure_risk` double NOT NULL COMMENT '人口暴露风险',
  `pop_exposure_risk_normalized` double NOT NULL COMMENT '人口暴露风险标准化计算值'
) ENGINE=OLAP
DUPLICATE KEY(`geo_2d_code`)
COMMENT '二维网格人口暴露风险'
DISTRIBUTED BY HASH(`geo_2d_code`) BUCKETS AUTO
PROPERTIES (
  "replication_allocation" = "tag.location.default: 3",
  "min_load_replica_num" = "-1",
  "is_being_synced" = "false",
  "storage_medium" = "hdd",
  "storage_format" = "V2",
  "inverted_index_storage_format" = "V1",
  "light_schema_change" = "true",
  "disable_auto_compaction" = "false",
  "enable_single_replica_compaction" = "false",
  "group_commit_interval_ms" = "10000",
  "group_commit_data_bytes" = "134217728"
);
-- 3D-grid bounding-box detail per airline (temporary DWD table).
-- NOTE(review): column names are camelCase, unlike the snake_case used by every
-- other table here — kept as-is because consumers reference these names; confirm
-- before renaming. Also only `maxLat` is NOT NULL among the bounds — verify intent.
CREATE TABLE IF NOT EXISTS `dwd_reg_tmp_airline_grid_detail_dd` (
  `airLineId` varchar(128) NOT NULL COMMENT '航线ID',
  `geo3DCode` varchar(32) NOT NULL COMMENT '三维网格码',
  `maxLat` double NOT NULL COMMENT '最大纬度',
  `maxLng` double NULL COMMENT '最大经度',
  `minLat` double NULL COMMENT '最小纬度',
  `minLng` double NULL COMMENT '最小经度',
  `minHeight` double NULL COMMENT '最小高度',
  `maxHeight` double NULL COMMENT '最大高度'
) ENGINE=OLAP
UNIQUE KEY(`airLineId`, `geo3DCode`)
COMMENT '三维网格明细表'
DISTRIBUTED BY HASH(`airLineId`) BUCKETS AUTO
PROPERTIES (
  "replication_allocation" = "tag.location.default: 3",
  "min_load_replica_num" = "-1",
  "is_being_synced" = "false",
  "storage_medium" = "hdd",
  "storage_format" = "V2",
  "inverted_index_storage_format" = "V1",
  "enable_unique_key_merge_on_write" = "true",
  "light_schema_change" = "true",
  "disable_auto_compaction" = "false",
  "enable_single_replica_compaction" = "false",
  "group_commit_interval_ms" = "10000",
  "group_commit_data_bytes" = "134217728",
  "enable_mow_light_delete" = "false"
);
-- Simulated (radio-map) per-grid signal strength samples; one row per 3D grid
-- cell per sampling date. UNIQUE KEY model with merge-on-write.
CREATE TABLE IF NOT EXISTS `dwd_reg_grid_mock_network_signal_detail_dd` (
  `geo_3d_code` varchar(32) NOT NULL COMMENT '三维网格码',
  `sampling_time` date NOT NULL COMMENT '采样时间',
  `rsrp` double NULL COMMENT '参考信号接收功率,单位dBm'
) ENGINE=OLAP
UNIQUE KEY(`geo_3d_code`, `sampling_time`)
COMMENT 'radio map模拟网格信号强度明细表'
DISTRIBUTED BY HASH(`sampling_time`) BUCKETS AUTO
PROPERTIES (
  "replication_allocation" = "tag.location.default: 3",
  "min_load_replica_num" = "-1",
  "is_being_synced" = "false",
  "storage_medium" = "hdd",
  "storage_format" = "V2",
  "inverted_index_storage_format" = "V1",
  "enable_unique_key_merge_on_write" = "true",
  "light_schema_change" = "true",
  "disable_auto_compaction" = "false",
  "enable_single_replica_compaction" = "false",
  "group_commit_interval_ms" = "10000",
  "group_commit_data_bytes" = "134217728",
  "enable_mow_light_delete" = "false"
);
-- External security attack alerts ingested from outside systems.
-- NOTE(review): all columns including the DUPLICATE KEY columns are nullable —
-- confirm upstream always supplies `source`/`type`.
CREATE TABLE IF NOT EXISTS `ods_external_attack_track_report` (
  `source` varchar(255) NULL COMMENT '数据来源',
  `type` varchar(255) NULL COMMENT '数据来源类型',
  `attack_id` varchar(255) NULL COMMENT '告警 id',
  `src_ip` varchar(255) NULL COMMENT '源 IP',
  `src_port` int NULL COMMENT '源端口',
  `timestamp` bigint NULL COMMENT '时间戳',
  `dest_ip` varchar(255) NULL COMMENT '目的地ip',
  `dest_port` int NULL COMMENT '目的端口',
  `attack_type` varchar(255) NULL COMMENT '攻击类型',
  `attack_name` varchar(255) NULL COMMENT '攻击名称',
  `ext` variant NULL COMMENT '扩展字段'
) ENGINE=OLAP
DUPLICATE KEY(`source`, `type`)
COMMENT '外部攻击数据'
DISTRIBUTED BY HASH(`source`) BUCKETS AUTO
PROPERTIES (
  "replication_allocation" = "tag.location.default: 3",
  "min_load_replica_num" = "-1",
  "is_being_synced" = "false",
  "storage_medium" = "hdd",
  "storage_format" = "V2",
  "inverted_index_storage_format" = "V1",
  "light_schema_change" = "true",
  "disable_auto_compaction" = "false",
  "enable_single_replica_compaction" = "false",
  "group_commit_interval_ms" = "10000",
  "group_commit_data_bytes" = "134217728"
);
-- Generic external data reports, stored as a variant payload keyed by
-- source/type.
-- FIX: table COMMENT previously read '设备目标探测数据表' — copy-pasted from the
-- device track table; this table holds external reported data, not detections.
CREATE TABLE IF NOT EXISTS `ods_external_data_track_report` (
  `source` varchar(20) NOT NULL COMMENT '数据来源',
  `type` varchar(20) NOT NULL COMMENT '数据类型',
  `id` bigint NOT NULL AUTO_INCREMENT(1) COMMENT 'ID',
  `def_date` datetime(3) NOT NULL COMMENT '创建时间',
  `param` variant NOT NULL COMMENT '数据实体'
) ENGINE=OLAP
DUPLICATE KEY(`source`, `type`)
COMMENT '外部数据上报表'
AUTO PARTITION BY RANGE (date_trunc(`def_date`, 'month'))()
DISTRIBUTED BY HASH(`source`) BUCKETS AUTO
PROPERTIES (
  "replication_allocation" = "tag.location.default: 3",
  "min_load_replica_num" = "-1",
  "is_being_synced" = "false",
  "storage_medium" = "hdd",
  "storage_format" = "V2",
  "inverted_index_storage_format" = "V1",
  "light_schema_change" = "true",
  "disable_auto_compaction" = "false",
  "enable_single_replica_compaction" = "false",
  "group_commit_interval_ms" = "10000",
  "group_commit_data_bytes" = "134217728"
);
-- Micro-meteorology grid data (wind) on a projected coordinate grid.
-- NOTE(review): longitude/latitude/altitude are decimal(9,0) — per the column
-- comments they are projected X/Y/Z grid coordinates, not geographic degrees.
-- Auto daily partitions on `time`.
CREATE TABLE IF NOT EXISTS `ods_external_micro_meteorology` (
  `longitude` decimal(9,0) NOT NULL COMMENT '网格在投影坐标系下的X坐标',
  `latitude` decimal(9,0) NOT NULL COMMENT '网格在投影坐标系下的Y坐标',
  `altitude` decimal(9,0) NOT NULL COMMENT '网格在投影坐标系下的Z坐标',
  `size` tinyint NOT NULL COMMENT '网格大小(m)',
  `horizontal_wind_speed` double NULL COMMENT '水平风速(m/s)',
  `horizontal_direction` double NULL COMMENT '水平风向',
  `vertical_wind_speed` double NULL COMMENT '垂直风速(m/s)',
  `time` datetime NOT NULL COMMENT '时间'
) ENGINE=OLAP
DUPLICATE KEY(`longitude`, `latitude`, `altitude`)
COMMENT '微气象数据表'
AUTO PARTITION BY RANGE (date_trunc(`time`, 'day'))()
DISTRIBUTED BY HASH(`altitude`) BUCKETS AUTO
PROPERTIES (
  "replication_allocation" = "tag.location.default: 3",
  "min_load_replica_num" = "-1",
  "is_being_synced" = "false",
  "storage_medium" = "hdd",
  "storage_format" = "V2",
  "inverted_index_storage_format" = "V1",
  "light_schema_change" = "true",
  "disable_auto_compaction" = "false",
  "enable_single_replica_compaction" = "false",
  "group_commit_interval_ms" = "10000",
  "group_commit_data_bytes" = "134217728"
);
-- Set the root password and create the read/load-only application account.
-- NOTE(review): credentials are committed in plain text here — consider
-- injecting them at deploy time (secret manager / env substitution). TODO confirm.
ALTER USER root@'%' IDENTIFIED BY "Qtdq5@!Lw5FgM8p#";
CREATE USER IF NOT EXISTS cmii@'%' IDENTIFIED BY 'Gy8F5jpaFG@G';
GRANT SELECT_PRIV,LOAD_PRIV ON cmii.* TO 'cmii'@'%';

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,82 @@
# Doris BE configuration mounted into the BE StatefulSet at /etc/doris.
kind: ConfigMap
apiVersion: v1
metadata:
  name: doris-cluster-be-conf
  namespace: zyly
  labels:
    app.kubernetes.io/component: be
data:
  # FIX: this key previously used a folded block scalar ('be.conf: >').
  # YAML folding joins same-indent lines with spaces, collapsing the whole
  # config onto one line — everything after the first '#' would then be
  # treated as a comment by the BE. Use a literal scalar ('|'), as the FE
  # ConfigMap (doris-cluster-fe-conf) already does, to preserve line breaks.
  be.conf: |
    CUR_DATE=`date +%Y%m%d-%H%M%S`
    # Log dir
    LOG_DIR="${DORIS_HOME}/log/"
    # For jdk 8
    JAVA_OPTS="-Dfile.encoding=UTF-8 -Xmx2048m -DlogPath=$LOG_DIR/jni.log -Xloggc:$LOG_DIR/be.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives"
    # Set your own JAVA_HOME
    # JAVA_HOME=/path/to/jdk/
    # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
    # https://jemalloc.net/jemalloc.3.html jemalloc 内存分配器设置参数
    JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
    JEMALLOC_PROF_PRFIX=""
    # ports for admin, web, heartbeat service
    be_port = 9060
    webserver_port = 8040
    heartbeat_service_port = 9050
    brpc_port = 8060
    arrow_flight_sql_port = -1
    # HTTPS configures
    enable_https = false
    # path of certificate in PEM format.
    #ssl_certificate_path = "$DORIS_HOME/conf/cert.pem"
    # path of private key in PEM format.
    #ssl_private_key_path = "$DORIS_HOME/conf/key.pem"
    # Choose one if there are more than one ip except loopback address.
    # Note that there should at most one ip match this list.
    # If no ip match this rule, will choose one randomly.
    # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
    # Default value is empty.
    # priority_networks = 10.10.10.0/24;192.168.0.0/16
    # data root path, separate by ';'
    # You can specify the storage type for each root path, HDD (cold data) or SSD (hot data)
    # eg:
    # storage_root_path = /home/disk1/doris;/home/disk2/doris;/home/disk2/doris
    # storage_root_path = /home/disk1/doris,medium:SSD;/home/disk2/doris,medium:SSD;/home/disk2/doris,medium:HDD
    # /home/disk2/doris,medium:HDD(default)
    #
    # you also can specify the properties by setting '<property>:<value>', separate by ','
    # property 'medium' has a higher priority than the extension of path
    #
    # Default value is ${DORIS_HOME}/storage, you should create it by hand.
    # storage_root_path = ${DORIS_HOME}/storage
    # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
    # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
    # Advanced configurations
    # INFO, WARNING, ERROR, FATAL
    sys_log_level = INFO
    # sys_log_roll_mode = SIZE-MB-1024
    # sys_log_roll_num = 10
    # sys_log_verbose_modules = *
    # log_buffer_level = -1
    # aws sdk log level
    # Off = 0,
    # Fatal = 1,
    # Error = 2,
    # Warn = 3,
    # Info = 4,
    # Debug = 5,
    # Trace = 6
    # Default to turn off aws sdk log, because aws sdk errors that need to be cared will be output through Doris logs
    #aws_log_level=0
    ## If you are not running in aws cloud, you can disable EC2 metadata
    #AWS_EC2_METADATA_DISABLED=false

View File

@@ -0,0 +1,17 @@
# Headless Service giving BE StatefulSet pods stable per-pod DNS names
# (serviceName of the doris-cluster-be StatefulSet).
apiVersion: v1
kind: Service
metadata:
  name: doris-cluster-be-internal
  namespace: zyly
  labels:
    app.kubernetes.io/component: doris-cluster-be-internal
spec:
  type: ClusterIP
  clusterIP: None  # headless: no virtual IP, DNS resolves to pod IPs
  selector:
    app.kubernetes.io/component: doris-cluster-be
  ports:
    - name: heartbeat-port
      protocol: TCP
      port: 9050
      targetPort: 9050

View File

@@ -0,0 +1,32 @@
# NodePort Service exposing the Doris BE ports outside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: doris-cluster-be-service
  namespace: zyly
  labels:
    app.kubernetes.io/component: doris-cluster-be
spec:
  type: NodePort
  selector:
    app.kubernetes.io/component: doris-cluster-be
  ports:
    - name: be-port            # thrift data port
      protocol: TCP
      port: 9060
      targetPort: 9060
      nodePort: 32189
    - name: webserver-port     # HTTP web/rest API
      protocol: TCP
      port: 8040
      targetPort: 8040
      nodePort: 31624
    - name: heartbeat-port     # FE -> BE heartbeat
      protocol: TCP
      port: 9050
      targetPort: 9050
      nodePort: 31625
    - name: brpc-port          # BE <-> BE rpc
      protocol: TCP
      port: 8060
      targetPort: 8060
      nodePort: 31627

View File

@@ -0,0 +1,206 @@
# Doris BE StatefulSet (single replica). Registers with the FE at ENV_FE_ADDR.
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: doris-cluster-be
  namespace: zyly
  labels:
    app.kubernetes.io/component: doris-cluster-be
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: doris-cluster-be
  template:
    metadata:
      name: doris-cluster-be
      labels:
        app.kubernetes.io/component: doris-cluster-be
    spec:
      imagePullSecrets:
        - name: harborsecret
      volumes:
        - name: podinfo
          downwardAPI:
            items:
              - path: labels
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.labels
              - path: annotations
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.annotations
            defaultMode: 420
        - name: doris-cluster-be-conf
          configMap:
            name: doris-cluster-be-conf
            defaultMode: 420
        - name: be-storage
          persistentVolumeClaim:
            claimName: doris-be-storage-pvc
        - name: be-log
          persistentVolumeClaim:
            # FIX: previously claimed doris-fe-log-pvc, so BE logs landed on the
            # FE log volume; use the BE log PVC defined in pvc.yaml.
            claimName: doris-be-log-pvc
      initContainers:
        # BE requires a high vm.max_map_count and swap disabled on the host.
        - name: default-init
          image: 'hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/alpine:1.0.0'
          command:
            - /bin/sh
          args:
            - '-c'
            - sysctl -w vm.max_map_count=2000000 && swapoff -a
          resources:
            limits:
              cpu: '2'
              memory: 2Gi
            requests:
              cpu: '1'
              memory: 1Gi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true  # needed for sysctl/swapoff on the node
      containers:
        - name: be
          image: 'hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/doris.be-amd64:2.1.6'
          command:
            - /opt/apache-doris/be_entrypoint.sh
          args:
            - $(ENV_FE_ADDR)
          ports:
            - name: be-port
              containerPort: 9060
              protocol: TCP
            - name: webserver-port
              containerPort: 8040
              protocol: TCP
            - name: heartbeat-port
              containerPort: 9050
              protocol: TCP
            - name: brpc-port
              containerPort: 8060
              protocol: TCP
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: CONFIGMAP_MOUNT_PATH
              value: /etc/doris
            - name: USER
              value: root
            - name: DORIS_ROOT
              value: /opt/apache-doris
            - name: ENV_FE_ADDR
              value: doris-cluster-fe-service
            - name: FE_QUERY_PORT
              value: '9030'
          resources:
            limits:
              cpu: '16'
              memory: 32Gi
            requests:
              cpu: '8'
              memory: 32Gi
          volumeMounts:
            - name: podinfo
              mountPath: /etc/podinfo
            - name: be-storage
              mountPath: /opt/apache-doris/be/storage
            - name: be-log
              mountPath: /opt/apache-doris/be/log
            - name: doris-cluster-be-conf
              mountPath: /etc/doris
          livenessProbe:
            tcpSocket:
              port: 9050
            initialDelaySeconds: 80
            timeoutSeconds: 180
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /api/health
              port: 8040
              scheme: HTTP
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          startupProbe:
            tcpSocket:
              port: 9050
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 60
          lifecycle:
            preStop:
              exec:
                command:
                  - /opt/apache-doris/be_prestop.sh
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app.kubernetes.io/component
                      operator: In
                      values:
                        - doris-cluster-be
                topologyKey: kubernetes.io/hostname
      schedulerName: default-scheduler
  # volumeClaimTemplates:
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: be-storage
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: '10'
  #       storageClassName: nfs-prod-distribute
  #       volumeMode: Filesystem
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: be-log
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: '10'
  #       storageClassName: nfs-prod-distribute
  #       volumeMode: Filesystem
  serviceName: doris-cluster-be-internal
  podManagementPolicy: Parallel

View File

@@ -0,0 +1,67 @@
# Doris FE configuration mounted into the FE StatefulSet at /etc/doris.
# Correctly uses a literal block scalar (|) so fe.conf keeps its line breaks.
apiVersion: v1
kind: ConfigMap
metadata:
  name: doris-cluster-fe-conf
  namespace: zyly
  labels:
    app.kubernetes.io/component: fe
data:
  fe.conf: |
    #####################################################################
    ## The uppercase properties are read and exported by bin/start_fe.sh.
    ## To see all Frontend configurations,
    ## see fe/src/org/apache/doris/common/Config.java
    #####################################################################
    CUR_DATE=`date +%Y%m%d-%H%M%S`
    # Log dir
    LOG_DIR = ${DORIS_HOME}/log
    # For jdk 8
    JAVA_OPTS="-Dfile.encoding=UTF-8 -Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:$LOG_DIR/log/fe.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Dlog4j2.formatMsgNoLookups=true"
    # Set your own JAVA_HOME
    # JAVA_HOME=/path/to/jdk/
    ##
    ## the lowercase properties are read by main program.
    ##
    # store metadata, must be created before start FE.
    # Default value is ${DORIS_HOME}/doris-meta
    # meta_dir = ${DORIS_HOME}/doris-meta
    # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
    # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
    http_port = 8030
    rpc_port = 9020
    query_port = 9030
    edit_log_port = 9010
    arrow_flight_sql_port = -1
    # Choose one if there are more than one ip except loopback address.
    # Note that there should at most one ip match this list.
    # If no ip match this rule, will choose one randomly.
    # use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
    # Default value is empty.
    # priority_networks = 10.10.10.0/24;192.168.0.0/16
    # Advanced configurations
    # log_roll_size_mb = 1024
    # INFO, WARN, ERROR, FATAL
    sys_log_level = INFO
    # NORMAL, BRIEF, ASYNC,FE 日志的输出模式,其中 NORMAL 为默认的输出模式日志同步输出且包含位置信息。ASYNC 默认是日志异步输出且包含位置信息。 BRIEF 模式是日志异步输出但不包含位置信息。三种日志输出模式的性能依次递增
    sys_log_mode = ASYNC
    # sys_log_roll_num = 10
    # sys_log_verbose_modules = org.apache.doris
    # audit_log_dir = $LOG_DIR
    # audit_log_modules = slow_query, query
    # audit_log_roll_num = 10
    # meta_delay_toleration_second = 10
    # qe_max_connection = 1024
    # qe_query_timeout_second = 300
    # qe_slow_log_ms = 5000
    #Fully Qualified Domain Name完全限定域名,开启后各节点之间通信基于FQDN
    enable_fqdn_mode = true
View File

@@ -0,0 +1,17 @@
kind: Service
apiVersion: v1
metadata:
name: doris-cluster-fe-internal
namespace: zyly
labels:
app.kubernetes.io/component: doris-cluster-fe
spec:
ports:
- name: query-port
protocol: TCP
port: 9030
targetPort: 9030
selector:
app.kubernetes.io/component: doris-cluster-fe
clusterIP: None
type: ClusterIP

View File

@@ -0,0 +1,32 @@
# NodePort Service exposing the Doris FE ports outside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: doris-cluster-fe-service
  namespace: zyly
  labels:
    app.kubernetes.io/component: doris-cluster-fe
spec:
  type: NodePort
  selector:
    app.kubernetes.io/component: doris-cluster-fe
  ports:
    - name: http-port          # web UI / REST
      protocol: TCP
      port: 8030
      targetPort: 8030
      nodePort: 31620
    - name: rpc-port           # FE thrift rpc
      protocol: TCP
      port: 9020
      targetPort: 9020
      nodePort: 31621
    - name: query-port         # MySQL protocol
      protocol: TCP
      port: 9030
      targetPort: 9030
      nodePort: 31622
    - name: edit-log-port      # bdbje edit-log replication
      protocol: TCP
      port: 9010
      targetPort: 9010
      nodePort: 31623

View File

@@ -0,0 +1,190 @@
# Doris FE StatefulSet (single replica); meta and logs on pre-created PVCs.
# NOTE(review): ELECT_NUMBER is 3 while replicas is 1 — confirm intended
# follower count before scaling.
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: doris-cluster-fe
  namespace: zyly
  labels:
    app.kubernetes.io/component: doris-cluster-fe
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: doris-cluster-fe
  template:
    metadata:
      name: doris-cluster-fe
      labels:
        app.kubernetes.io/component: doris-cluster-fe
    spec:
      imagePullSecrets:
        - name: harborsecret
      volumes:
        - name: meta
          persistentVolumeClaim:
            # claimName: meta
            claimName: doris-fe-meta-pvc
        - name: log
          persistentVolumeClaim:
            # claimName: meta
            claimName: doris-fe-log-pvc
        - name: podinfo
          downwardAPI:
            items:
              - path: labels
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.labels
              - path: annotations
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.annotations
            defaultMode: 420
        - name: doris-cluster-fe-conf
          configMap:
            name: doris-cluster-fe-conf
            defaultMode: 420
      containers:
        - name: doris-cluster-fe
          image: 'hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/doris.fe-amd64:2.1.6'
          command:
            - /opt/apache-doris/fe_entrypoint.sh
          args:
            - $(ENV_FE_ADDR)
          ports:
            - name: http-port
              containerPort: 8030
              protocol: TCP
            - name: rpc-port
              containerPort: 9020
              protocol: TCP
            - name: query-port
              containerPort: 9030
              protocol: TCP
            - name: edit-log-port
              containerPort: 9010
              protocol: TCP
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: CONFIGMAP_MOUNT_PATH
              value: /etc/doris
            - name: USER
              value: root
            - name: DORIS_ROOT
              value: /opt/apache-doris
            - name: ENV_FE_ADDR
              value: doris-cluster-fe-service
            - name: FE_QUERY_PORT
              value: '9030'
            - name: ELECT_NUMBER
              value: '3'
          resources:
            limits:
              cpu: '2'
              memory: 2Gi
            requests:
              cpu: '1'
              memory: 1Gi
          volumeMounts:
            - name: podinfo
              mountPath: /etc/podinfo
            - name: log
              mountPath: /opt/apache-doris/fe/log
            - name: meta
              mountPath: /opt/apache-doris/fe/doris-meta
            - name: doris-cluster-fe-conf
              mountPath: /etc/doris
          livenessProbe:
            tcpSocket:
              port: 9030
            initialDelaySeconds: 80
            timeoutSeconds: 180
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /api/health
              port: 8030
              scheme: HTTP
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 3
          startupProbe:
            tcpSocket:
              port: 9030
            timeoutSeconds: 1
            periodSeconds: 5
            successThreshold: 1
            failureThreshold: 60
          lifecycle:
            preStop:
              exec:
                command:
                  - /opt/apache-doris/fe_prestop.sh
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app.kubernetes.io/component
                      operator: In
                      values:
                        - doris-cluster-fe
                topologyKey: kubernetes.io/hostname
      schedulerName: default-scheduler
  # volumeClaimTemplates:
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: meta
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: 10G
  #       storageClassName: hcms-efs-class
  #       volumeMode: Filesystem
  #   - kind: PersistentVolumeClaim
  #     apiVersion: v1
  #     metadata:
  #       name: log
  #     spec:
  #       accessModes:
  #         - ReadWriteOnce
  #       resources:
  #         requests:
  #           storage: '10'
  #       storageClassName: hcms-efs-class
  #       volumeMode: Filesystem
  serviceName: doris-cluster-fe-internal
  podManagementPolicy: Parallel

View File

@@ -0,0 +1,60 @@
---
# pvc.yaml — pre-created PVCs for the Doris FE/BE StatefulSets in namespace zyly.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-fe-meta-pvc
  namespace: zyly
spec:
  storageClassName: hcms-efs-class
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-fe-log-pvc
  namespace: zyly
spec:
  storageClassName: hcms-efs-class
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-be-storage-pvc
  namespace: zyly
spec:
  storageClassName: hcms-efs-class
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 180Gi # 根据实际存储需求调整
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-be-log-pvc
  namespace: zyly
spec:
  storageClassName: hcms-efs-class
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi

View File

@@ -0,0 +1,151 @@
# doris-fe-app.yaml — single-node Doris FE/BE deployed as plain Deployments
# (namespace uavcloud-devflight). FE_SERVERS uses the docker-doris
# name:host:edit_log_port format.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: doris-fe-app
  namespace: uavcloud-devflight
spec:
  replicas: 1
  selector:
    matchLabels:
      app: doris-fe-app
  template:
    metadata:
      labels:
        app: doris-fe-app
    spec:
      imagePullSecrets:
        - name: harborsecret
      containers:
        - name: doris-fe-app
          image: harbor.cdcyy.com.cn/cmii/doris.fe-amd64:2.1.6
          env:
            - name: FE_SERVERS
              value: "fe1:doris-fe-service:9010" # 使用Service名称进行服务发现
            - name: FE_ID
              value: "1"
          ports:
            - containerPort: 8030
            - containerPort: 9030
            - containerPort: 9010 # 添加内部通信端口
          volumeMounts:
            - name: fe-meta
              mountPath: /opt/apache-doris/fe/doris-meta
            - name: fe-log
              mountPath: /opt/apache-doris/fe/log
          resources:
            limits:
              memory: 2Gi
              cpu: "2"
            requests:
              memory: 2Gi
              cpu: "1"
      volumes:
        - name: fe-meta
          persistentVolumeClaim:
            claimName: doris-fe-meta-pvc
        - name: fe-log
          persistentVolumeClaim:
            claimName: doris-fe-log-pvc
---
# doris-fe-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: doris-fe-service
  namespace: uavcloud-devflight
spec:
  selector:
    app: doris-fe-app
  ports:
    - name: http
      port: 8030
      targetPort: 8030
    - name: query
      port: 9030
      targetPort: 9030
    - name: edit
      port: 9010 # 暴露FE内部通信端口
      targetPort: 9010
---
# doris-be-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: doris-be-app
  namespace: uavcloud-devflight
spec:
  replicas: 1
  selector:
    matchLabels:
      app: doris-be-app
  template:
    metadata:
      labels:
        app: doris-be-app
    spec:
      imagePullSecrets:
        - name: harborsecret
      # initContainers:
      #   - name: wait-for-fe
      #     image: harbor.cdcyy.com.cn/cmii/busybox:latest
      #     command: ['sh', '-c', 'until nc -z doris-fe-service 9010; do echo waiting for fe; sleep 2; done;']
      #     resources:
      #       limits:
      #         memory: 2Gi
      #         cpu: "2"
      #       requests:
      #         memory: 200Mi
      #         cpu: 200m
      containers:
        - name: doris-be-app
          image: harbor.cdcyy.com.cn/cmii/doris.be-amd64:2.1.6
          env:
            - name: FE_SERVERS
              value: "fe1:doris-fe-service:9010"
            - name: BE_ADDR
              value: "doris-be-service:9050" # 使用Service名称
          ports:
            - containerPort: 8040
            - containerPort: 9050 # 添加BE通信端口
          volumeMounts:
            - name: doris-be-storage
              mountPath: /opt/apache-doris/be/storage
            - name: doris-be-log
              mountPath: /opt/apache-doris/be/log
          resources:
            limits:
              memory: 2Gi
              cpu: "2"
            requests:
              memory: 2Gi
              cpu: "1"
      volumes:
        - name: doris-be-storage
          persistentVolumeClaim:
            claimName: doris-be-storage-pvc
        - name: doris-be-log
          persistentVolumeClaim:
            claimName: doris-be-log-pvc
---
# doris-be-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: doris-be-service
  namespace: uavcloud-devflight
spec:
  selector:
    app: doris-be-app
  ports:
    - name: http
      port: 8040
      targetPort: 8040
    - name: be-port
      port: 9050 # 暴露BE通信端口
      targetPort: 9050

View File

@@ -0,0 +1,151 @@
# doris-fe-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-fe-app
namespace: zyly
spec:
replicas: 1
selector:
matchLabels:
app: doris-fe-app
template:
metadata:
labels:
app: doris-fe-app
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: doris-fe-app
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/doris.fe-amd64:2.1.6
env:
- name: FE_SERVERS
value: "doris-fe-service:9010" # 使用Service名称进行服务发现
- name: FE_ID
value: "1"
ports:
- containerPort: 8030
- containerPort: 9030
- containerPort: 9010 # 添加内部通信端口
volumeMounts:
- name: fe-meta
mountPath: /opt/apache-doris/fe/doris-meta
- name: fe-log
mountPath: /opt/apache-doris/fe/log
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 2Gi
cpu: "1"
volumes:
- name: fe-meta
persistentVolumeClaim:
claimName: doris-fe-meta-pvc
- name: fe-log
persistentVolumeClaim:
claimName: doris-fe-log-pvc
---
# doris-fe-service.yaml
apiVersion: v1
kind: Service
metadata:
name: doris-fe-service
namespace: zyly
spec:
selector:
app: doris-fe-app
ports:
- name: http
port: 8030
targetPort: 8030
- name: query
port: 9030
targetPort: 9030
- name: edit
port: 9010 # 暴露FE内部通信端口
targetPort: 9010
---
# doris-be-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-be-app
namespace: zyly
spec:
replicas: 1
selector:
matchLabels:
app: doris-be-app
template:
metadata:
labels:
app: doris-be-app
spec:
imagePullSecrets:
- name: harborsecret
# initContainers:
# - name: wait-for-fe
# image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/busybox:latest
# command: ['sh', '-c', 'until nc -z doris-fe-service 9010; do echo waiting for fe; sleep 2; done;']
# resources:
# limits:
# memory: 2Gi
# cpu: "2"
# requests:
# memory: 200Mi
# cpu: 200m
containers:
- name: doris-be-app
image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/doris.be-amd64:2.1.6
env:
- name: FE_SERVERS
value: "doris-fe-service:9010"
- name: BE_ADDR
value: "doris-be-service:9050" # 使用Service名称
ports:
- containerPort: 8040
- containerPort: 9050 # 添加BE通信端口
volumeMounts:
- name: doris-be-storage
mountPath: /opt/apache-doris/be/storage
- name: doris-be-log
mountPath: /opt/apache-doris/be/log
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 2Gi
cpu: "1"
volumes:
- name: doris-be-storage
persistentVolumeClaim:
claimName: doris-be-storage-pvc
- name: doris-be-log
persistentVolumeClaim:
claimName: doris-be-log-pvc
---
# doris-be-service.yaml
apiVersion: v1
kind: Service
metadata:
name: doris-be-service
namespace: zyly
spec:
selector:
app: doris-be-app
ports:
- name: http
port: 8040
targetPort: 8040
- name: be-port
port: 9050 # 暴露BE通信端口
targetPort: 9050

View File

@@ -0,0 +1,60 @@
---
# pvc.yaml
# PersistentVolumeClaims for Doris: FE meta/log and BE storage/log, all on
# the nfs-prod-distribute storage class.
# NOTE(review): these PVCs are created in namespace `uavcloud-devflight`,
# while the doris-fe/doris-be workloads in this change set reference the
# same claim names from namespace `zyly`. A pod can only bind PVCs in its
# own namespace — confirm which namespace is intended.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-fe-meta-pvc
  namespace: uavcloud-devflight
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-fe-log-pvc
  namespace: uavcloud-devflight
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-be-storage-pvc
  namespace: uavcloud-devflight
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 180Gi # adjust to the actual storage requirement
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: doris-be-log-pvc
  namespace: uavcloud-devflight
spec:
  storageClassName: nfs-prod-distribute
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi

View File

@@ -0,0 +1,71 @@
# ConfigMap consumed by the Doris operator (configMapInfo.resolveKey:
# fe.conf). The block scalar below is the raw FE config file content —
# everything inside it, including its '#' lines, is data, not YAML.
# The ports here (8030/9030/9010) must stay in sync with doris-fe-service.
apiVersion: v1
kind: ConfigMap
metadata:
  name: fe-configmap
  namespace: zyly
  labels:
    app.kubernetes.io/component: fe
data:
  # enable_fqdn_mode is required for FE nodes addressed by pod DNS names.
  fe.conf: |
    CUR_DATE=`date +%Y%m%d-%H%M%S`
    # the output dir of stderr and stdout
    LOG_DIR = ${DORIS_HOME}/log
    JAVA_OPTS="-Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UseMembar -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xloggc:$DORIS_HOME/log/fe.gc.log.$CUR_DATE"
    # For jdk 9+, this JAVA_OPTS will be used as default JVM options
    JAVA_OPTS_FOR_JDK_9="-Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xlog:gc*:$DORIS_HOME/log/fe.gc.log.$CUR_DATE:time"
    # INFO, WARN, ERROR, FATAL
    sys_log_level = INFO
    # NORMAL, BRIEF, ASYNC
    sys_log_mode = NORMAL
    # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
    # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
    http_port = 8030
    arrow_flight_sql_port = 9090
    rpc_port = 9020
    query_port = 9030
    edit_log_port = 9010
    enable_fqdn_mode = true
---
# ConfigMap consumed by the Doris operator (configMapInfo.resolveKey:
# be.conf). The block scalar below is the raw BE config file content —
# everything inside it, including its '#' lines, is data, not YAML.
# heartbeat_service_port (9050) must match the port the FE uses to reach
# BE nodes.
apiVersion: v1
kind: ConfigMap
metadata:
  name: be-configmap
  namespace: zyly
  labels:
    app.kubernetes.io/component: be
data:
  be.conf: |
    CUR_DATE=`date +%Y%m%d-%H%M%S`
    PPROF_TMPDIR="$DORIS_HOME/log/"
    JAVA_OPTS="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xloggc:$DORIS_HOME/log/be.gc.log.$CUR_DATE -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -DJDBC_MIN_POOL=1 -DJDBC_MAX_POOL=100 -DJDBC_MAX_IDLE_TIME=300000 -DJDBC_MAX_WAIT_TIME=5000"
    # For jdk 9+, this JAVA_OPTS will be used as default JVM options
    JAVA_OPTS_FOR_JDK_9="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xlog:gc:$DORIS_HOME/log/be.gc.log.$CUR_DATE -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -DJDBC_MIN_POOL=1 -DJDBC_MAX_POOL=100 -DJDBC_MAX_IDLE_TIME=300000 -DJDBC_MAX_WAIT_TIME=5000"
    # since 1.2, the JAVA_HOME need to be set to run BE process.
    # JAVA_HOME=/path/to/jdk/
    # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
    # https://jemalloc.net/jemalloc.3.html
    JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,lg_tcache_max:20,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
    JEMALLOC_PROF_PRFIX=""
    # INFO, WARNING, ERROR, FATAL
    sys_log_level = INFO
    # ports for admin, web, heartbeat service
    be_port = 9060
    webserver_port = 8040
    heartbeat_service_port = 9050
    arrow_flight_sql_port = 39091
    brpc_port = 8060

View File

@@ -0,0 +1,94 @@
# DorisCluster custom resource for the doris-operator: 3 FE + 3 BE,
# pinned to nodes labelled uavcloud.env=doris, with dedicated NFS
# storage classes per volume (meta, jdbc drivers, data).
# NOTE(review): another DorisCluster manifest in this change set uses the
# same name/namespace (doriscluster-helm / zyly) with different sizing —
# only one variant should be applied per cluster.
apiVersion: doris.selectdb.com/v1
kind: DorisCluster
metadata:
  labels:
    app.kubernetes.io/name: doriscluster
  name: doriscluster-helm
  namespace: zyly
spec:
  feSpec:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: uavcloud.env
                  operator: In
                  values:
                    - doris
    replicas: 3
    image: 172.16.100.55:8033/cmii/doris.fe-ubuntu:2.1.6
    limits:
      cpu: 8
      memory: 16Gi
    requests:
      cpu: 2
      memory: 6Gi
    configMapInfo:
      # use kubectl create configmap fe-configmap --from-file=fe.conf
      configMapName: fe-configmap
      resolveKey: fe.conf
    persistentVolumes:
      - mountPath: /opt/apache-doris/fe/doris-meta
        name: doris-fe-2000g
        persistentVolumeClaimSpec:
          # when use specific storageclass, the storageClassName should reConfig, example as annotation.
          storageClassName: doris-nfs-sc-56
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 2000Gi
      - mountPath: /opt/apache-doris/fe/jdbc_drivers
        name: doriscluster-storage-fe-jdbc-drivers
        persistentVolumeClaimSpec:
          # when use specific storageclass, the storageClassName should reConfig, example as annotation.
          storageClassName: doris-nfs-sc-58
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 100Gi
  beSpec:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: uavcloud.env
                  operator: In
                  values:
                    - doris
    replicas: 3
    image: 172.16.100.55:8033/cmii/doris.be-ubuntu:2.1.6
    limits:
      cpu: 8
      memory: 24Gi
    requests:
      cpu: 2
      memory: 6Gi
    configMapInfo:
      # use kubectl create configmap be-configmap --from-file=be.conf
      configMapName: be-configmap
      resolveKey: be.conf
    persistentVolumes:
      - mountPath: /opt/apache-doris/be/storage
        name: doris-1-9000g-pvc
        persistentVolumeClaimSpec:
          # when use specific storageclass, the storageClassName should reConfig, example as annotation.
          storageClassName: doris-nfs-sc-57
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 9000Gi
      - mountPath: /opt/apache-doris/be/jdbc_drivers
        name: doriscluster-storage-be-jdbc-drivers
        persistentVolumeClaimSpec:
          # when use specific storageclass, the storageClassName should reConfig, example as annotation.
          storageClassName: doris-nfs-sc-58
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 1000Gi

View File

@@ -0,0 +1,120 @@
---
# DorisCluster custom resource (single-FE variant): 1 FE + 3 BE, pinned to
# nodes labelled doris-deploy=true, data on the doris-static-storage class.
# NOTE(review): another DorisCluster manifest in this change set uses the
# same name/namespace (doriscluster-helm / zyly) — apply only one variant.
apiVersion: doris.selectdb.com/v1
kind: DorisCluster
metadata:
  labels:
    app.kubernetes.io/name: doriscluster
  name: doriscluster-helm
  namespace: zyly
spec:
  feSpec:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: doris-deploy
                  operator: In
                  values:
                    - "true"
    replicas: 1
    image: 172.16.100.55:8033/cmii/doris.fe-ubuntu:2.1.6
    limits:
      cpu: 8
      memory: 16Gi
    requests:
      cpu: 2
      memory: 6Gi
    configMapInfo:
      # use kubectl create configmap fe-configmap --from-file=fe.conf
      configMapName: fe-configmap
      resolveKey: fe.conf
    nodeSelector:
      uavcloud.env: demo
    persistentVolumes:
      - mountPath: /opt/apache-doris/fe/doris-meta
        name: doriscluster-storage0
        persistentVolumeClaimSpec:
          # when use specific storageclass, the storageClassName should reConfig, example as annotation.
          storageClassName: doris-static-storage
          accessModes:
            - ReadWriteOnce
          resources:
            # notice: if the storage size less 5G, fe will not start normal.
            requests:
              storage: 500Gi
      - mountPath: /opt/apache-doris/fe/jdbc_drivers
        name: doriscluster-storage-fe-jdbc-drivers
        persistentVolumeClaimSpec:
          # when use specific storageclass, the storageClassName should reConfig, example as annotation.
          storageClassName: cmlc-nfs-storage
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 10Gi
  beSpec:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: doris-deploy
                  operator: In
                  values:
                    - "true"
    replicas: 3
    image: 172.16.100.55:8033/cmii/doris.be-ubuntu:2.1.6
    limits:
      cpu: 8
      memory: 24Gi
    requests:
      cpu: 2
      memory: 6Gi
    configMapInfo:
      # use kubectl create configmap be-configmap --from-file=be.conf
      configMapName: be-configmap
      resolveKey: be.conf
    nodeSelector:
      uavcloud.env: demo
    persistentVolumes:
      # BUGFIX: the original manifest mounted both doriscluster-storage1 and
      # doriscluster-storage2 at /opt/apache-doris/be/storage; duplicate
      # volumeMount paths within one container are rejected by the
      # Kubernetes API ("must be unique"). The second data volume now
      # mounts at /opt/apache-doris/be/storage2 — be.conf's
      # storage_root_path must list both directories for BE to use it.
      - mountPath: /opt/apache-doris/be/storage
        name: doriscluster-storage1
        persistentVolumeClaimSpec:
          # when use specific storageclass, the storageClassName should reConfig, example as annotation.
          storageClassName: doris-static-storage
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 9000Gi
      - mountPath: /opt/apache-doris/be/storage2
        name: doriscluster-storage2
        persistentVolumeClaimSpec:
          # when use specific storageclass, the storageClassName should reConfig, example as annotation.
          storageClassName: doris-static-storage
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 9000Gi
      - mountPath: /opt/apache-doris/be/log
        name: doriscluster-storage3
        persistentVolumeClaimSpec:
          # when use specific storageclass, the storageClassName should reConfig, example as annotation.
          storageClassName: doris-static-storage
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              # NOTE(review): 9000Gi for a log volume looks like a
              # copy-paste of the data size — confirm intended capacity.
              storage: 9000Gi
      - mountPath: /opt/apache-doris/be/jdbc_drivers
        name: doriscluster-storage-be-jdbc-drivers
        persistentVolumeClaimSpec:
          # when use specific storageclass, the storageClassName should reConfig, example as annotation.
          storageClassName: cmlc-nfs-storage
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 10Gi

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,340 @@
# Source: doris-operator/templates/serviceaccount.yaml
# ServiceAccount the doris-operator Deployment runs as; bound to the
# doris-operator ClusterRole and leader-election Role below.
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/name: serviceaccount
    app.kubernetes.io/instance: controller-doris-operator-sa
    app.kubernetes.io/component: rbac
    app.kubernetes.io/created-by: doris-operator
    app.kubernetes.io/part-of: doris-operator
    app.kubernetes.io/managed-by: Helm
  name: doris-operator
  namespace: zyly
---
# Source: doris-operator/templates/clusterrole.yaml
# Cluster-wide permissions for the operator: manage StatefulSets, HPAs,
# Services, ServiceAccounts and RBAC bindings; read pods/endpoints/
# configmaps/secrets; and own the doris.selectdb.com/DorisCluster CRD.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  # creationTimestamp: null is residue from the controller-gen export;
  # harmless on apply.
  creationTimestamp: null
  name: doris-operator
rules:
  - apiGroups:
      - apps
    resources:
      - statefulsets
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - apps
    resources:
      - statefulsets/status
    verbs:
      - get
  - apiGroups:
      - autoscaling
    resources:
      - horizontalpodautoscalers
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - persistentvolumeclaims
    verbs:
      - get
      - list
      - watch
      - update
      - patch
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - serviceaccounts
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - doris.selectdb.com
    resources:
      - dorisclusters
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - doris.selectdb.com
    resources:
      - dorisclusters/finalizers
    verbs:
      - update
  - apiGroups:
      - doris.selectdb.com
    resources:
      - dorisclusters/status
    verbs:
      - get
      - patch
      - update
  - apiGroups:
      - rbac.authorization.k8s.io
    resources:
      - clusterrolebindings
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - rbac.authorization.k8s.io
    resources:
      - rolebindings
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
---
# Source: doris-operator/templates/clusterrolebinding.yaml
# Grants the doris-operator ClusterRole to the operator's ServiceAccount
# in namespace zyly.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: clusterrolebinding
    app.kubernetes.io/instance: doris-operator-rolebinding
    app.kubernetes.io/component: rbac
    app.kubernetes.io/created-by: doris-operator
    app.kubernetes.io/part-of: doris-operator
    app.kubernetes.io/managed-by: Helm
  name: doris-operator-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: doris-operator
subjects:
  - kind: ServiceAccount
    name: doris-operator
    namespace: zyly
---
# Source: doris-operator/templates/leader-election-role.yaml
# Namespaced permissions for controller-runtime leader election:
# configmap/lease-based locking plus event emission.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/name: role
    app.kubernetes.io/instance: leader-election-role
    app.kubernetes.io/component: rbac
    app.kubernetes.io/created-by: doris-operator
    app.kubernetes.io/part-of: doris-operator
    app.kubernetes.io/managed-by: Helm
  name: leader-election-role
  namespace: zyly
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
---
# Source: doris-operator/templates/leader-election-role-binding.yaml
# Binds the leader-election Role to the operator's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/name: rolebinding
    app.kubernetes.io/instance: leader-election-rolebinding
    app.kubernetes.io/component: rbac
    app.kubernetes.io/created-by: doris-operator
    app.kubernetes.io/part-of: doris-operator
    app.kubernetes.io/managed-by: Helm
  name: leader-election-rolebinding
  namespace: zyly
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: leader-election-role
subjects:
  - kind: ServiceAccount
    name: doris-operator
    namespace: zyly
---
# Source: doris-operator/templates/deployment.yaml
# Single-replica doris-operator controller. Runs non-root with all
# capabilities dropped; leader election enabled via --leader-elect.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: doris-operator
  namespace: zyly
  labels:
    control-plane: doris-operator
    app.kubernetes.io/name: deployment
    app.kubernetes.io/instance: doris-operator
    app.kubernetes.io/component: doris-operator
    app.kubernetes.io/created-by: doris-operator
    app.kubernetes.io/part-of: doris-operator
spec:
  selector:
    matchLabels:
      control-plane: doris-operator
  replicas: 1
  template:
    metadata:
      annotations:
        # BUGFIX: this annotation must name an existing container in the
        # pod; the container below is "dorisoperator" (no hyphen), while
        # the original manifest pointed at "doris-operator", so kubectl's
        # default-container selection silently matched nothing.
        kubectl.kubernetes.io/default-container: dorisoperator
      labels:
        control-plane: doris-operator
    spec:
      # TODO(user): Uncomment the following code to configure the nodeAffinity expression
      # according to the platforms which are supported by your solution.
      # It is considered best practice to support multiple architectures. You can
      # build your manager image using the makefile target docker-buildx.
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: doris-deploy
                    operator: In
                    values:
                      - "true"
      securityContext:
        runAsNonRoot: true
        # TODO(user): For common cases that do not require escalating privileges
        # it is recommended to ensure that all your Pods/Containers are restrictive.
        # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
        # Please uncomment the following code if your project does NOT have to work on old Kubernetes
        # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
        # seccompProfile:
        #   type: RuntimeDefault
      containers:
        - command:
            - /dorisoperator
          args:
            - --leader-elect
          image: 172.16.100.55:8033/cmii/doris.k8s-operator:1.3.1
          name: dorisoperator
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - "ALL"
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8081
            initialDelaySeconds: 15
            periodSeconds: 20
          readinessProbe:
            httpGet:
              path: /readyz
              port: 8081
            initialDelaySeconds: 5
            periodSeconds: 10
          # TODO(user): Configure the resources accordingly based on the project requirements.
          # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
          resources:
            requests:
              cpu: 2
              memory: 4Gi
            limits:
              cpu: 2
              memory: 4Gi
      serviceAccountName: doris-operator
      terminationGracePeriodSeconds: 10
View File

@@ -0,0 +1,28 @@
# kubeconfig for the hncm cluster with two API-server endpoints: a public
# IP (TLS verification skipped) and the in-cluster DNS name (CA pinned).
# current-context selects the IP endpoint.
# WARNING(review): this file embeds a client certificate AND its RSA
# private key; committing it to the repository leaks cluster admin
# credentials — rotate the certificate and move this file to a secret
# store instead of version control.
apiVersion: v1
clusters:
  - name: clusterIpByhncm
    cluster:
      server: https://36.137.146.187:6443
      insecure-skip-tls-verify: true
  - name: clusterDNSByhncm
    cluster:
      server: https://apiserver.cluster.local:6443
      certificate-authority-data: >-
        LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM3RENDQWRTZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQ0FYRFRJME1EY3lOREUyTXpJeE0xb1lEekl4TWpRd056QXhNVFl6TWpFeldqQVZNUk13RVFZRApWUVFERXdwcmRXSmxjbTVsZEdWek1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCjA0a3gyZFRpTHpCQWZPU2FjVkV5dmlIOE1GNTZBYmlzZnY1cXFOUVVlUk84ekFrVWx1NENqRHkzRDhTVVJiVnoKTmFKUXMwc3RNU00zd09mNDFFbGVZREF0amR3ZXpNZDZDdXlaYkhPQnRteE9heGdTdWlid2hqSGcwTTZzRndiTQplNzJVYy8rcGY2R3dqaDdXc0N0bk02ZG1hZ0FmVzhicGJOMWNIRERIblRJbzlIbEhBNURFUE1GYkk3VGkyQ0hZCjl6UE01MlpZazR1M2RuWDRPQWI3K0hXUjdidEFLYzFIdWxreEhqOHZnNXN1bmtGSjhia1BGYmRQY3BmSmhPREYKSjljcVllUXpYWGpXSmZvUFBGZFNlMDAvc2NXYlNSYjlRa1Z3QXl5VTRKcVl3UU9CUDIxNm9Sd3FUTUJjTkk4WQpQRHgwY0paVjliVXpEazNtdVN4eFF3SURBUUFCbzBVd1F6QU9CZ05WSFE4QkFmOEVCQU1DQXFRd0VnWURWUjBUCkFRSC9CQWd3QmdFQi93SUJBREFkQmdOVkhRNEVGZ1FVcFAyalRRQU1JanIrQ1QxTkwxZXkxT2J1UWtZd0RRWUoKS29aSWh2Y05BUUVMQlFBRGdnRUJBSGhRM3NkSzV6WitSV0YxWWgvQklxMHE3NXpMcm1BdEFoNWxxL01sTFgvcApuTjhZYm9QZk4wdG84ekxtbnNCcHFDby9nOXozUENyS3lFbkkwNVBwYkkveHk4MW0xUXdrcEVMUWJPTVFXbk9JCnpRZmJhTktXVFJiR2pYWGtPMWVkMVZmV0YyN2p4amhlSW1kWWZQQjQxYkVjMGh4bnRNazB0NXdxZ3M2ZFVwdUMKQk9vVUhqOUxVUDIwZ0VqUmhEbnZPMGNERmU5SGd4Z1E2QTJnUDcraW8rbUx6M0xlWDR4a2ZRcXM1YWtEQVVocwpvTXBKUUlmZnRjZzRiWjR6dWZRT0hVaDM2QVBWSVI1NTkreUp5V1FhdlRyVUFQTjFPNG1kaTlOVEN2bXp3K3RnCmZaMVNtc3B2SFVDU1ZXM1BxQ3ZQaXNXeWFYd3dMUjRORThDZjIycXRVZXM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
users:
  - name: userByhncm
    user:
      client-certificate-data: >-
        LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJekNDQWd1Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJMU1EUXlOVEEyTURnek9Gb1hEVE13TURReU5EQTJNRGd6T0Zvd1N6RVRNQkVHQTFVRQpDaE1LYzNsemRHVnRPbXRqY3pFME1ESUdBMVVFQXhNcllXUmhaVE0xWkdRME56WXhOREpsTlRrelpETTBaRE5rCk1tTmhOR0ZoTkRJdE1qSXhOREl3TWpBMk16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0MKZ2dFQkFONFRzRFBBTWJnNk4wY2l4Q002YjhmeHpUeFBWaG9LL2REemxVeXlqZEpnQ2hLNFpLVzhqR3paa3p1SQo0d0NxWmx1cVBBeGNoY0xnS1hjVnJZSFF6OWhoT2NRL2hHK3c4Y2Znck16MFlGckpDRlhXU2NTN3lJbTJGNHk2CkFWenlTbVAzMktTdVRzL0xGZlJYam5sclNVSjJyeWFaWlhBUENRMXJaT3BQRnZiQTA5VzY1WTJLMHJFZ25tYUQKVng4SUFGUXVtQS82NUpqQ2NmK0pLS0p4cTlnU2l4RHFZTXF5MGxianU3cVNVeFhUaFR3MGsvYjdmM1d4U0phTgp3RnU0ckE1czhPYTRhTjd3STAxWmllNmMvS2dxbUVkVDIxclFvMTFRUCs3OFdmcGc4bDIvOFd1WVZiY0pWTmZwClEzMlplWmJwZFUrZ2pnVC92YkY2bWpmdEJNRUNBd0VBQWFOSU1FWXdEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CTUcKQTFVZEpRUU1NQW9HQ0NzR0FRVUZCd01DTUI4R0ExVWRJd1FZTUJhQUZLVDlvMDBBRENJNi9nazlUUzlYc3RUbQo3a0pHTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFCdFcrOStNeWQ0b0FIdDBYK203ckdlR01paXVhbXhoM3RlCjc2YUtnemRDUDU0cS8yd3hmZHZlRmhyT1Roamw4bkpSZzJMUGRFSjR2ek5WWW9lMDc1L3hjbDRFQ3Y5dUUrM2UKSkFzSjBjSlEzMmlmRGZvRmovNE52djZSN2J1bXpwOGlKK0UxOXBmZ3ZXeDFzcHNqYXgzOVBqTUtseWhqSGp6YwpIOXpLeXV1QXkyMG10c0c1RW5mcnlSb2pqS0NGd0xNK0dnT21rYlRVRGczZk9jSXFQT3pYdVd4OHFzaTB0dk1oCkJyUXhRMVlWVkN4UlBLOW4rYzNHVmdCZE5ZdTRJSVFURmdBNURhUTZ4WXZzamo4eXpIbTdOYTJwMTZ2ODFnVlEKOEJxdHBOQWQwZlBQUDdjZ0xucUNaNGExalFxQld1UlExclhyNFgzK1R4NVp1azNsNEVlRgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
      # NOTE(review): the value below is split over two folded-scalar
      # lines; '>-' joins them with a SPACE, which is not valid inside a
      # base64 string — confirm the key decodes, and if not, join the two
      # lines into one.
      client-key-data: >-
        LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBM2hPd004QXh1RG8zUnlMRUl6cHZ4L0hOUEU5V0dncjkwUE9WVExLTjBtQUtFcmhrCnBieU1iTm1UTzRqakFLcG1XNm84REZ5Rnd1QXBkeFd0Z2REUDJHRTV4RCtFYjdEeHgrQ3N6UFJnV3NrSVZkWkoKeEx2SWliWVhqTG9CWFBKS1kvZllwSzVPejhzVjlGZU9lV3RKUW5hdkpwbGxjQThKRFd0azZrOFc5c0RUMWJybApqWXJTc1NDZVpvTlhId2dBVkM2WUQvcmttTUp4LzRrb29uR3IyQktMRU9wZ3lyTFNWdU83dXBKVEZkT0ZQRFNUCjl2dC9kYkZJbG8zQVc3aXNEbXp3NXJobzN2QWpUVm1KN3B6OHFDcVlSMVBiV3RDalhWQS83dnhaK21EeVhiL3gKYTVoVnR3bFUxK2xEZlpsNWx1bDFUNkNPQlArOXNYcWFOKzBFd1FJREFRQUJBb0lCQUdWQkF0REZxNlc4M1dGUQp4NjdzUVBVZ0daeUs2ODRaL3RvYmhaSWZ3eXdOWCtwaktuaXF2RGdSQitGWUszQXRRVmhBQ1ByMTRJTVc0N0VKCk1FQUZMZzhSWFY1T081c1ZTVmdCNTZmWE1HdVVSM21qcDZUTW5jVzBRWTZIYklHdGN4K2JiS3JCVXV2SlEreFIKVTRPbTJCTTJ1K3RVZTN5WlRsNVEvZ1ViSkhSQ1l6a0JtUVZUd29JeGRTb0VaV1RUNHhBa2E4anBMajgxV2pZdQpXdTIyWW9qNEZLYmtpNExsNzhEdzRYVHg5S0RLN1ArcGZINk0xQ1ZBanRuTWI3Qmo0NkJML3dhREFHWCs4YU5ECnNOcExGSDQ2SEQ3ZGR6L3g5TDc0ODdYVG4yMC9kVm9pMkh5SUlaZWt6ZTZUZzlLNk42SEpIejVMQ1kxUmRHOXEKa3pxU2w4RUNnWUVBL2RFSVZlWWtTVXU1YVlaSitTQmQ5bkphWGFKSGc5UTIwQ09nYlNyRG5pdHJkbDFzMG5XcApUL2UvWmhzRmpBV1ZiT3RTeWI4K0hEdVA2NlZ0UkVSUXBMZjUvVDFDUDF5S2Z6a0I2KzlBM3lJRW5URWdEZU9HClJuRzF6a1poUXZ4Z29ESldVS1FTVEtheXFEVkpFTXllOUlRY0d6SitmRElaYjVuSnk2a2V2UmtDZ1lFQTMvekIKMCsraUtxaTc1bVVxdXcwU2NOeHRiZnZabEF4aXhXaW9IZHpRZTd4WHU4NlB2YTRRbXlIYXZ4SSs0TzRxWkZaWQpBa3lRYmM2WWpQbk5tYTgyMmlTOGNyUE5WZHFUMXN2VFh2NWJwNDdZKzNiQ0xGelZOU0ppbzZJNFpwVnJ4cFBDCnhqSzgvbUhlemRqMHM5NEVRZjNIc29ReURJc1duQlQ1aDMvT1Vla0NnWUFrWHNLeUJHTm91VmdkUGVmMXNlU2sKL3VmaUNQSDBld0ZMdS84dUJYVzRnWUpKbU1PU09OZ0ZKSHdodG5lS0EvYlFmN3dDT3N3OG9YQlRGUVplUk9mRgpZck9JelRGa0FPKzdvVUNjUFZGdlorRi9ZTjIxLzhkcTZycGhqNFk2WWcyNmh0d0ZmQzAvSEpmM01JT0N5Nzl2ClRFcjJ2cnFQTTJLZnNrclBTekdqYVFLQmdRRFNWV0lXaVFMNW1EWEtwdG0zaVJEMkZxaFVEWnBGNGxPbUdSODEKMjdPVFhrdnN1bzVVaW01aG55cnhIa3Nab2MwQkordkJUUENiak5QSUhwUXBhMVhSQjRwSEUvMFFVQTlnTjBzbgoreWRLYzJQaXo2U0xVQ21PbWRtUzI3U204RTJpemdLclo5Mzk2eDMwbzNVMnFwZkkrRzFjZjdNWUFFeWRDVDFCCk5vMGljUUtCZ1FDY2dueEtzYVowZzF2VHYyOXRyZHBUcS9UVWVK
        ek1jSlU3WEp0Z0p2OXJMamFqN2lhb01UeTIKY2xSVkRjVzN5OUhualN0Rk9GN0gxVlBLZEFvaUlWdWs5TjlLalk3VkRUWmdPY0QwU3NKd1lMOUl2SEFIT3ArZAozUVpZV3VBQnBlaFlqK2svQm0xWjVyVUg5S05uQ24zRVJ0RHNCTGJYRUEvemlONnFEZnpSSnc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
contexts:
  - name: cxtIPByhncm
    context:
      cluster: clusterIpByhncm
      user: userByhncm
  - name: cxtDNSByhncm
    context:
      cluster: clusterDNSByhncm
      user: userByhncm
current-context: cxtIPByhncm

View File

@@ -0,0 +1,174 @@
---
# cmii-uav-watchdog backend Deployment (octopus-managed, app version
# 6.2.0). Registers with Nacos at helm-nacos:8848 and reads its runtime
# settings from the cmii-uav-watchdog-configmap mounted as config.yaml.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-watchdog
  namespace: zyly
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-watchdog
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 6.2.0
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: backend
      cmii.app: cmii-uav-watchdog
  template:
    metadata:
      labels:
        cmii.type: backend
        cmii.app: cmii-uav-watchdog
    spec:
      affinity: {}
      imagePullSecrets:
        - name: harborsecret
      containers:
        - name: cmii-uav-watchdog
          image: hncm-fc766f84.ecis.changsha-2.cmecloud.cn/cmii/cmii-uav-watchdog:2025-04-10-15-24-03
          imagePullPolicy: Always
          env:
            - name: K8S_NAMESPACE
              value: zyly
            - name: APPLICATION_NAME
              value: cmii-uav-watchdog
            - name: CUST_JAVA_OPTS
              value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
            - name: NACOS_REGISTRY
              value: "helm-nacos:8848"
            - name: NACOS_DISCOVERY_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NACOS_DISCOVERY_PORT
              value: "8080"
            - name: BIZ_CONFIG_GROUP
              value: 6.2.0
            - name: SYS_CONFIG_GROUP
              value: 6.2.0
            - name: IMAGE_VERSION
              value: 6.2.0
            # NOTE(review): plaintext Nacos credentials in the pod spec —
            # move NACOS_USERNAME/NACOS_PASSWORD to a Secret and use
            # valueFrom.secretKeyRef.
            - name: NACOS_USERNAME
              value: "developer"
            - name: NACOS_PASSWORD
              value: "Deve@9128201"
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: NODE_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: LIMIT_CPU
              valueFrom:
                resourceFieldRef:
                  containerName: cmii-uav-watchdog
                  resource: limits.cpu
            - name: LIMIT_MEMORY
              valueFrom:
                resourceFieldRef:
                  containerName: cmii-uav-watchdog
                  resource: limits.memory
            - name: REQUEST_CPU
              valueFrom:
                resourceFieldRef:
                  containerName: cmii-uav-watchdog
                  resource: requests.cpu
            - name: REQUEST_MEMORY
              valueFrom:
                resourceFieldRef:
                  containerName: cmii-uav-watchdog
                  resource: requests.memory
          ports:
            - name: pod-port
              containerPort: 8080
              protocol: TCP
          resources:
            limits:
              memory: 2Gi
              cpu: "2"
            requests:
              memory: 200Mi
              cpu: 200m
          volumeMounts:
            # Per-app log directory on the shared NFS backend-log claim.
            - name: nfs-backend-log-volume
              mountPath: /cmii/cmii-uav-watchdog/
              readOnly: false
              subPath: zyly/cmii-uav-watchdog
            # NOTE(review): mounts the helm-mysql data PVC read-write into
            # the watchdog container — presumably for inspection/backup;
            # confirm this is intentional.
            - name: mysql-data
              mountPath: /bitnami/mysql
            - name: cmii-uav-watchdog-conf
              mountPath: /cmii/cmii-uav-watchdog/config.yaml
              subPath: config.yaml
      volumes:
        - name: mysql-data
          persistentVolumeClaim:
            claimName: helm-mysql
        - name: nfs-backend-log-volume
          persistentVolumeClaim:
            claimName: nfs-backend-log-pvc
        - name: cmii-uav-watchdog-conf
          configMap:
            name: cmii-uav-watchdog-configmap
            items:
              - key: config.yaml
                path: config.yaml
---
# NodePort Service for the watchdog backend; no nodePort is pinned, so
# Kubernetes allocates one from the cluster's NodePort range.
apiVersion: v1
kind: Service
metadata:
  name: cmii-uav-watchdog
  namespace: zyly
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-watchdog
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 6.2.0
spec:
  type: NodePort
  selector:
    cmii.type: backend
    cmii.app: cmii-uav-watchdog
  ports:
    - name: backend-tcp
      port: 8080
      protocol: TCP
      targetPort: 8080
---
# Runtime configuration for cmii-uav-watchdog, mounted as config.yaml.
# The block scalar below is the application's own file content — its '#'
# lines are data, not YAML comments.
# NOTE(review): tier_one_secret / tier_two_secret are credentials stored
# in a plain ConfigMap; move them to a Secret.
kind: ConfigMap
apiVersion: v1
metadata:
  name: cmii-uav-watchdog-configmap
  namespace: zyly
data:
  config.yaml: |-
    server:
      port: "8080" # 服务器端口
    tier_one_auth:
      tier_one_secret: "NK537TIWSUOFIS7SYCUJ6A7FPOGFVM3UH67TJRX3IYQAHKZXK2X7SBAA6JOXZVSV3U6K5YZUX7Q6TWOPK6YCRU6MIML33ZJFBN55I2Q" # TOTP密钥
      time_offset_allowed: 30 # 允许的时间偏移(秒)
    watchdog_center:
      url: "https://watchdog-center.example.com" # 一级授权中心地址
    project:
      project_namespace: "zyly" # 项目命名空间
    tier_two_auth:
      tier_two_secret: "your_tier_two_secret_here" # 二级授权密钥

Some files were not shown because too many files have changed in this diff Show More