Compare commits
62 commits: main ... 22ea4b8359

| SHA1 |
|---|
| 22ea4b8359 |
| 32c82b2751 |
| ae4bdb7dbc |
| ae1baa4c38 |
| c8d897622d |
| 86bcbad396 |
| ab6a130950 |
| 8e4444a7cc |
| 34b5f80704 |
| 35646ff89f |
| 06b044dabc |
| 5c2325b7e4 |
| 2e96490926 |
| 3cf5e369c1 |
| 7c92512a7e |
| db3d259a0a |
| 5c39bd7594 |
| bffb643a56 |
| b28c6462f1 |
| c10554c218 |
| b6cc5abc63 |
| 8fc55e2e28 |
| 6de29630b5 |
| 3ad2533550 |
| 7a703dccc4 |
| 72bc56b5e5 |
| 16c041e3eb |
| e8f0e0d4a9 |
| c751c21871 |
| b8170e00d4 |
| 60a1849207 |
| 5a8aa53d64 |
| ce0395ae66 |
| dabf63f10f |
| e826b55240 |
| 66dca6a080 |
| 1f6dcc3ef0 |
| 46fd5f7d97 |
| b8f0b14852 |
| a0811d62e7 |
| 5ca5689083 |
| 5bfcb98e03 |
| 0d3bb30eed |
| 4edaf9f35a |
| 4135195430 |
| 9f4631af91 |
| af3e058af4 |
| fa0e4a0734 |
| 5a3c53969c |
| 8f5f85826c |
| 88cb1e1bb1 |
| 07cf7a12b7 |
| 4b1712b67f |
| 9b026a2ec7 |
| 724ef6424c |
| 332cc1d9eb |
| bf45eeb735 |
| f901992d92 |
| 98b0e14304 |
| 82bdcca604 |
| d5cbaded65 |
| 327d12f789 |
.aiexclude (new file, 19 lines)
@@ -0,0 +1,19 @@
.cursorignore
.cursor
.idea
.run

agent
agent-tmp


common

message_pusher
port_forwarding
server-go
socks5_txthinking
source
pom.xml
setting-aliyun.xml
.cursor/rules/agent-wdd-rules.mdc (new file, 60 lines)
@@ -0,0 +1,60 @@
---
description: Rules providing context specific to building agent-wdd
globs: *.go
---

# You are a Go programming expert, fluent with the github.com/spf13/cobra framework and able to build very modern CLI tools

@.cursorignore Please ignore the files under these directories

# The overall architecture of the project is as follows
1. base: basic server operations; the related functionality lives in [Base.go](mdc:agent-wdd/cmd/Base.go)
   1. docker: Docker-related operations
      1. online: install a specific Docker version over the network
      2. remove: uninstall Docker
      3. local: install Docker from local Docker binaries
   2. dockercompose: docker-compose-related operations
      1. online: install a specific docker-compose version over the network
      2. remove: uninstall docker-compose
      3. local: install docker-compose from a local file
   3. tools: install common software over the network using the host's yum, apt, etc.
   4. ssh: SSH-related operations
      1. key: install a specific ssh-key
      2. port: change the sshd port to a specific port
      3. config: change the sshd configuration to a specific configuration
   5. swap: disable the host's swap
   6. selinux: disable SELinux on the host
   7. firewall: disable the host's firewall settings
   8. sysconfig: modify the host's sysconfig settings
2. zsh: zsh-related content; automatically installs and configures zsh [Zsh.go](mdc:agent-wdd/cmd/Zsh.go)
3. proxy: host proxy-related content
   1. xray: xray-related content
      1. install: install the latest version of xray
      2. local: install xray from the local machine
      3. upgrade: uninstall xray
   2. vmess: one-click setup of the vmess proxy mode
   3. vless: one-click setup of the vless proxy mode
   4. sysconfig: modify the host's proxy-related kernel parameters
4. acme: acme-related content
   1. install: install acme.sh
   2. cert: request certificate files for a specific domain
   3. list: list the certificates that exist locally
5. wdd
   1. host: update all hosts
   2. resolve: update the host's resolve configuration
   3. agent: this part is the octopus-agent content
      1. install
      2. upgrade
      3. remove
   4. upgrade: update octopus-wdd itself
6. security
   1. ssh
7. info: collect host information and save it to the config file; implemented in [Info.go](mdc:agent-wdd/cmd/Info.go)
   1. cpu: CPU information [CPU.go](mdc:agent-wdd/config/CPU.go)
   2. os: operating-system information [OS.go](mdc:agent-wdd/config/OS.go)
   3. mem: memory information [Memory.go](mdc:agent-wdd/config/Memory.go)
   4. disk: disk information [Disk.go](mdc:agent-wdd/config/Disk.go)
   5. network: network-related information [Network.go](mdc:agent-wdd/config/Network.go)
   6. all: all host information
8. version: print the octopus-agent build version information
9. config: the configuration file used by octopus-wdd
   1. show
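The command tree laid out in agent-wdd-rules.mdc above maps naturally onto nested github.com/spf13/cobra commands. Below is a minimal sketch, assuming only what the rules file states, of how the `base docker online` branch could be wired; the `--version` flag and the function bodies are illustrative assumptions, not the repository's actual code.

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// Hypothetical wiring of the "base docker online" branch described in
// agent-wdd-rules.mdc; the real agent-wdd command tree may differ.
func main() {
	rootCmd := &cobra.Command{Use: "agent-wdd", Short: "Host bootstrap CLI"}

	baseCmd := &cobra.Command{Use: "base", Short: "Basic server operations"}
	dockerCmd := &cobra.Command{Use: "docker", Short: "Docker-related operations"}

	var version string // assumed flag name, not stated in the rules file
	onlineCmd := &cobra.Command{
		Use:   "online",
		Short: "Install a specific Docker version over the network",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Printf("installing docker %s...\n", version)
			return nil // placeholder for the real installation logic
		},
	}
	onlineCmd.Flags().StringVar(&version, "version", "latest", "Docker version to install")

	dockerCmd.AddCommand(onlineCmd)
	baseCmd.AddCommand(dockerCmd)
	rootCmd.AddCommand(baseCmd)

	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```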
.cursorignore (new file, 9 lines)
@@ -0,0 +1,9 @@
# Add directories or file patterns to ignore during indexing (e.g. foo/ or *.csv)

agent-deploy/
message_pusher/
port_forwarding/
server/
server-go/
socks_txthinking/
source/
.run/CMII镜像同步-11.8-ARM.run.xml (new file, 15 lines)
@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="CMII镜像同步-11.8-ARM" type="GoTestRunConfiguration" factoryName="Go Test">
    <module name="ProjectOctopus" />
    <target name="root@192.168.11.8:22" />
    <working_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestPullFromEntityAndSyncConditionally\E$" />
    <method v="2" />
  </configuration>
</component>
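Each of these .run configurations drives a single Go test in the wdd.io/agent-operator package, selected by the regex in `<pattern>` (here TestPullFromEntityAndSyncConditionally) and built and run on the remote `<target>` host. A purely hypothetical skeleton of such an entry point is sketched below; the package name, skip logic, and body are assumptions and are not taken from the repository.

```go
package operator_test

import "testing"

// TestPullFromEntityAndSyncConditionally matches the run configuration's
// ^\QTestPullFromEntityAndSyncConditionally\E$ pattern. The body is a
// placeholder; the real test lives in wdd.io/agent-operator.
func TestPullFromEntityAndSyncConditionally(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping image synchronization against a remote registry in -short mode")
	}
	// Pull images from the source registry and sync them conditionally
	// to the target environment (e.g. wdd-dev-35.70).
}
```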
.run/CmiiUpdater-35.70.run.xml (new file, 15 lines)
@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="CmiiUpdater-35.70" type="GoTestRunConfiguration" factoryName="Go Test" singleton="false">
    <module name="ProjectOctopus" />
    <target name="wdd-dev-35.70" />
    <working_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestUpdateCmiiDeploymentImageTag\E$" />
    <method v="2" />
  </configuration>
</component>
.run/Cmii镜像同步-35.70.run.xml (new file, 15 lines)
@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="Cmii镜像同步-35.70" type="GoTestRunConfiguration" factoryName="Go Test">
    <module name="ProjectOctopus" />
    <target name="wdd-dev-35.70" />
    <working_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestPullFromEntityAndSyncConditionally\E$" />
    <method v="2" />
  </configuration>
</component>
.run/DCU全部CMII镜像.run.xml (new file, 15 lines)
@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="DCU全部CMII镜像" type="GoTestRunConfiguration" factoryName="Go Test">
    <module name="ProjectOctopus" />
    <target name="wdd-dev-35.70" />
    <working_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestPullFromEntityAndSyncConditionally\E$" />
    <method v="2" />
  </configuration>
</component>
.run/DEMO更新-3570.run.xml (new file, 15 lines)
@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="DEMO更新-3570" type="GoTestRunConfiguration" factoryName="Go Test" singleton="false">
    <module name="ProjectOctopus" />
    <target name="wdd-dev-35.70" />
    <working_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestUpdateCmiiImageTagFromNameTagMap\E$" />
    <method v="2" />
  </configuration>
</component>
.run/TestUpdateCmiiDeploymentImageTag in wdd.io_agent-operator.run.xml → .run/DEMO重启-3570.run.xml (renamed, Executable file → Normal file, 7 lines changed)
@@ -1,14 +1,15 @@
 <component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="TestUpdateCmiiDeploymentImageTag in wdd.io/agent-operator"
-                 type="GoTestRunConfiguration" factoryName="Go Test" singleton="false" nameIsGenerated="true">
+  <configuration default="false" name="DEMO重启-3570" type="GoTestRunConfiguration" factoryName="Go Test">
   <module name="ProjectOctopus"/>
+  <target name="wdd-dev-35.70"/>
   <working_directory value="$PROJECT_DIR$/agent-operator"/>
   <kind value="PACKAGE"/>
   <package value="wdd.io/agent-operator"/>
   <directory value="$PROJECT_DIR$"/>
   <filePath value="$PROJECT_DIR$"/>
+  <option name="build_on_remote_target" value="true"/>
   <framework value="gotest"/>
-  <pattern value="^\QTestUpdateCmiiDeploymentImageTag\E$"/>
+  <pattern value="^\QTestRestartCmiiDeployment\E$"/>
   <method v="2"/>
   </configuration>
 </component>
.run/Middle镜像-35.70.run.xml (new file, 28 lines)
@@ -0,0 +1,28 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="Middle镜像-35.70" type="GoTestRunConfiguration" factoryName="Go Test">
    <module name="ProjectOctopus" />
    <target name="wdd-dev-35.70" />
    <working_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestFetchDependencyRepos_Middle\E$" />
    <method v="2" />
  </configuration>
  <configuration default="false" name="Middle镜像-35.70" type="GoTestRunConfiguration" factoryName="Go Test">
    <module name="ProjectOctopus" />
    <target name="wdd-dev-35.70" />
    <working_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestFetchDependencyRepos_Middle\E$" />
    <method v="2" />
  </configuration>
</component>
.run/Middle镜像-ARM-11.8.run.xml (new file, 15 lines)
@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="Middle镜像-ARM-11.8" type="GoTestRunConfiguration" factoryName="Go Test">
    <module name="ProjectOctopus" />
    <target name="root@192.168.11.8:22" />
    <working_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestFetchDependencyRepos_Middle\E$" />
    <method v="2" />
  </configuration>
</component>
@@ -1,12 +1,20 @@
 <component name="ProjectRunConfigurationManager">
-  <configuration default="false" name="ServerApplication" type="SpringBootApplicationConfigurationType"
-                 factoryName="Spring Boot">
-    <module name="server"/>
-    <projectPathOnTarget name="projectPathOnTarget" value="/data/wdd/ProjectOctopus"/>
-    <target name="@@@LOCAL@@@"/>
-    <option name="SPRING_BOOT_MAIN_CLASS" value="io.wdd.ServerApplication"/>
-    <method v="2">
-      <option name="Make" enabled="true"/>
-    </method>
-  </configuration>
+  <configuration default="false" name="ServerApplication" type="SpringBootApplicationConfigurationType" factoryName="Spring Boot">
+    <module name="server" />
+    <projectPathOnTarget name="projectPathOnTarget" value="/data/wdd/ProjectOctopus" />
+    <target name="@@@LOCAL@@@" />
+    <option name="SPRING_BOOT_MAIN_CLASS" value="io.wdd.ServerApplication" />
+    <method v="2">
+      <option name="Make" enabled="true" />
+    </method>
+  </configuration>
+  <configuration default="false" name="ServerApplication" type="SpringBootApplicationConfigurationType" factoryName="Spring Boot">
+    <module name="server" />
+    <projectPathOnTarget name="projectPathOnTarget" value="/data/wdd/ProjectOctopus" />
+    <target name="@@@LOCAL@@@" />
+    <option name="SPRING_BOOT_MAIN_CLASS" value="io.wdd.ServerApplication" />
+    <method v="2">
+      <option name="Make" enabled="true" />
+    </method>
+  </configuration>
 </component>
.run/查询应用分支-3570.run.xml (new file, 15 lines)
@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="查询应用分支-3570" type="GoTestRunConfiguration" factoryName="Go Test">
    <module name="ProjectOctopus" />
    <target name="wdd-dev-35.70" />
    <working_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestCmiiK8sOperator_DeploymentOneInterface\E$" />
    <method v="2" />
  </configuration>
</component>
.run/清理CMII镜像-35.70.run.xml (new file, 16 lines)
@@ -0,0 +1,16 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="清理CMII镜像-35.70" type="GoTestRunConfiguration" factoryName="Go Test">
    <module name="ProjectOctopus" />
    <target name="wdd-dev-35.70" />
    <working_directory value="$PROJECT_DIR$/agent-operator/image" />
    <root_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator/image" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestImagePruneAllCmiiImages\E$" />
    <method v="2" />
  </configuration>
</component>
.run/重启DEMO-3570.run.xml (new file, 15 lines)
@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="重启DEMO-3570" type="GoTestRunConfiguration" factoryName="Go Test">
    <module name="ProjectOctopus" />
    <target name="wdd-dev-35.70" />
    <working_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestRestartCmiiDeployment\E$" />
    <method v="2" />
  </configuration>
</component>
.run/院内Harbor清理-35.70.run.xml (new file, 16 lines)
@@ -0,0 +1,16 @@
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="院内Harbor清理-35.70" type="GoTestRunConfiguration" factoryName="Go Test">
    <module name="ProjectOctopus" />
    <target name="wdd-dev-35.70" />
    <working_directory value="$PROJECT_DIR$/agent-operator/image" />
    <root_directory value="$PROJECT_DIR$/agent-operator" />
    <kind value="PACKAGE" />
    <package value="wdd.io/agent-operator/image" />
    <directory value="$PROJECT_DIR$" />
    <filePath value="$PROJECT_DIR$" />
    <option name="build_on_remote_target" value="true" />
    <framework value="gotest" />
    <pattern value="^\QTestHarborOperator_CmiiHarborCleanUp\E$" />
    <method v="2" />
  </configuration>
</component>
agent-common/SplitProject/doris-25.2.1/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
agent-common/SplitProject/doris-25.2.1/Chart.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v2
appVersion: 2.1.7
description: Apache Doris is an easy-to-use, high-performance and real-time analytical
  database based on MPP architecture, known for its extreme speed and ease of use.
home: https://doris.apache.org
icon: https://charts.selectdb.com/images/doris.jpg
keywords:
- doris
- database
- olap
- sharding
kubeVersion: '>= 1.19'
maintainers:
- email: hudechao@selectdb.com
  name: Dechao Hu
name: doris
sources:
- https://github.com/apache/doris
- https://github.com/apache/doris-operator
type: application
version: 25.2.1
agent-common/SplitProject/doris-25.2.1/README.md (new file, 61 lines)
@@ -0,0 +1,61 @@
# Deploy Doris Cluster by Helm
[](https://artifacthub.io/packages/search?repo=doris)

This chart deploys Doris on Kubernetes using Doris-Operator. Before using this chart, please install doris-operator as described in the [install doc](https://artifacthub.io/packages/helm/doris/doris-operator).

## Install doris

### Add helm-chart repo and install doris-operator
These steps duplicate the doris-operator installation document; you can skip them if they have already been completed.
1. Add the selectdb repository
```Bash
$ helm repo add selectdb https://charts.selectdb.com
```
2. Update the Helm Chart Repo to the latest version
```Bash
$ helm repo update selectdb
```
3. Check that the Helm Chart Repo is at the latest version
```Bash
$ helm search repo selectdb
NAME                       CHART VERSION    APP VERSION   DESCRIPTION
selectdb/doris-operator    1.3.1            1.3.1         Doris-operator for doris creat ...
selectdb/doris             1.3.1            2.0.3         Apache Doris is an easy-to-use ...
```
4. Install doris-operator (with default config in a namespace named `doris`)
```Bash
$ helm install operator selectdb/doris-operator
```

### Install Doris using doriscluster
- Use the default config for deploying Doris
This only deploys the fe and be components, using the default storageClass to provide persistent volumes.
```bash
$ helm install doriscluster selectdb/doris
```
- Custom Doris deployment
When you want to specify resources or a different deployment type, customize [`values.yaml`](./values.yaml) and deploy with the following command.
```bash
$ helm install -f values.yaml doriscluster selectdb/doris
```

### Validate installation status
After executing the installation command, scheduling and startup of the services will take some time. Check the deployment status of the Pods with the `kubectl get pods` command.
When the `doriscluster` Pods are in the `Running` state and all containers in each Pod are ready, the deployment is successful.

```Bash
$ kubectl get pod --namespace doris
NAME                     READY   STATUS    RESTARTS   AGE
doriscluster-helm-fe-0   1/1     Running   0          1m39s
doriscluster-helm-fe-1   1/1     Running   0          1m39s
doriscluster-helm-fe-2   1/1     Running   0          1m39s
doriscluster-helm-be-0   1/1     Running   0          16s
doriscluster-helm-be-1   1/1     Running   0          16s
doriscluster-helm-be-2   1/1     Running   0          16s
```

## Uninstall doriscluster
Please confirm that Doris is no longer in use before uninstalling `doriscluster` with the following command.
```bash
$ helm uninstall doriscluster
```
@@ -0,0 +1 @@
Thank you for installing {{ .Chart.Name }}-{{ .Chart.Version }}
agent-common/SplitProject/doris-25.2.1/templates/_helpers.tpl (new file, 267 lines)
@@ -0,0 +1,267 @@
|
|||||||
|
{*
|
||||||
|
Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
or more contributor license agreements. See the NOTICE file
|
||||||
|
distributed with this work for additional information
|
||||||
|
regarding copyright ownership. The ASF licenses this file
|
||||||
|
to you under the Apache License, Version 2.0 (the
|
||||||
|
"License"); you may not use this file except in compliance
|
||||||
|
with the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing,
|
||||||
|
software distributed under the License is distributed on an
|
||||||
|
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations
|
||||||
|
under the License.
|
||||||
|
*}
|
||||||
|
|
||||||
|
{{- define "doriscluster.name" -}}
|
||||||
|
{{ default .Chart.Name .Values.dorisCluster.name }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- define "doriscluster.namespace" -}}
|
||||||
|
{{ print .Release.Namespace }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- define "kube-control.name" -}}
|
||||||
|
{{- print "doris-operator" }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster pod default resource.
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.default.resource" }}
|
||||||
|
requests:
|
||||||
|
cpu: 8
|
||||||
|
memory: 16Gi
|
||||||
|
limits:
|
||||||
|
cpu: 16
|
||||||
|
memory: 32Gi
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster admin user and password secret name.
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.secret.name" -}}
|
||||||
|
{{ template "doriscluster.name" . }}-secret-base64
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster fe configMap default name.
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.default.feConfigMap.name" -}}
|
||||||
|
{{ template "doriscluster.name" . }}-fe-configmap
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster fe pod default configMap resolve file.
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.default.feConfig.resolveKey" }}
|
||||||
|
{{- print "fe.conf" }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster be configMap default name.
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.default.beConfigMap.name" -}}
|
||||||
|
{{ template "doriscluster.name" . }}-be-configmap
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster pod default configMap resolve file.
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.default.beConfig.resolveKey" }}
|
||||||
|
{{- print "be.conf" }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster cn configMap default name.
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.default.cnConfigMap.name" -}}
|
||||||
|
{{ template "doriscluster.name" . }}-cn-configmap
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster cn pod default configMap resolve file.
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.default.cnConfig.resolveKey" }}
|
||||||
|
{{- print "be.conf" }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster broker configMap default name.
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.default.brokerConfigMap.name" -}}
|
||||||
|
{{ template "doriscluster.name" . }}-broker-configmap
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster broker pod default configMap resolve file.
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.default.brokerConfig.resolveKey" }}
|
||||||
|
{{- print "apache_hdfs_broker.conf" }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster cn pod autoscaler default version.
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.default.autoScalerVersion" -}}
|
||||||
|
{{- print "v2" }}
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster fe PVC
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.fe.pvc" -}}
|
||||||
|
|
||||||
|
{{- if and .Values.feSpec.persistentVolumeClaim.metaPersistentVolume .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storage}}
|
||||||
|
- mountPath: /opt/apache-doris/fe/doris-meta
|
||||||
|
name: fe-meta
|
||||||
|
persistentVolumeClaimSpec:
|
||||||
|
{{- if or .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storageClassName .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
|
||||||
|
storageClassName: {{ default .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storageClassName }}
|
||||||
|
{{- end }}
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: {{ .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storage}}
|
||||||
|
{{- end }}
|
||||||
|
{{- if and .Values.feSpec.persistentVolumeClaim.logsPersistentVolume .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
|
||||||
|
- mountPath: /opt/apache-doris/fe/log
|
||||||
|
name: fe-log
|
||||||
|
persistentVolumeClaimSpec:
|
||||||
|
{{- if or .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storageClassName}}
|
||||||
|
storageClassName: {{ default .Values.feSpec.persistentVolumeClaim.metaPersistentVolume.storageClassName .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
|
||||||
|
{{- end }}
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: {{ .Values.feSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
|
||||||
|
{{- end }}
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster be PVC
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.be.pvc" -}}
|
||||||
|
|
||||||
|
{{- if and .Values.beSpec.persistentVolumeClaim.dataPersistentVolume .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storage}}
|
||||||
|
- mountPath: /opt/apache-doris/be/storage
|
||||||
|
name: be-storage
|
||||||
|
persistentVolumeClaimSpec:
|
||||||
|
{{- if or .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
|
||||||
|
storageClassName: {{ default .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName }}
|
||||||
|
{{- end }}
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: {{ .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storage}}
|
||||||
|
{{- end }}
|
||||||
|
{{- if and .Values.beSpec.persistentVolumeClaim.logsPersistentVolume .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
|
||||||
|
- mountPath: /opt/apache-doris/be/log
|
||||||
|
name: be-log
|
||||||
|
persistentVolumeClaimSpec:
|
||||||
|
{{- if or .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
|
||||||
|
storageClassName: {{ default .Values.beSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
|
||||||
|
{{- end }}
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: {{ .Values.beSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
|
||||||
|
{{- end }}
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster cn PVC
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.cn.pvc" -}}
|
||||||
|
|
||||||
|
{{- if and .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storage}}
|
||||||
|
- mountPath: /opt/apache-doris/be/storage
|
||||||
|
name: cn-storage
|
||||||
|
persistentVolumeClaimSpec:
|
||||||
|
{{- if or .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
|
||||||
|
storageClassName: {{ default .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName }}
|
||||||
|
{{- end }}
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: {{ .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storage}}
|
||||||
|
{{- end }}
|
||||||
|
{{- if and .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
|
||||||
|
- mountPath: /opt/apache-doris/be/log
|
||||||
|
name: cn-log
|
||||||
|
persistentVolumeClaimSpec:
|
||||||
|
{{- if or .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
|
||||||
|
storageClassName: {{ default .Values.cnSpec.persistentVolumeClaim.dataPersistentVolume.storageClassName .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
|
||||||
|
{{- end }}
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: {{ .Values.cnSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
|
||||||
|
{{- end }}
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
{{/*
|
||||||
|
doris cluster broker PVC
|
||||||
|
*/}}
|
||||||
|
{{- define "doriscluster.broker.pvc" -}}
|
||||||
|
{{- if and .Values.brokerSpec.persistentVolumeClaim.logsPersistentVolume .Values.brokerSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
|
||||||
|
- mountPath: /opt/apache-doris/apache_hdfs_broker/log
|
||||||
|
name: broker-log
|
||||||
|
persistentVolumeClaimSpec:
|
||||||
|
{{- if .Values.brokerSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName}}
|
||||||
|
storageClassName: {{ .Values.brokerSpec.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
|
||||||
|
{{- end }}
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: {{ .Values.brokerSpec.persistentVolumeClaim.logsPersistentVolume.storage}}
|
||||||
|
{{- end }}
|
||||||
|
{{- end -}}
|
||||||
|
|
||||||
|
|
||||||
|
{{- define "doriscluster.feConfig.configMaps" }}
|
||||||
|
{{- range .Values.feSpec.configMap.mountConfigMaps }}
|
||||||
|
- configMapName: {{ .configMapName }}
|
||||||
|
mountPath: {{ .mountPath }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
|
||||||
|
{{- define "doriscluster.beConfig.configMaps" }}
|
||||||
|
{{- range .Values.beSpec.configMap.mountConfigMaps }}
|
||||||
|
- configMapName: {{ .configMapName }}
|
||||||
|
mountPath: {{ .mountPath }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
|
||||||
|
{{- define "doriscluster.cnConfig.configMaps" }}
|
||||||
|
{{- range .Values.cnSpec.configMap.mountConfigMaps }}
|
||||||
|
- configMapName: {{ .configMapName }}
|
||||||
|
mountPath: {{ .mountPath }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
|
||||||
|
{{- define "doriscluster.brokerConfig.configMaps" }}
|
||||||
|
{{- range .Values.brokerSpec.configMap.mountConfigMaps }}
|
||||||
|
- configMapName: {{ .configMapName }}
|
||||||
|
mountPath: {{ .mountPath }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
@@ -0,0 +1,31 @@
|
|||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
{{- $originalMap := omit .Values.beSpec.configMap "mountConfigMaps" }}
|
||||||
|
{{- if $originalMap }}
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: {{ template "doriscluster.default.beConfigMap.name" . }}
|
||||||
|
namespace: {{ template "doriscluster.namespace" . }}
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: doriscluster
|
||||||
|
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
|
||||||
|
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
|
||||||
|
data:
|
||||||
|
{{- toYaml $originalMap | nindent 2}}
|
||||||
|
{{- end }}
|
||||||
@@ -0,0 +1,31 @@
|
|||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
{{- $originalMap := omit .Values.brokerSpec.configMap "mountConfigMaps" }}
|
||||||
|
{{- if $originalMap }}
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: {{ template "doriscluster.default.brokerConfigMap.name" . }}
|
||||||
|
namespace: {{ template "doriscluster.namespace" . }}
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: doriscluster
|
||||||
|
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
|
||||||
|
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
|
||||||
|
data:
|
||||||
|
{{- toYaml $originalMap | nindent 2}}
|
||||||
|
{{- end }}
|
||||||
@@ -0,0 +1,31 @@
|
|||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
{{- $originalMap := omit .Values.cnSpec.configMap "mountConfigMaps" }}
|
||||||
|
{{- if $originalMap }}
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: {{ template "doriscluster.default.cnConfigMap.name" . }}
|
||||||
|
namespace: {{ template "doriscluster.namespace" . }}
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: doriscluster
|
||||||
|
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
|
||||||
|
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
|
||||||
|
data:
|
||||||
|
{{- toYaml $originalMap | nindent 2}}
|
||||||
|
{{- end }}
|
||||||
@@ -0,0 +1,31 @@
|
|||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
{{- $originalMap := omit .Values.feSpec.configMap "mountConfigMaps" }}
|
||||||
|
{{- if $originalMap }}
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: {{ template "doriscluster.default.feConfigMap.name" . }}
|
||||||
|
namespace: {{ template "doriscluster.namespace" . }}
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: doriscluster
|
||||||
|
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
|
||||||
|
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
|
||||||
|
data:
|
||||||
|
{{- toYaml $originalMap | nindent 2}}
|
||||||
|
{{- end }}
|
||||||
@@ -0,0 +1,56 @@
|
|||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
{{$cmDatas := list }}
|
||||||
|
{{- range $cp := .Values.feSpec.configMap.mountConfigMaps }}
|
||||||
|
{{- if $cp.data }}
|
||||||
|
{{- $cmDatas = append $cmDatas $cp }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- range $index, $cp := .Values.beSpec.configMap.mountConfigMaps }}
|
||||||
|
{{- if $cp.data }}
|
||||||
|
{{- $cmDatas = append $cmDatas $cp }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- range $index, $cp := .Values.brokerSpec.configMap.mountConfigMaps }}
|
||||||
|
{{- if $cp.data }}
|
||||||
|
{{- $cmDatas = append $cmDatas $cp }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- range $index, $cp := .Values.cnSpec.configMap.mountConfigMaps }}
|
||||||
|
{{- if $cp.data }}
|
||||||
|
{{- $cmDatas = append $cmDatas $cp }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- range $cp := $cmDatas }}
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: {{ $cp.configMapName }}
|
||||||
|
namespace: {{ template "doriscluster.namespace" $ }}
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: doriscluster
|
||||||
|
app.kubernetes.io/instance: {{ template "doriscluster.name" $ }}
|
||||||
|
app.kubernetes.io/part-of: {{ template "kube-control.name" $ }}
|
||||||
|
data:
|
||||||
|
{{- toYaml $cp.data | nindent 2 }}
|
||||||
|
---
|
||||||
|
{{- end }}
|
||||||
@@ -0,0 +1,368 @@
|
|||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
# This yaml have dorisCluster CRD all fields. The yaml only for reference how to config, not for deploy on kubernetes.
|
||||||
|
|
||||||
|
apiVersion: doris.selectdb.com/v1
|
||||||
|
kind: DorisCluster
|
||||||
|
metadata:
|
||||||
|
name: {{ template "doriscluster.name" . }}
|
||||||
|
namespace: {{ template "doriscluster.namespace" . }}
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: doriscluster
|
||||||
|
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
|
||||||
|
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
|
||||||
|
{{- if .Values.dorisCluster.annotations }}
|
||||||
|
annotations:
|
||||||
|
{{- toYaml .Values.dorisCluster.annotations | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
spec:
|
||||||
|
{{- if .Values.dorisCluster.adminUser }}
|
||||||
|
adminUser:
|
||||||
|
{{- toYaml .Values.dorisCluster.adminUser | nindent 4 }}
|
||||||
|
{{- else if .Values.dorisCluster.authSecret }}
|
||||||
|
authSecret: {{ template "doriscluster.secret.name" . }}
|
||||||
|
{{- end }}
|
||||||
|
feSpec:
|
||||||
|
replicas: {{ .Values.feSpec.replicas }}
|
||||||
|
{{- if .Values.feSpec.electionNumber }}
|
||||||
|
electionNumber: {{ .Values.feSpec.electionNumber }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.feSpec.labels }}
|
||||||
|
podLabels:
|
||||||
|
{{- toYaml .Values.feSpec.labels | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
image: {{ .Values.feSpec.image.repository }}:{{ .Values.feSpec.image.tag }}
|
||||||
|
{{- if .Values.feSpec.image.imagePullSecrets }}
|
||||||
|
imagePullSecrets:
|
||||||
|
{{- toYaml .Values.feSpec.image.imagePullSecrets | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.feSpec.service }}
|
||||||
|
service:
|
||||||
|
type: {{ .Values.feSpec.service.type }}
|
||||||
|
{{- if and (eq "LoadBalancer" .Values.feSpec.service.type) .Values.feSpec.service.loadbalancerIP }}
|
||||||
|
loadBalancerIP: {{ .Values.feSpec.service.loadbalancerIP }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.feSpec.service.servicePorts }}
|
||||||
|
servicePorts:
|
||||||
|
{{- toYaml .Values.feSpec.service.servicePorts | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.feSpec.service.annotations }}
|
||||||
|
annotations:
|
||||||
|
{{- toYaml .Values.feSpec.service.annotations | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.feSpec.configMap }}
|
||||||
|
configMapInfo:
|
||||||
|
{{- $originalMap := .Values.feSpec.configMap }}
|
||||||
|
{{- $defaultMap := omit $originalMap "mountConfigMaps" }}
|
||||||
|
{{- if $defaultMap }}
|
||||||
|
configMapName: {{ template "doriscluster.default.feConfigMap.name" . }}
|
||||||
|
resolveKey: {{ template "doriscluster.default.feConfig.resolveKey" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.feSpec.configMap.mountConfigMaps }}
|
||||||
|
configMaps:
|
||||||
|
{{- template "doriscluster.feConfig.configMaps" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.feSpec.resource }}
|
||||||
|
{{- toYaml .Values.feSpec.resource | nindent 4 }}
|
||||||
|
{{- else }}
|
||||||
|
{{- include "doriscluster.default.resource" .}}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
|
||||||
|
{{- if .Values.feSpec.nodeSelector }}
|
||||||
|
nodeSelector:
|
||||||
|
{{- toYaml .Values.feSpec.nodeSelector | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.feSpec.env }}
|
||||||
|
envVars:
|
||||||
|
{{- toYaml .Values.feSpec.env | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.feSpec.affinity }}
|
||||||
|
affinity:
|
||||||
|
{{- toYaml .Values.feSpec.affinity | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.feSpec.tolerations }}
|
||||||
|
tolerations:
|
||||||
|
{{- toYaml .Values.feSpec.tolerations | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.feSpec.hostAliases }}
|
||||||
|
hostAliases:
|
||||||
|
{{- toYaml .Values.feSpec.hostAliases | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.feSpec.persistentVolumeClaim }}
|
||||||
|
persistentVolumes:
|
||||||
|
{{- template "doriscluster.fe.pvc" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.feSpec.systemInitialization }}
|
||||||
|
systemInitialization:
|
||||||
|
{{- if .Values.feSpec.systemInitialization.initImage }}
|
||||||
|
initImage: {{ .Values.feSpec.systemInitialization.initImage }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.feSpec.systemInitialization.command }}
|
||||||
|
command: {{ .Values.feSpec.systemInitialization.command }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
beSpec:
|
||||||
|
replicas: {{ .Values.beSpec.replicas }}
|
||||||
|
{{- if .Values.beSpec.labels }}
|
||||||
|
podLabels:
|
||||||
|
{{- toYaml .Values.beSpec.labels | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
image: {{ .Values.beSpec.image.repository }}:{{ .Values.beSpec.image.tag }}
|
||||||
|
{{- if .Values.beSpec.image.imagePullSecrets }}
|
||||||
|
imagePullSecrets:
|
||||||
|
{{- toYaml .Values.beSpec.image.imagePullSecrets | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.beSpec.service }}
|
||||||
|
service:
|
||||||
|
type: {{ .Values.beSpec.service.type }}
|
||||||
|
{{- if and (eq "LoadBalancer" .Values.beSpec.service.type) .Values.beSpec.service.loadbalancerIP }}
|
||||||
|
loadBalancerIP: {{ .Values.beSpec.service.loadbalancerIP }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.beSpec.service.servicePorts }}
|
||||||
|
servicePorts:
|
||||||
|
{{- toYaml .Values.beSpec.service.servicePorts | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.beSpec.service.annotations }}
|
||||||
|
annotations:
|
||||||
|
{{- toYaml .Values.beSpec.service.annotations | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.beSpec.configMap }}
|
||||||
|
configMapInfo:
|
||||||
|
{{- $originalMap := .Values.beSpec.configMap }}
|
||||||
|
{{- $defaultMap := omit $originalMap "mountConfigMaps" }}
|
||||||
|
{{- if $defaultMap }}
|
||||||
|
configMapName: {{ template "doriscluster.default.beConfigMap.name" . }}
|
||||||
|
resolveKey: {{ template "doriscluster.default.beConfig.resolveKey" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.beSpec.configMap.mountConfigMaps }}
|
||||||
|
configMaps:
|
||||||
|
{{- template "doriscluster.beConfig.configMaps" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.beSpec.resource }}
|
||||||
|
{{- toYaml .Values.beSpec.resource | nindent 4 }}
|
||||||
|
{{- else }}
|
||||||
|
{{- include "doriscluster.default.resource" .}}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.beSpec.nodeSelector }}
|
||||||
|
nodeSelector:
|
||||||
|
{{- toYaml .Values.beSpec.nodeSelector | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.beSpec.env }}
|
||||||
|
envVars:
|
||||||
|
{{- toYaml .Values.beSpec.env | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.beSpec.affinity }}
|
||||||
|
affinity:
|
||||||
|
{{- toYaml .Values.beSpec.affinity | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.beSpec.tolerations }}
|
||||||
|
tolerations:
|
||||||
|
{{- toYaml .Values.beSpec.tolerations | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.beSpec.hostAliases }}
|
||||||
|
hostAliases:
|
||||||
|
{{- toYaml .Values.beSpec.hostAliases | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.beSpec.persistentVolumeClaim }}
|
||||||
|
persistentVolumes:
|
||||||
|
{{- template "doriscluster.be.pvc" . }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.beSpec.systemInitialization }}
|
||||||
|
systemInitialization:
|
||||||
|
{{- if .Values.beSpec.systemInitialization.initImage }}
|
||||||
|
initImage: {{ .Values.beSpec.systemInitialization.initImage }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.beSpec.systemInitialization.command }}
|
||||||
|
command: {{ .Values.beSpec.systemInitialization.command }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.dorisCluster.enabledCn }}
|
||||||
|
cnSpec:
|
||||||
|
replicas: {{ .Values.cnSpec.replicas }}
|
||||||
|
|
||||||
|
{{- if .Values.cnSpec.labels }}
|
||||||
|
podLabels:
|
||||||
|
{{- toYaml .Values.cnSpec.labels | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
image: {{ .Values.cnSpec.image.repository }}:{{ .Values.cnSpec.image.tag }}
|
||||||
|
{{- if .Values.cnSpec.image.imagePullSecrets }}
|
||||||
|
imagePullSecrets:
|
||||||
|
{{- toYaml .Values.cnSpec.image.imagePullSecrets | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.cnSpec.service }}
|
||||||
|
service:
|
||||||
|
type: {{ .Values.cnSpec.service.type }}
|
||||||
|
{{- if and (eq "LoadBalancer" .Values.cnSpec.service.type) .Values.cnSpec.service.loadbalancerIP }}
|
||||||
|
loadBalancerIP: {{ .Values.cnSpec.service.loadbalancerIP }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.cnSpec.service.servicePorts }}
|
||||||
|
servicePorts:
|
||||||
|
{{- toYaml .Values.cnSpec.service.servicePorts | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.cnSpec.service.annotations }}
|
||||||
|
annotations:
|
||||||
|
{{- toYaml .Values.cnSpec.service.annotations | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.cnSpec.configMap }}
|
||||||
|
configMapInfo:
|
||||||
|
{{- $originalMap := .Values.cnSpec.configMap }}
|
||||||
|
{{- $defaultMap := omit $originalMap "mountConfigMaps" }}
|
||||||
|
{{- if $defaultMap }}
|
||||||
|
configMapName: {{ template "doriscluster.default.cnConfigMap.name" . }}
|
||||||
|
resolveKey: {{ template "doriscluster.default.cnConfig.resolveKey" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.cnSpec.configMap.mountConfigMaps }}
|
||||||
|
configMaps:
|
||||||
|
{{- template "doriscluster.cnConfig.configMaps" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.cnSpec.resource }}
|
||||||
|
{{- toYaml .Values.cnSpec.resource | nindent 4 }}
|
||||||
|
{{- else }}
|
||||||
|
{{- include "doriscluster.default.resource" .}}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.cnSpec.nodeSelector }}
|
||||||
|
nodeSelector:
|
||||||
|
{{- toYaml .Values.cnSpec.nodeSelector | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.cnSpec.env }}
|
||||||
|
envVars:
|
||||||
|
{{- toYaml .Values.cnSpec.env | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.cnSpec.affinity }}
|
||||||
|
affinity:
|
||||||
|
{{- toYaml .Values.cnSpec.affinity | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.cnSpec.tolerations }}
|
||||||
|
tolerations:
|
||||||
|
{{- toYaml .Values.cnSpec.tolerations | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.cnSpec.hostAliases }}
|
||||||
|
hostAliases:
|
||||||
|
{{- toYaml .Values.cnSpec.hostAliases | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.cnSpec.persistentVolumeClaim }}
|
||||||
|
persistentVolumes:
|
||||||
|
{{- template "doriscluster.cn.pvc" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.cnSpec.systemInitialization }}
|
||||||
|
systemInitialization:
|
||||||
|
{{- if .Values.cnSpec.systemInitialization.initImage }}
|
||||||
|
initImage: {{ .Values.cnSpec.systemInitialization.initImage }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.cnSpec.systemInitialization.command }}
|
||||||
|
command: {{ .Values.cnSpec.systemInitialization.command }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.cnSpec.autoScalingPolicy.enable }}
|
||||||
|
autoScalingPolicy:
|
||||||
|
version: {{default ( include "doriscluster.default.autoScalerVersion" . ) .Values.cnSpec.autoScalingPolicy.version }}
|
||||||
|
minReplicas: {{ .Values.cnSpec.autoScalingPolicy.minReplicas}}
|
||||||
|
maxReplicas: {{ .Values.cnSpec.autoScalingPolicy.maxReplicas}}
|
||||||
|
hpaPolicy:
|
||||||
|
{{- toYaml .Values.cnSpec.autoScalingPolicy.hpaPolicy | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.dorisCluster.enabledBroker }}
|
||||||
|
brokerSpec:
|
||||||
|
replicas: {{ .Values.brokerSpec.replicas }}
|
||||||
|
|
||||||
|
{{- if .Values.brokerSpec.labels }}
|
||||||
|
podLabels:
|
||||||
|
{{- toYaml .Values.brokerSpec.labels | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
image: {{ .Values.brokerSpec.image.repository }}:{{ .Values.brokerSpec.image.tag }}
|
||||||
|
{{- if .Values.brokerSpec.image.imagePullSecrets }}
|
||||||
|
imagePullSecrets:
|
||||||
|
{{- toYaml .Values.brokerSpec.image.imagePullSecrets | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.brokerSpec.configMap }}
|
||||||
|
configMapInfo:
|
||||||
|
{{- $originalMap := .Values.brokerSpec.configMap }}
|
||||||
|
{{- $defaultMap := omit $originalMap "mountConfigMaps" }}
|
||||||
|
{{- if $defaultMap }}
|
||||||
|
configMapName: {{ template "doriscluster.default.brokerConfigMap.name" . }}
|
||||||
|
resolveKey: {{ template "doriscluster.default.brokerConfig.resolveKey" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.brokerSpec.configMap.mountConfigMaps }}
|
||||||
|
configMaps:
|
||||||
|
{{- template "doriscluster.brokerConfig.configMaps" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.brokerSpec.resource }}
|
||||||
|
{{- toYaml .Values.brokerSpec.resource | nindent 4 }}
|
||||||
|
{{- else }}
|
||||||
|
{{- include "doriscluster.default.resource" .}}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.brokerSpec.nodeSelector }}
|
||||||
|
nodeSelector:
|
||||||
|
{{- toYaml .Values.brokerSpec.nodeSelector | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.brokerSpec.env }}
|
||||||
|
envVars:
|
||||||
|
{{- toYaml .Values.brokerSpec.env | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.brokerSpec.affinity }}
|
||||||
|
affinity:
|
||||||
|
{{- toYaml .Values.brokerSpec.affinity | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.brokerSpec.tolerations }}
|
||||||
|
tolerations:
|
||||||
|
{{- toYaml .Values.brokerSpec.tolerations | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.brokerSpec.hostAliases }}
|
||||||
|
hostAliases:
|
||||||
|
{{- toYaml .Values.brokerSpec.hostAliases | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.brokerSpec.persistentVolumeClaim }}
|
||||||
|
persistentVolumes:
|
||||||
|
{{- template "doriscluster.broker.pvc" . }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.brokerSpec.systemInitialization }}
|
||||||
|
systemInitialization:
|
||||||
|
{{- if .Values.brokerSpec.systemInitialization.initImage }}
|
||||||
|
initImage: {{ .Values.brokerSpec.systemInitialization.initImage }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.brokerSpec.systemInitialization.command }}
|
||||||
|
command: {{ .Values.brokerSpec.systemInitialization.command }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
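The configMapInfo logic above splits .Values.cnSpec.configMap (and likewise brokerSpec.configMap) into two parts: every key except mountConfigMaps goes into the generated default ConfigMap (configMapName / resolveKey), while mountConfigMaps is rendered as additional mounted ConfigMaps. A minimal Python sketch of that branching, for illustration only (not part of the chart; the sample keys are placeholders):

# Mirrors the template's `omit $originalMap "mountConfigMaps"` branching.
config_map = {
    "be.conf": "be_port = 9060\n",  # placeholder startup config entry
    "mountConfigMaps": [{"configMapName": "cm-1", "mountPath": "/etc/cm1"}],  # placeholder mounts
}

default_map = {k: v for k, v in config_map.items() if k != "mountConfigMaps"}
if default_map:
    print("render configMapName / resolveKey for the generated ConfigMap")
if config_map.get("mountConfigMaps"):
    print("render the extra mounted ConfigMaps")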
31
agent-common/SplitProject/doris-25.2.1/templates/secret.yaml
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
{{- if .Values.dorisCluster.authSecret }}
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: {{ template "doriscluster.secret.name" . }}
|
||||||
|
namespace: {{ template "doriscluster.namespace" . }}
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/name: doriscluster
|
||||||
|
app.kubernetes.io/instance: {{ template "doriscluster.name" . }}
|
||||||
|
app.kubernetes.io/part-of: {{ template "kube-control.name" . }}
|
||||||
|
type: kubernetes.io/basic-auth
|
||||||
|
data:
|
||||||
|
{{- toYaml .Values.dorisCluster.authSecret | nindent 2}}
|
||||||
|
{{- end }}
|
||||||
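The Secret above is typed kubernetes.io/basic-auth and copies dorisCluster.authSecret into data verbatim, so its username and password values must already be base64-encoded. A minimal sketch (not part of the chart) of producing those fields; the credentials below are the ones used in the commented example in values.yaml:

import base64

def encode_auth_secret(username: str, password: str) -> dict:
    # Returns the base64-encoded fields expected by dorisCluster.authSecret.
    return {
        "username": base64.b64encode(username.encode()).decode(),
        "password": base64.b64encode(password.encode()).decode(),
    }

print(encode_auth_secret("root", "t0p-Secret"))
# {'username': 'cm9vdA==', 'password': 'dDBwLVNlY3JldA=='}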
697
agent-common/SplitProject/doris-25.2.1/values.yaml
Normal file
@@ -0,0 +1,697 @@
|
|||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
# deploy a doris cluster
|
||||||
|
dorisCluster:
|
||||||
|
# the name of doriscluster, if not set, the chart name will be used.
|
||||||
|
name: "doriscluster-helm"
|
||||||
|
# annotations for doris cluster.
|
||||||
|
annotations: {}
|
||||||
|
# specify the cn deploy or not.
|
||||||
|
enabledCn: false
|
||||||
|
# specify the broker deploy or not.
|
||||||
|
enabledBroker: false
|
||||||
|
# specify the doriscluster adminuser or not.
|
||||||
|
# Setting adminUser records the password in clear text. This method is deprecated and not recommended.
|
||||||
|
# When adding adminUser, follow these steps:
|
||||||
|
# 1. Start the doriscluster without adminUser
|
||||||
|
# 2. Run 'CREATE USER ...' or 'SET PASSWORD ...' in mysql client
|
||||||
|
# 3. Add the name and password added in the previous step to adminUser and execute helm
|
||||||
|
adminUser: {}
|
||||||
|
# name: root
|
||||||
|
# password: "12345"
|
||||||
|
# use authSecret to store the credentials base64-encoded in a Kubernetes Secret; this method is recommended
|
||||||
|
# 1. run shell: echo -n '{your_password}' | base64 to get password base64 string
|
||||||
|
# 2. run shell: echo -n '{your_user}' | base64 to get user base64 string
|
||||||
|
# 3. Fill the encrypted string into the corresponding position
|
||||||
|
# as follow, username is 'root' , password is 't0p-Secret'
|
||||||
|
authSecret: {}
|
||||||
|
# username: cm9vdA==
|
||||||
|
# password: dDBwLVNlY3JldA==
|
||||||
|
|
||||||
|
feSpec:
|
||||||
|
replicas: 3
|
||||||
|
# electionNumber is the number of `FOLLOWER` nodes; the remaining (replicas - electionNumber) run as `OBSERVER`
|
||||||
|
# electionNumber: 3
|
||||||
|
# the pod labels for user select or classify pods.
|
||||||
|
labels: {}
|
||||||
|
# selectdb.app.classify: "classify-1"
|
||||||
|
image:
|
||||||
|
repository: apache/doris
|
||||||
|
tag: fe-2.1.7
|
||||||
|
# imagePullSecrets:
|
||||||
|
# - name: imagePullSecret
|
||||||
|
# config the service port for fe service.
|
||||||
|
# if you want to use a dedicated port for fe service, you can config the port.
|
||||||
|
# see https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports for more details.
|
||||||
|
service:
|
||||||
|
# the fe service type, only supported ClusterIP, NodePort, LoadBalancer
|
||||||
|
type: ClusterIP
|
||||||
|
# type: NodePort
|
||||||
|
# servicePorts:
|
||||||
|
# - nodePort: 30001
|
||||||
|
# targetPort: 9030
|
||||||
|
# - nodePort: 30002
|
||||||
|
# targetPort: 8030
|
||||||
|
# the loadBalancerIP for static ip config when the type=LoadBalancer and loadbalancerIp is not empty.
|
||||||
|
# type: "LoadBalancer"
|
||||||
|
# loadbalancerIP: "127.0.0.1"
|
||||||
|
# use LoadBalancer on cloud platform, try to configure it using annotations:
|
||||||
|
annotations: {}
|
||||||
|
# service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "intranet"
|
||||||
|
# specify the configMap, as a dictionary.
|
||||||
|
# Its 'key' is the config file name in the doris 'doris_root_path/fe/conf/' file path,
|
||||||
|
# Its 'value' is the text content of the corresponding file (it should be relatively long, actually).
|
||||||
|
configMap: {}
|
||||||
|
# 'fe.conf' is the startup config file of fe, and the name must be 'fe.conf'.
|
||||||
|
# fe.conf: |
|
||||||
|
# http_port = 8030
|
||||||
|
# rpc_port = 9020
|
||||||
|
# query_port = 9030
|
||||||
|
# edit_log_port = 9010
|
||||||
|
# Other config files can also be specified here
|
||||||
|
# hdfs-site.xml: |
|
||||||
|
# <?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
#
|
||||||
|
# <configuration>
|
||||||
|
# <property>
|
||||||
|
# <name>dfs.nameservices</name>
|
||||||
|
# <value>ns</value>
|
||||||
|
# </property>
|
||||||
|
#
|
||||||
|
# mountConfigMaps supports configmap mounting in the form of specifying the mounting directory
|
||||||
|
# but please note that the mounting path cannot be repeated.
|
||||||
|
# if the elements within mountConfigMaps contain data, it means that the configmap does not need to be created manually by the user, and vice versa.
|
||||||
|
# in the following example, the user needs to manually create configmap 'cm-1'
|
||||||
|
# mountConfigMaps: {}
|
||||||
|
# - configMapName: cm-1
|
||||||
|
# mountPath: /etc/cm1
|
||||||
|
# - configMapName: cm-2
|
||||||
|
# mountPath: /etc/cm2
|
||||||
|
# data:
|
||||||
|
# copy_file1: |
|
||||||
|
# text *** content
|
||||||
|
# copy_file2: |
|
||||||
|
# text *** content
|
||||||
|
|
||||||
|
# If configured separately here, it will overwrite the total resources configuration default.
|
||||||
|
# but the default configuration of other types will still take effect.
|
||||||
|
resource: {}
|
||||||
|
# requests:
|
||||||
|
# cpu: 8
|
||||||
|
# memory: 16Gi
|
||||||
|
# limits:
|
||||||
|
# cpu: 16
|
||||||
|
# memory: 32Gi
|
||||||
|
# If specified, the pod's nodeSelector,displayName="Map of nodeSelectors to match when scheduling pods on nodes"
|
||||||
|
nodeSelector: {}
|
||||||
|
# kubernetes.io/arch: amd64
|
||||||
|
# kubernetes.io/os: linux
|
||||||
|
# app.kubernetes.node.name: "k8s-node1"
|
||||||
|
# env represents an environment variable present in a Container.
|
||||||
|
# 'name' of the environment variable. Must be a C_IDENTIFIER.
|
||||||
|
# no more than one of the following may be specified: 'value' or 'valueFrom',
|
||||||
|
# 'value' is the value of environment.
|
||||||
|
# 'valueFrom' is source for the environment variable's value. Cannot be used if 'value' is not empty.
|
||||||
|
env: []
|
||||||
|
# - name: "HOME"
|
||||||
|
# value: "/opt/selectdb"
|
||||||
|
# - name: HOST_IP
|
||||||
|
# valueFrom:
|
||||||
|
# fieldRef:
|
||||||
|
# apiVersion: v1
|
||||||
|
# fieldPath: status.hostIP
|
||||||
|
|
||||||
|
# affinity is the constraints of pod scheduling
|
||||||
|
# nodeAffinity: Describes node affinity scheduling rules for the pod.
|
||||||
|
# podAffinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
|
||||||
|
# PodAntiAffinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
|
||||||
|
# reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
|
||||||
|
affinity: {}
|
||||||
|
# nodeAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "In"
|
||||||
|
# values:
|
||||||
|
# - "k8s-node1"
|
||||||
|
# "k8s-node2"
|
||||||
|
# podAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "In"
|
||||||
|
# values:
|
||||||
|
# - "fe1"
|
||||||
|
# "fe2"
|
||||||
|
# podAntiAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "NotIn"
|
||||||
|
# values:
|
||||||
|
# - "fe1"
|
||||||
|
# "fe2"
|
||||||
|
|
||||||
|
# the pod this Toleration is attached to tolerates any taint that matches. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||||
|
# the triple <key,value,effect> using the matching operator <operator>.
|
||||||
|
# 'key' is the taint key that the toleration applies to. Empty means match all taint keys. if the 'key' is empty, 'operator' must be Exists; this combination means to match all values and all keys.
|
||||||
|
# 'operator' represents a key's relationship to the 'value'. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
|
||||||
|
# 'value' is the taint value the toleration matches to. If the 'operator' is Exists, the 'value' should be empty, otherwise just a regular string.
|
||||||
|
# 'effect' indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
|
||||||
|
tolerations: []
|
||||||
|
# - key: "toleration"
|
||||||
|
# operator: "Equal"
|
||||||
|
# value: "master"
|
||||||
|
# - key: "node-role.kubernetes.io/master"
|
||||||
|
# operator: "Exists"
|
||||||
|
# effect: "NoSchedule"
|
||||||
|
# - operator: "Exists"
|
||||||
|
|
||||||
|
# hostAliases allows adding entries to /etc/hosts inside the containers
|
||||||
|
hostAliases: []
|
||||||
|
# If Doris uses HDFS or an external catalog, configuring the namenode host mapping here is helpful
|
||||||
|
# - ip: "127.0.0.1"
|
||||||
|
# hostnames:
|
||||||
|
# - "hostname1"
|
||||||
|
# - ip: "127.0.0.2"
|
||||||
|
# hostnames:
|
||||||
|
# - "hostname2"
|
||||||
|
persistentVolumeClaim: {}
|
||||||
|
# meta volume, mountPath is /opt/apache-doris/fe/doris-meta
|
||||||
|
# metaPersistentVolume:
|
||||||
|
# storage: 200Gi
|
||||||
|
# when using a specific storageclass, reconfigure storageClassName as in the commented example.
|
||||||
|
# storageClassName: ""
|
||||||
|
# logs volume, mountPath is /opt/apache-doris/fe/log
|
||||||
|
# logsPersistentVolume:
|
||||||
|
# storage: 100Gi
|
||||||
|
# when using a specific storageclass, reconfigure storageClassName as in the commented example.
|
||||||
|
# storageClassName: ""
|
||||||
|
systemInitialization: {}
|
||||||
|
# initImage: "selectdb/alpine:latest"
|
||||||
|
# command: [ "/sbin/sysctl", "-w", "vm.max_map_count=2000000" ]
|
||||||
|
|
||||||
|
beSpec:
|
||||||
|
replicas: 3
|
||||||
|
# the pod labels for user select or classify pods.
|
||||||
|
labels: {}
|
||||||
|
# selectdb.app.classify: "classify-1"
|
||||||
|
image:
|
||||||
|
repository: apache/doris
|
||||||
|
tag: be-2.1.7
|
||||||
|
# imagePullSecrets:
|
||||||
|
# - name: imagePullSecret
|
||||||
|
# config the service port for be service.
|
||||||
|
# if you want to use a dedicated port for be service, you can config the port.
|
||||||
|
# see https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports for more details.
|
||||||
|
service:
|
||||||
|
# the be service type, only supported ClusterIP, NodePort, LoadBalancer
|
||||||
|
type: ClusterIP
|
||||||
|
# type: NodePort
|
||||||
|
# servicePorts:
|
||||||
|
# - nodePort: 30003
|
||||||
|
# targetPort: 9040
|
||||||
|
# - nodePort: 30004
|
||||||
|
# targetPort: 8040
|
||||||
|
# the loadBalancerIP for static ip config when the type=LoadBalancer and loadbalancerIp is not empty.
|
||||||
|
# type: "LoadBalancer"
|
||||||
|
# loadbalancerIP: "127.0.0.1"
|
||||||
|
# use LoadBalancer on cloud platform, try to configure it using annotations
|
||||||
|
annotations: {}
|
||||||
|
# service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "intranet"
|
||||||
|
# specify the configMap, as a dictionary.
|
||||||
|
# Its 'key' is the config file name in the doris 'doris_root_path/be/conf/' file path,
|
||||||
|
# Its 'value' is the text content of the corresponding file (it should be relatively long, actually).
|
||||||
|
configMap: {}
|
||||||
|
# 'be.conf' is the startup config file of be, and the name must be 'be.conf'.
|
||||||
|
# be.conf: |
|
||||||
|
# be_port = 9060
|
||||||
|
# webserver_port = 8040
|
||||||
|
# heartbeat_service_port = 9050
|
||||||
|
# brpc_port = 8060
|
||||||
|
# storage_root_path = /data/doris/be/storage
|
||||||
|
# Other config files can also be specified here
|
||||||
|
# hdfs-site.xml: |
|
||||||
|
# <?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
#
|
||||||
|
# <configuration>
|
||||||
|
# <property>
|
||||||
|
# <name>dfs.nameservices</name>
|
||||||
|
# <value>ns</value>
|
||||||
|
# </property>
|
||||||
|
#
|
||||||
|
# mountConfigMaps supports configmap mounting in the form of specifying the mounting directory
|
||||||
|
# but please note that the mounting path cannot be repeated.
|
||||||
|
# if the elements within mountConfigMaps contain data, it means that the configmap does not need to be created manually by the user, and vice versa.
|
||||||
|
# in the following example, the user needs to manually create configmap 'cm-1'
|
||||||
|
# mountConfigMaps: {}
|
||||||
|
# - configMapName: cm-1
|
||||||
|
# mountPath: /etc/cm1
|
||||||
|
# - configMapName: cm-2
|
||||||
|
# mountPath: /etc/cm2
|
||||||
|
# data:
|
||||||
|
# copy_file1: |
|
||||||
|
# text *** content
|
||||||
|
# copy_file2: |
|
||||||
|
# text *** content
|
||||||
|
|
||||||
|
# If configured separately here, it will overwrite the total resources configuration default.
|
||||||
|
# but the default configuration of other types will still take effect.
|
||||||
|
resource: {}
|
||||||
|
# requests:
|
||||||
|
# cpu: 8
|
||||||
|
# memory: 16Gi
|
||||||
|
# limits:
|
||||||
|
# cpu: 16
|
||||||
|
# memory: 32Gi
|
||||||
|
# If specified, the pod's nodeSelector,displayName="Map of nodeSelectors to match when scheduling pods on nodes"
|
||||||
|
nodeSelector: {}
|
||||||
|
# kubernetes.io/arch: amd64
|
||||||
|
# kubernetes.io/os: linux
|
||||||
|
# app.kubernetes.node.name: "k8s-node1"
|
||||||
|
# env represents an environment variable present in a Container.
|
||||||
|
# 'name' of the environment variable. Must be a C_IDENTIFIER.
|
||||||
|
# no more than one of the following may be specified: 'value' or 'valueFrom',
|
||||||
|
# 'value' is the value of environment.
|
||||||
|
# 'valueFrom' is source for the environment variable's value. Cannot be used if 'value' is not empty.
|
||||||
|
env: []
|
||||||
|
# - name: "HOME"
|
||||||
|
# value: "/opt/selectdb"
|
||||||
|
# - name: HOST_IP
|
||||||
|
# valueFrom:
|
||||||
|
# fieldRef:
|
||||||
|
# apiVersion: v1
|
||||||
|
# fieldPath: status.hostIP
|
||||||
|
|
||||||
|
# affinity is the constraints of pod scheduling
|
||||||
|
# nodeAffinity: Describes node affinity scheduling rules for the pod.
|
||||||
|
# podAffinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
|
||||||
|
# PodAntiAffinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
|
||||||
|
# reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
|
||||||
|
affinity: {}
|
||||||
|
# nodeAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "In"
|
||||||
|
# values:
|
||||||
|
# - "k8s-node1"
|
||||||
|
# "k8s-node2"
|
||||||
|
# podAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "In"
|
||||||
|
# values:
|
||||||
|
# - "fe1"
|
||||||
|
# "fe2"
|
||||||
|
# podAntiAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "NotIn"
|
||||||
|
# values:
|
||||||
|
# - "fe1"
|
||||||
|
# "fe2"
|
||||||
|
|
||||||
|
# the pod this Toleration is attached to tolerates any taint that matches. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||||
|
# the triple <key,value,effect> using the matching operator <operator>.
|
||||||
|
# 'key' is the taint key that the toleration applies to. Empty means match all taint keys. if the 'key' is empty, 'operator' must be Exists; this combination means to match all values and all keys.
|
||||||
|
# 'operator' represents a key's relationship to the 'value'. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
|
||||||
|
# 'value' is the taint value the toleration matches to. If the 'operator' is Exists, the 'value' should be empty, otherwise just a regular string.
|
||||||
|
# 'effect' indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
|
||||||
|
tolerations: []
|
||||||
|
# - key: "toleration"
|
||||||
|
# operator: "Equal"
|
||||||
|
# value: "master"
|
||||||
|
# - key: "node-role.kubernetes.io/master"
|
||||||
|
# operator: "Exists"
|
||||||
|
# effect: "NoSchedule"
|
||||||
|
# - operator: "Exists"
|
||||||
|
|
||||||
|
# hostAliases allows adding entries to /etc/hosts inside the containers
|
||||||
|
hostAliases: []
|
||||||
|
# If Doris uses HDFS or an external catalog, configuring the namenode host mapping here is helpful
|
||||||
|
# - ip: "127.0.0.1"
|
||||||
|
# hostnames:
|
||||||
|
# - "hostname1"
|
||||||
|
# - ip: "127.0.0.2"
|
||||||
|
# hostnames:
|
||||||
|
# - "hostname2"
|
||||||
|
persistentVolumeClaim: {}
|
||||||
|
# data volume, mountPath is /opt/apache-doris/be/storage
|
||||||
|
# dataPersistentVolume:
|
||||||
|
# storage: 200Gi
|
||||||
|
# when using a specific storageclass, reconfigure storageClassName as in the commented example.
|
||||||
|
# storageClassName: ""
|
||||||
|
# logs volume, mountPath is /opt/apache-doris/be/log
|
||||||
|
# logsPersistentVolume:
|
||||||
|
# storage: 100Gi
|
||||||
|
# when using a specific storageclass, reconfigure storageClassName as in the commented example.
|
||||||
|
# storageClassName: ""
|
||||||
|
systemInitialization: {}
|
||||||
|
# initImage: "selectdb/alpine:latest"
|
||||||
|
# command: [ "/sbin/sysctl", "-w", "vm.max_map_count=2000000" ]
|
||||||
|
|
||||||
|
cnSpec:
|
||||||
|
replicas: 3
|
||||||
|
# the pod labels for user select or classify pods.
|
||||||
|
labels: {}
|
||||||
|
# selectdb.app.classify: "classify-1"
|
||||||
|
image:
|
||||||
|
repository: apache/doris
|
||||||
|
tag: be-2.1.7
|
||||||
|
# imagePullSecrets:
|
||||||
|
# - name: imagePullSecret
|
||||||
|
# config the service port for cn service.
|
||||||
|
# if you want to use a dedicated port for cn service, you can config the port.
|
||||||
|
# see https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports for more details.
|
||||||
|
service:
|
||||||
|
# the cn service type, only supported ClusterIP, NodePort, LoadBalancer
|
||||||
|
type: ClusterIP
|
||||||
|
# type: NodePort
|
||||||
|
# servicePorts:
|
||||||
|
# - nodePort: 30005
|
||||||
|
# targetPort: 9040
|
||||||
|
# - nodePort: 30006
|
||||||
|
# targetPort: 8040
|
||||||
|
# type: "LoadBalancer"
|
||||||
|
# the loadBalancerIP for static ip config when the type=LoadBalancer and loadbalancerIp is not empty.
|
||||||
|
# loadbalancerIP: "127.0.0.1"
|
||||||
|
# use LoadBalancer on cloud platform, try to configure it using annotations
|
||||||
|
annotations: {}
|
||||||
|
# service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "intranet"
|
||||||
|
# specify the configMap, as a dictionary.
|
||||||
|
# Its 'key' is the config file name in the doris 'doris_root_path/be/conf/' file path,
|
||||||
|
# Its 'value' is the text content of the corresponding file (it should be relatively long, actually).
|
||||||
|
configMap: {}
|
||||||
|
# 'be.conf' is the startup config file of cn, and the name must be 'be.conf'.
|
||||||
|
# be.conf: |
|
||||||
|
# be_port = 9060
|
||||||
|
# webserver_port = 8040
|
||||||
|
# heartbeat_service_port = 9050
|
||||||
|
# brpc_port = 8060
|
||||||
|
# storage_root_path = /data/doris/be/storage
|
||||||
|
# be_node_role=computation
|
||||||
|
# Other config files can also be specified here
|
||||||
|
# hdfs-site.xml: |
|
||||||
|
# <?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
#
|
||||||
|
# <configuration>
|
||||||
|
# <property>
|
||||||
|
# <name>dfs.nameservices</name>
|
||||||
|
# <value>ns</value>
|
||||||
|
# </property>
|
||||||
|
#
|
||||||
|
# mountConfigMaps supports configmap mounting in the form of specifying the mounting directory
|
||||||
|
# but please note that the mounting path cannot be repeated.
|
||||||
|
# if the elements within mountConfigMaps contain data, it means that the configmap does not need to be created manually by the user, and vice versa.
|
||||||
|
# in the following example, the user needs to manually create configmap 'cm-1'
|
||||||
|
# mountConfigMaps: {}
|
||||||
|
# - configMapName: cm-1
|
||||||
|
# mountPath: /etc/cm1
|
||||||
|
# - configMapName: cm-2
|
||||||
|
# mountPath: /etc/cm2
|
||||||
|
# data:
|
||||||
|
# copy_file1: |
|
||||||
|
# text *** content
|
||||||
|
# copy_file2: |
|
||||||
|
# text *** content
|
||||||
|
|
||||||
|
# If configured separately here, it will overwrite the total resources configuration default.
|
||||||
|
# but the default configuration of other types will still take effect.
|
||||||
|
resource: {}
|
||||||
|
# requests:
|
||||||
|
# cpu: 8
|
||||||
|
# memory: 16Gi
|
||||||
|
# limits:
|
||||||
|
# cpu: 16
|
||||||
|
# memory: 32Gi
|
||||||
|
# If specified, the pod's nodeSelector,displayName="Map of nodeSelectors to match when scheduling pods on nodes"
|
||||||
|
nodeSelector: {}
|
||||||
|
# kubernetes.io/arch: amd64
|
||||||
|
# kubernetes.io/os: linux
|
||||||
|
# app.kubernetes.node.name: "k8s-node1"
|
||||||
|
# env represents an environment variable present in a Container.
|
||||||
|
# 'name' of the environment variable. Must be a C_IDENTIFIER.
|
||||||
|
# no more than one of the following may be specified: 'value' or 'valueFrom',
|
||||||
|
# 'value' is the value of environment.
|
||||||
|
# 'valueFrom' is source for the environment variable's value. Cannot be used if 'value' is not empty.
|
||||||
|
env: []
|
||||||
|
# - name: "HOME"
|
||||||
|
# value: "/opt/selectdb"
|
||||||
|
# - name: HOST_IP
|
||||||
|
# valueFrom:
|
||||||
|
# fieldRef:
|
||||||
|
# apiVersion: v1
|
||||||
|
# fieldPath: status.hostIP
|
||||||
|
|
||||||
|
# affinity is the constraints of pod scheduling
|
||||||
|
# nodeAffinity: Describes node affinity scheduling rules for the pod.
|
||||||
|
# podAffinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
|
||||||
|
# PodAntiAffinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
|
||||||
|
# reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
|
||||||
|
affinity: {}
|
||||||
|
# nodeAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "In"
|
||||||
|
# values:
|
||||||
|
# - "k8s-node1"
|
||||||
|
# "k8s-node2"
|
||||||
|
# podAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "In"
|
||||||
|
# values:
|
||||||
|
# - "fe1"
|
||||||
|
# "fe2"
|
||||||
|
# podAntiAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "NotIn"
|
||||||
|
# values:
|
||||||
|
# - "fe1"
|
||||||
|
# "fe2"
|
||||||
|
|
||||||
|
# the pod this Toleration is attached to tolerates any taint that matches. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||||
|
# the triple <key,value,effect> using the matching operator <operator>.
|
||||||
|
# 'key' is the taint key that the toleration applies to. Empty means match all taint keys. if the 'key' is empty, 'operator' must be Exists; this combination means to match all values and all keys.
|
||||||
|
# 'operator' represents a key's relationship to the 'value'. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
|
||||||
|
# 'value' is the taint value the toleration matches to. If the 'operator' is Exists, the 'value' should be empty, otherwise just a regular string.
|
||||||
|
# 'effect' indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
|
||||||
|
tolerations: []
|
||||||
|
# - key: "toleration"
|
||||||
|
# operator: "Equal"
|
||||||
|
# value: "master"
|
||||||
|
# - key: "node-role.kubernetes.io/master"
|
||||||
|
# operator: "Exists"
|
||||||
|
# effect: "NoSchedule"
|
||||||
|
# - operator: "Exists"
|
||||||
|
|
||||||
|
# hostAliases allows adding entries to /etc/hosts inside the containers
|
||||||
|
hostAliases: []
|
||||||
|
# If Doris uses HDFS or an external catalog, configuring the namenode host mapping here is helpful
|
||||||
|
# - ip: "127.0.0.1"
|
||||||
|
# hostnames:
|
||||||
|
# - "hostname1"
|
||||||
|
# - ip: "127.0.0.2"
|
||||||
|
# hostnames:
|
||||||
|
# - "hostname2"
|
||||||
|
persistentVolumeClaim: {}
|
||||||
|
# data volume, mountPath is /opt/apache-doris/be/storage
|
||||||
|
# dataPersistentVolume:
|
||||||
|
# storage: 200Gi
|
||||||
|
# when using a specific storageclass, reconfigure storageClassName as in the commented example.
|
||||||
|
# storageClassName: ""
|
||||||
|
# logs volume, mountPath is /opt/apache-doris/be/log
|
||||||
|
# logsPersistentVolume:
|
||||||
|
# storage: 100Gi
|
||||||
|
# when using a specific storageclass, reconfigure storageClassName as in the commented example.
|
||||||
|
# storageClassName: ""
|
||||||
|
systemInitialization: {}
|
||||||
|
# initImage: "selectdb/alpine:latest"
|
||||||
|
# command: [ "/sbin/sysctl", "-w", "vm.max_map_count=2000000" ]
|
||||||
|
# specific cn auto scale policy
|
||||||
|
autoScalingPolicy:
|
||||||
|
enable: true
|
||||||
|
# version: v2
|
||||||
|
minReplicas: 1
|
||||||
|
maxReplicas: 4
|
||||||
|
# hpaPolicy corresponds to the HorizontalPodAutoscaler's HorizontalPodAutoscalerSpec
|
||||||
|
hpaPolicy: {}
|
||||||
|
# metrics:
|
||||||
|
# - type: Resource
|
||||||
|
# resource:
|
||||||
|
# name: cpu
|
||||||
|
# target:
|
||||||
|
# type: Utilization
|
||||||
|
# averageUtilization: 30
|
||||||
|
|
||||||
|
brokerSpec:
|
||||||
|
replicas: 3
|
||||||
|
# the pod labels for user select or classify pods.
|
||||||
|
labels: {}
|
||||||
|
# selectdb.app.classify: "classify-1"
|
||||||
|
image:
|
||||||
|
repository: apache/doris
|
||||||
|
tag: broker-2.1.7
|
||||||
|
# imagePullSecrets:
|
||||||
|
# - name: imagePullSecret
|
||||||
|
# specify the configMap, as a dictionary.
|
||||||
|
# Its 'key' is the config file name in the doris 'doris_root_path/apache_hdfs_broker/conf/' file path,
|
||||||
|
# Its 'value' is the text content of the corresponding file (it should be relatively long, actually).
|
||||||
|
configMap: {}
|
||||||
|
# 'apache_hdfs_broker.conf' is the startup config file of broker, and the name must be 'apache_hdfs_broker.conf'.
|
||||||
|
# apache_hdfs_broker.conf: |
|
||||||
|
# broker_ipc_port: 8000
|
||||||
|
# client_expire_seconds: 3600
|
||||||
|
# Other config files can also be specified here
|
||||||
|
# hdfs-site.xml: |
|
||||||
|
# <?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
#
|
||||||
|
# <configuration>
|
||||||
|
# <property>
|
||||||
|
# <name>dfs.nameservices</name>
|
||||||
|
# <value>ns</value>
|
||||||
|
# </property>
|
||||||
|
#
|
||||||
|
# mountConfigMaps supports configmap mounting in the form of specifying the mounting directory
|
||||||
|
# but please note that the mounting path cannot be repeated.
|
||||||
|
# if the elements within mountConfigMaps contain data, it means that the configmap does not need to be created manually by the user, and vice versa.
|
||||||
|
# in the following example, the user needs to manually create configmap 'cm-1'
|
||||||
|
# mountConfigMaps: {}
|
||||||
|
# - configMapName: cm-1
|
||||||
|
# mountPath: /etc/cm1
|
||||||
|
# - configMapName: cm-2
|
||||||
|
# mountPath: /etc/cm2
|
||||||
|
# data:
|
||||||
|
# copy_file1: |
|
||||||
|
# text *** content
|
||||||
|
# copy_file2: |
|
||||||
|
# text *** content
|
||||||
|
|
||||||
|
# If configured separately here, it will overwrite the total resources configuration default.
|
||||||
|
# but the default configuration of other types will still take effect.
|
||||||
|
resource: {}
|
||||||
|
# requests:
|
||||||
|
# cpu: 8
|
||||||
|
# memory: 16Gi
|
||||||
|
# limits:
|
||||||
|
# cpu: 16
|
||||||
|
# memory: 32Gi
|
||||||
|
# If specified, the pod's nodeSelector,displayName="Map of nodeSelectors to match when scheduling pods on nodes"
|
||||||
|
nodeSelector: {}
|
||||||
|
# kubernetes.io/arch: amd64
|
||||||
|
# kubernetes.io/os: linux
|
||||||
|
# app.kubernetes.node.name: "k8s-node1"
|
||||||
|
# env represents an environment variable present in a Container.
|
||||||
|
# 'name' of the environment variable. Must be a C_IDENTIFIER.
|
||||||
|
# no more than one of the following may be specified: 'value' or 'valueFrom',
|
||||||
|
# 'value' is the value of environment.
|
||||||
|
# 'valueFrom' is source for the environment variable's value. Cannot be used if 'value' is not empty.
|
||||||
|
env: []
|
||||||
|
# - name: "HOME"
|
||||||
|
# value: "/opt/selectdb"
|
||||||
|
# - name: HOST_IP
|
||||||
|
# valueFrom:
|
||||||
|
# fieldRef:
|
||||||
|
# apiVersion: v1
|
||||||
|
# fieldPath: status.hostIP
|
||||||
|
|
||||||
|
# affinity is the constraints of pod scheduling
|
||||||
|
# nodeAffinity: Describes node affinity scheduling rules for the pod.
|
||||||
|
# podAffinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
|
||||||
|
# PodAntiAffinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
|
||||||
|
# reference: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
|
||||||
|
affinity: {}
|
||||||
|
# nodeAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "In"
|
||||||
|
# values:
|
||||||
|
# - "k8s-node1"
|
||||||
|
# "k8s-node2"
|
||||||
|
# podAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "In"
|
||||||
|
# values:
|
||||||
|
# - "fe1"
|
||||||
|
# "fe2"
|
||||||
|
# podAntiAffinity:
|
||||||
|
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||||
|
# nodeSelectorTerms:
|
||||||
|
# - matchExpressions:
|
||||||
|
# key: "app.kubernetes.node.name"
|
||||||
|
# operator: "NotIn"
|
||||||
|
# values:
|
||||||
|
# - "fe1"
|
||||||
|
# "fe2"
|
||||||
|
|
||||||
|
# the pod this Toleration is attached to tolerates any taint that matches. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||||
|
# the triple <key,value,effect> using the matching operator <operator>.
|
||||||
|
# 'key' is the taint key that the toleration applies to. Empty means match all taint keys. if the 'key' is empty, 'operator' must be Exists; this combination means to match all values and all keys.
|
||||||
|
# 'operator' represents a key's relationship to the 'value'. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
|
||||||
|
# 'value' is the taint value the toleration matches to. If the 'operator' is Exists, the 'value' should be empty, otherwise just a regular string.
|
||||||
|
# 'effect' indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
|
||||||
|
tolerations: []
|
||||||
|
# - key: "toleration"
|
||||||
|
# operator: "Equal"
|
||||||
|
# value: "master"
|
||||||
|
# - key: "node-role.kubernetes.io/master"
|
||||||
|
# operator: "Exists"
|
||||||
|
# effect: "NoSchedule"
|
||||||
|
# - operator: "Exists"
|
||||||
|
|
||||||
|
# hostAliases allows adding entries to /etc/hosts inside the containers
|
||||||
|
hostAliases: []
|
||||||
|
# If Doris uses HDFS or an external catalog, configuring the namenode host mapping here is helpful
|
||||||
|
# - ip: "127.0.0.1"
|
||||||
|
# hostnames:
|
||||||
|
# - "hostname1"
|
||||||
|
# - ip: "127.0.0.2"
|
||||||
|
# hostnames:
|
||||||
|
# - "hostname2"
|
||||||
|
persistentVolumeClaim: {}
|
||||||
|
# logs volume, mountPath is /opt/apache-doris/apache_hdfs_broker/log
|
||||||
|
# logsPersistentVolume:
|
||||||
|
# storage: 100Gi
|
||||||
|
# when using a specific storageclass, reconfigure storageClassName as in the commented example.
|
||||||
|
# storageClassName: ""
|
||||||
|
systemInitialization: {}
|
||||||
|
# initImage: "selectdb/alpine:latest"
|
||||||
|
# command: [ "/sbin/sysctl", "-w", "vm.max_map_count=2000000" ]
|
||||||
|
|
||||||
|
|
||||||
387
agent-common/SplitProject/ranjing-python-devfusion/Dev_Fusion.py
Normal file
@@ -0,0 +1,387 @@
|
|||||||
|
import random
|
||||||
|
import threading
|
||||||
|
from queue import Queue
|
||||||
|
from paho.mqtt import client as mqtt_client
|
||||||
|
import numpy as np
|
||||||
|
import time
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import datetime
|
||||||
|
from KF_V2 import *
|
||||||
|
from utils import *
|
||||||
|
from config import *
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
# Load the YAML configuration first
|
||||||
|
def load_mqtt_config():
|
||||||
|
config_path = os.getenv('CONFIG_PATH', 'config.yaml')
|
||||||
|
with open(config_path, 'r') as f:
|
||||||
|
config = yaml.safe_load(f)
|
||||||
|
return config['mqtt'], config['topics']
|
||||||
|
|
||||||
|
# Get the MQTT and topics configuration
|
||||||
|
mqtt_config, topics_config = load_mqtt_config()
|
||||||
|
|
||||||
|
## =======================
|
||||||
|
# MQTT broker address
|
||||||
|
# broker = '192.168.36.234'
|
||||||
|
# port = 37826
|
||||||
|
# username = "cmlc"
|
||||||
|
# password = "odD8#Ve7.B"
|
||||||
|
client_id = f'python-mqtt-{random.randint(0, 100)}'
|
||||||
|
|
||||||
|
# Create the ArgumentParser object
|
||||||
|
parser = argparse.ArgumentParser(description='处理命令行参数')
|
||||||
|
|
||||||
|
# Add the task_id argument (short option -t), type str, default "+"
|
||||||
|
parser.add_argument('-t', '--task_id', type=str, default="+", help='任务ID')
|
||||||
|
|
||||||
|
# Add the gate argument (short option -g), type int, default 30
|
||||||
|
parser.add_argument('-g', '--gate', type=int, default=30, help='门限值')
|
||||||
|
|
||||||
|
# Add the interval argument (short option -i), type float, default 1.0
|
||||||
|
parser.add_argument('-i', '--interval', type=float, default=1.0, help='时间间隔')
|
||||||
|
|
||||||
|
# Parse the command-line arguments
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# Instantiate the DataFusion class
|
||||||
|
fusion_instance = DataFusion(
|
||||||
|
|
||||||
|
gate=args.gate,
|
||||||
|
interval=args.interval,
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
)
|
||||||
|
|
||||||
|
global task_id
|
||||||
|
task_id = "10087"
|
||||||
|
|
||||||
|
# Extract the base path from the YAML mqtt_topic
|
||||||
|
base_path = topics_config['mqtt_topic'].split('/')[0] # yields "bridge"
|
||||||
|
|
||||||
|
# Update the topic format used for data reporting
|
||||||
|
providerCode = "DP74b4ef9fb4aaf269"
|
||||||
|
fusionCode = "DPZYLY"
|
||||||
|
deviceType = "5ga"
|
||||||
|
fusionType = "fusion"
|
||||||
|
deviceId = "10580005"
|
||||||
|
fusionId = "554343465692430336"
|
||||||
|
sensor_id_list = ["80103"]
|
||||||
|
|
||||||
|
# Build the topic using base_path
|
||||||
|
topic = f"{base_path}/{providerCode}/device_data/{deviceType}/{deviceId}"
|
||||||
|
|
||||||
|
# Extract the base path from the YAML sensor_topic
|
||||||
|
base_topic = topics_config['sensor_topic'].split('FU_PAM')[0] # yields "fromcheck/DP74b4ef9fb4aaf269/device_data/"
|
||||||
|
|
||||||
|
# Subscription topic, built from the YAML format
|
||||||
|
subscribe_topic = f"{base_topic}5ga/10000000000000" # replace FU_PAM with 5ga, and + with a concrete device ID
|
||||||
|
|
||||||
|
# Topic for publishing fusion results
|
||||||
|
# fusionId comes from the ID assigned when the task is dispatched
|
||||||
|
publish_topic = f"fromcheck/{fusionCode}/device_data/{fusionType}/{task_id}"
|
||||||
|
# Topic for updating run parameters
|
||||||
|
fusion_parameters_topic = topics_config['sensor_topic']
|
||||||
|
# Generate a unique client_id
|
||||||
|
|
||||||
|
# Data pool
|
||||||
|
data_pool = Queue()
|
||||||
|
run_parameter = None
|
||||||
|
interval = args.interval
|
||||||
|
|
||||||
|
# Define the reference point PO (latitude, longitude)
|
||||||
|
global reference_point
|
||||||
|
reference_point = (104.08, 30.51) # longitude and latitude of the reference point
|
||||||
|
# Data pool
|
||||||
|
data_pool = Queue()
|
||||||
|
run_parameter = None
|
||||||
|
# Initialize the data processing class
|
||||||
|
pipe = Pipeline(fusion_parameters_topic=topics_config['sensor_topic'], reference_point=reference_point)
|
||||||
|
|
||||||
|
fusion_code = "FU_PAM/"+args.task_id
|
||||||
|
|
||||||
|
# Set up logging
|
||||||
|
def setup_logging():
|
||||||
|
# Create the logs directory if it does not exist
|
||||||
|
if not os.path.exists('logs'):
|
||||||
|
os.makedirs('logs')
|
||||||
|
|
||||||
|
# Build the log file name (includes the date)
|
||||||
|
current_time = datetime.datetime.now()
|
||||||
|
error_log_filename = f'logs/mqtt_connection_{current_time.strftime("%Y%m%d")}_error.log'
|
||||||
|
|
||||||
|
# Configure the root logger
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO, # 记录所有信息
|
||||||
|
format='%(asctime)s - %(levelname)s - %(message)s',
|
||||||
|
handlers=[
|
||||||
|
logging.StreamHandler() # 同时输出到控制台
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
# Configure the error logger
|
||||||
|
error_logger = logging.getLogger('error_logger')
|
||||||
|
error_logger.setLevel(logging.ERROR)
|
||||||
|
|
||||||
|
# Create the file handler
|
||||||
|
error_handler = logging.FileHandler(error_log_filename)
|
||||||
|
error_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
|
||||||
|
|
||||||
|
# Attach the handler to the error logger
|
||||||
|
error_logger.addHandler(error_handler)
|
||||||
|
|
||||||
|
def connect_mqtt() -> mqtt_client:
|
||||||
|
def on_connect(client, userdata, flags, rc):
|
||||||
|
if rc == 0:
|
||||||
|
logging.info("Successfully connected to MQTT Broker")
|
||||||
|
logging.info(f"Client ID: {client_id}")
|
||||||
|
logging.info(f"Broker: {mqtt_config['broker']}:{mqtt_config['port']}")
|
||||||
|
# Re-subscribe to topics
|
||||||
|
client.subscribe(fusion_parameters_topic)
|
||||||
|
logging.info(f"Subscribed to fusion parameters topic: {fusion_parameters_topic}")
|
||||||
|
if hasattr(pipe, 'topics'):
|
||||||
|
for topic in pipe.topics:
|
||||||
|
client.subscribe(topic)
|
||||||
|
logging.info(f"Subscribed to topic: {topic}")
|
||||||
|
else:
|
||||||
|
logging.error(f"Failed to connect, return code: {rc} ({DISCONNECT_REASONS.get(rc, '未知错误')})")
|
||||||
|
|
||||||
|
def on_disconnect(client, userdata, rc):
|
||||||
|
current_time = datetime.datetime.now()
|
||||||
|
reason = DISCONNECT_REASONS.get(rc, "未知错误")
|
||||||
|
|
||||||
|
logging.warning(f"Disconnected from MQTT Broker at {current_time.strftime('%Y-%m-%d %H:%M:%S')}")
|
||||||
|
logging.warning(f"Disconnect reason code: {rc} - {reason}")
|
||||||
|
|
||||||
|
if rc != 0:
|
||||||
|
logging.error("Unexpected disconnection. Attempting to reconnect...")
|
||||||
|
try:
|
||||||
|
client.reconnect()
|
||||||
|
logging.info("Reconnection successful")
|
||||||
|
except Exception as e:
|
||||||
|
current_time = datetime.datetime.now()
|
||||||
|
logging.error(f"Reconnection failed at {current_time.strftime('%Y-%m-%d %H:%M:%S')}: {str(e)}")
|
||||||
|
logging.error(f"Exception type: {type(e).__name__}")
|
||||||
|
logging.error(f"Stack trace:", exc_info=True)
|
||||||
|
|
||||||
|
client = mqtt_client.Client(client_id, clean_session=True)
|
||||||
|
client.username_pw_set(mqtt_config['username'], mqtt_config['password'])
|
||||||
|
|
||||||
|
# Set keepalive and retry intervals
|
||||||
|
client.keepalive = 60 # 60秒的保活时间
|
||||||
|
client.socket_timeout = 30 # 30秒的socket超时
|
||||||
|
client.reconnect_delay_set(min_delay=1, max_delay=60) # 重连延迟在1-60秒之间
|
||||||
|
|
||||||
|
# Set the last will message
|
||||||
|
will_topic = f"fromcheck/{fusionCode}/status/{task_id}"
|
||||||
|
will_payload = "offline"
|
||||||
|
client.will_set(will_topic, will_payload, qos=1, retain=True)
|
||||||
|
|
||||||
|
# Register the callback functions
|
||||||
|
client.on_connect = on_connect
|
||||||
|
client.on_disconnect = on_disconnect
|
||||||
|
|
||||||
|
try:
|
||||||
|
client.connect(mqtt_config['broker'], mqtt_config['port'])
|
||||||
|
except Exception as e:
|
||||||
|
logging.error(f"Initial connection failed: {str(e)}")
|
||||||
|
logging.error(f"Exception type: {type(e).__name__}")
|
||||||
|
logging.error(f"Stack trace:", exc_info=True)
|
||||||
|
time.sleep(5)
|
||||||
|
return connect_mqtt()
|
||||||
|
|
||||||
|
# Publish the online status
|
||||||
|
client.publish(will_topic, "online", qos=1, retain=True)
|
||||||
|
|
||||||
|
return client
|
||||||
|
|
||||||
|
def subscribe(client: mqtt_client):
|
||||||
|
def on_message(client, userdata, msg):
|
||||||
|
try:
|
||||||
|
global run_parameter
|
||||||
|
global task_id
|
||||||
|
logging.info(f"Received message on topic: {msg.topic}")
|
||||||
|
logging.info(f"Message payload: {msg.payload.decode()}")
|
||||||
|
|
||||||
|
if "FU_PAM" in msg.topic:
|
||||||
|
if args.task_id == '+' or fusion_code in msg.topic:
|
||||||
|
new_run_parameter = msg.payload.decode()
|
||||||
|
if run_parameter != new_run_parameter:
|
||||||
|
logging.info(f"Run parameter updated from {run_parameter} to {new_run_parameter}")
|
||||||
|
run_parameter = new_run_parameter
|
||||||
|
new_topics = pipe.extract_parms(run_parameter)
|
||||||
|
logging.info(f"Extracted topics: {new_topics}")
|
||||||
|
client.subscribe(new_topics) # 重新更新订阅的数据
|
||||||
|
logging.info(f"Subscribed to new topics: {new_topics}")
|
||||||
|
logging.info('===========new run_parameter!===============')
|
||||||
|
current_time = datetime.datetime.now()
|
||||||
|
task_id = pipe.task_id
|
||||||
|
else:
|
||||||
|
data_pool.put((msg.topic, msg.payload))
|
||||||
|
except Exception as e:
|
||||||
|
logging.error(f"Error processing message: {str(e)}")
|
||||||
|
logging.error(f"Exception type: {type(e).__name__}")
|
||||||
|
logging.error(f"Stack trace:", exc_info=True)
|
||||||
|
|
||||||
|
subscribe_topics = [(subscribe_topic, 0), (fusion_parameters_topic, 0)] # 默认QoS为0
|
||||||
|
client.subscribe(subscribe_topics)
|
||||||
|
client.on_message = on_message
|
||||||
|
|
||||||
|
|
||||||
|
def publish(client, message):
|
||||||
|
global task_id
|
||||||
|
global fusionCode
|
||||||
|
max_retries = 3
|
||||||
|
retry_delay = 1 # 初始重试延迟(秒)
|
||||||
|
|
||||||
|
def do_publish():
|
||||||
|
publish_topic = f"bridge/{fusionCode}/device_data/fusion/{task_id}"
|
||||||
|
try:
|
||||||
|
result = client.publish(publish_topic, message)
|
||||||
|
status = result.rc
|
||||||
|
if status == 0:
|
||||||
|
current_time = datetime.datetime.now()
|
||||||
|
formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||||
|
with open('log.txt', 'a') as log_file:
|
||||||
|
log_file.write('=====================\n')
|
||||||
|
log_file.write(f"Send message to topic {publish_topic}\n")
|
||||||
|
log_file.write(f"time: {formatted_time}\n")
|
||||||
|
log_file.write(f"{message}\n")
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
logging.error(f"Failed to send message to topic {publish_topic}, status: {status}")
|
||||||
|
return False
|
||||||
|
except Exception as e:
|
||||||
|
logging.error(f"Error publishing message: {str(e)}")
|
||||||
|
logging.error(f"Exception type: {type(e).__name__}")
|
||||||
|
logging.error(f"Stack trace:", exc_info=True)
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Retry logic
|
||||||
|
for attempt in range(max_retries):
|
||||||
|
if do_publish():
|
||||||
|
return
|
||||||
|
if attempt < max_retries - 1: # 如果不是最后一次尝试
|
||||||
|
retry_delay *= 2 # 指数退避
|
||||||
|
logging.warning(f"Retrying publish in {retry_delay} seconds...")
|
||||||
|
time.sleep(retry_delay)
|
||||||
|
|
||||||
|
logging.error(f"Failed to publish message after {max_retries} attempts")
|
||||||
|
|
||||||
|
|
||||||
|
def data_fusion(fusion_container):
|
||||||
|
global data_pool
|
||||||
|
data_list = []
|
||||||
|
# Drain all data from the data pool
|
||||||
|
while not data_pool.empty():
|
||||||
|
data_now = data_pool.get()
|
||||||
|
processed_data = pipe.process_json_data(data_now[1])
|
||||||
|
|
||||||
|
# Keep only meaningful data
|
||||||
|
if processed_data and processed_data.get("objects"): # 只记录有 objects 的数据
|
||||||
|
data_list.append(processed_data)
|
||||||
|
|
||||||
|
if data_list: # 只有当有数据时才写日志
|
||||||
|
current_time = datetime.datetime.now()
|
||||||
|
formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||||
|
with open('Data_log.txt', 'a') as log_file: # 以追加模式打开日志文件
|
||||||
|
log_file.write('=====================\n') # 写入分隔符
|
||||||
|
log_file.write(f"Get message \n")
|
||||||
|
log_file.write(f"time: {formatted_time}\n") # 写入分隔符
|
||||||
|
log_file.write(f"{data_list}\n") # 写入消息内容
|
||||||
|
|
||||||
|
sensor_data = pipe.data_encoder(data_list)
|
||||||
|
logging.info(sensor_data)
|
||||||
|
filtered_results = fusion_container.run(sensor_data)
|
||||||
|
processed_data = pipe.data_decoder(filtered_results)
|
||||||
|
processed_data = json.dumps(processed_data, indent=4)
|
||||||
|
return processed_data # 返回处理后的 JSON 字符串
|
||||||
|
|
||||||
|
|
||||||
|
def fusion_runner(client):
|
||||||
|
global run_parameter
|
||||||
|
pre_run_parameter = run_parameter
|
||||||
|
last_run_time = time.time()
|
||||||
|
last_health_check = time.time()
|
||||||
|
health_check_interval = 30 # 每30秒进行一次健康检查
|
||||||
|
fusion_container = DataFusion(args.gate, args.interval)
|
||||||
|
|
||||||
|
|
||||||
|
def check_connection():
|
||||||
|
if not client.is_connected():
|
||||||
|
logging.warning("MQTT client disconnected during fusion_runner")
|
||||||
|
try:
|
||||||
|
client.reconnect()
|
||||||
|
logging.info("Successfully reconnected in fusion_runner")
|
||||||
|
return True
|
||||||
|
except Exception as e:
|
||||||
|
logging.error(f"Reconnection failed in fusion_runner: {str(e)}")
|
||||||
|
logging.error(f"Exception type: {type(e).__name__}")
|
||||||
|
logging.error(f"Stack trace:", exc_info=True)
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
current_time = time.time()
|
||||||
|
|
||||||
|
# Periodic health check
|
||||||
|
if current_time - last_health_check >= health_check_interval:
|
||||||
|
if not check_connection():
|
||||||
|
time.sleep(5) # 如果连接失败,等待5秒后继续
|
||||||
|
continue
|
||||||
|
last_health_check = current_time
|
||||||
|
|
||||||
|
# Data processing and publishing
|
||||||
|
if current_time - last_run_time >= interval:
|
||||||
|
if not check_connection():
|
||||||
|
continue
|
||||||
|
|
||||||
|
last_run_time = current_time
|
||||||
|
|
||||||
|
if run_parameter != pre_run_parameter:
|
||||||
|
fusion_parms = pipe.extract_fusion_parms(run_parameter)
|
||||||
|
fusion_container.set_parameter(fusion_parms)
|
||||||
|
pre_run_parameter= run_parameter
|
||||||
|
|
||||||
|
|
||||||
|
processed_data = data_fusion(fusion_container)
|
||||||
|
if processed_data:
|
||||||
|
publish(client, processed_data)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logging.error(f"Error in fusion_runner: {str(e)}")
|
||||||
|
logging.error(f"Exception type: {type(e).__name__}")
|
||||||
|
logging.error(f"Stack trace:", exc_info=True)
|
||||||
|
time.sleep(1)
|
||||||
|
|
||||||
|
def run():
|
||||||
|
# Initialize the logging system
|
||||||
|
setup_logging()
|
||||||
|
logging.error("Starting MQTT client application")
|
||||||
|
|
||||||
|
while True: # 添加外层循环来处理完全断开的情况
|
||||||
|
try:
|
||||||
|
client = connect_mqtt()
|
||||||
|
subscribe(client)
|
||||||
|
|
||||||
|
logging.info("Starting fusion_runner thread")
|
||||||
|
fusion_runner_thread = threading.Thread(target=fusion_runner, args=(client,), daemon=True)
|
||||||
|
fusion_runner_thread.start()
|
||||||
|
|
||||||
|
logging.info("Starting MQTT loop")
|
||||||
|
client.loop_forever()
|
||||||
|
except Exception as e:
|
||||||
|
logging.critical(f"Critical error in main loop: {str(e)}")
|
||||||
|
logging.critical(f"Exception type: {type(e).__name__}")
|
||||||
|
logging.critical(f"Stack trace:", exc_info=True)
|
||||||
|
logging.info("Restarting in 5 seconds...")
|
||||||
|
time.sleep(5)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
run()
|
||||||
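Dev_Fusion.py reads its broker settings and topic templates from config.yaml via load_mqtt_config(). That file is not part of this diff; the sketch below shows the shape implied by the keys the script actually accesses (mqtt.broker/port/username/password and topics.mqtt_topic/sensor_topic); every concrete value is a placeholder assumption:

import yaml

config = {
    "mqtt": {
        "broker": "127.0.0.1",   # placeholder broker address
        "port": 1883,            # placeholder port
        "username": "user",      # placeholder credentials
        "password": "secret",
    },
    "topics": {
        # split('/')[0] must yield the "bridge" prefix used when building topics
        "mqtt_topic": "bridge/DP74b4ef9fb4aaf269/device_data/+/+",
        # split('FU_PAM')[0] must yield "fromcheck/<providerCode>/device_data/"
        "sensor_topic": "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+",
    },
}

with open("config.yaml", "w") as f:
    yaml.safe_dump(config, f, sort_keys=False)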
@@ -0,0 +1,145 @@
|
|||||||
|
import json
|
||||||
|
import time
|
||||||
|
import random
|
||||||
|
from math import radians, degrees, sin, cos
|
||||||
|
from paho.mqtt import client as mqtt_client
|
||||||
|
import datetime
|
||||||
|
import numpy as np
|
||||||
|
from math import atan2, sqrt
|
||||||
|
|
||||||
|
|
||||||
|
# Coordinate conversion function
|
||||||
|
def convert_to_cartesian(lat, lon, reference_point):
|
||||||
|
"""将经纬度转换为基于参考点的直角坐标,考虑地球椭球模型"""
|
||||||
|
# Earth ellipsoid parameters (WGS84)
|
||||||
|
a = 6378137.0 # semi-major axis, in meters
|
||||||
|
f = 1 / 298.257223563 # flattening
|
||||||
|
e2 = 2 * f - f ** 2 # first eccentricity squared
|
||||||
|
|
||||||
|
# Extract the reference point coordinates
|
||||||
|
ref_lat, ref_lon = reference_point
|
||||||
|
|
||||||
|
# Convert to radians
|
||||||
|
lat_rad = radians(lat)
|
||||||
|
lon_rad = radians(lon)
|
||||||
|
ref_lat_rad = radians(ref_lat)
|
||||||
|
ref_lon_rad = radians(ref_lon)
|
||||||
|
|
||||||
|
# Compute the radius of curvature
|
||||||
|
N = a / sqrt(1 - e2 * sin(ref_lat_rad) ** 2) # prime vertical radius of curvature at the reference point
|
||||||
|
|
||||||
|
# Compute planar Cartesian coordinates relative to the reference point
|
||||||
|
delta_lon = lon_rad - ref_lon_rad
|
||||||
|
X = (N + 0) * cos(ref_lat_rad) * delta_lon
|
||||||
|
Y = (a * (1 - e2)) / (1 - e2 * sin(ref_lat_rad) ** 2) * (lat_rad - ref_lat_rad)
|
||||||
|
|
||||||
|
return X, Y
|
||||||
|
|
||||||
|
|
||||||
|
# Simulated data generation function
|
||||||
|
def generate_simulated_data(reference_point, radius_km, angle):
|
||||||
|
"""生成模拟数据,符合 Pipeline 处理需求"""
|
||||||
|
R = 6371000 # Earth radius, in meters
|
||||||
|
|
||||||
|
# Convert the radius to radians
|
||||||
|
radius = radius_km / R
|
||||||
|
|
||||||
|
# Get the reference point latitude and longitude
|
||||||
|
lat0, lon0 = reference_point
|
||||||
|
|
||||||
|
# Compute the longitude and latitude of the new point
|
||||||
|
new_lat = lat0 + degrees(radius * cos(radians(angle)))
|
||||||
|
new_lon = lon0 + degrees(radius * sin(radians(angle)) / cos(radians(lat0)))
|
||||||
|
|
||||||
|
# Generate the simulated JSON data
|
||||||
|
mock_data = {
|
||||||
|
"deviceId": "80103",
|
||||||
|
"deviceType": 10,
|
||||||
|
"objects": [
|
||||||
|
{
|
||||||
|
"altitude": 150.0, # 模拟高度
|
||||||
|
"extension": {
|
||||||
|
"traceId": "00000000000001876",
|
||||||
|
"channel": "5756500000",
|
||||||
|
"objectType": 30,
|
||||||
|
"uavId": "UAS123456", # 新增字段,与 Pipeline 对应
|
||||||
|
"uavModel": "DJI Mini 3 Pro", # 模拟 UAV 型号
|
||||||
|
"deviceId": "80103" # 来源设备 ID
|
||||||
|
},
|
||||||
|
"height": 120.0, # 高度
|
||||||
|
"latitude": new_lat,
|
||||||
|
"longitude": new_lon,
|
||||||
|
"X": 0.0, # 预留字段,供转换函数填充
|
||||||
|
"Y": 0.0, # 预留字段,供转换函数填充
|
||||||
|
"speed": 15.0, # 模拟速度
|
||||||
|
"objectId": "AX0009", # 模拟目标 ID
|
||||||
|
"time": int(time.time() * 1000), # 当前时间戳(毫秒)
|
||||||
|
"source": [["sensor1", "UAS123456"]] # 模拟来源
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"providerCode": "ZYLYTEST",
|
||||||
|
"ptTime": int(time.time() * 1000) # 当前时间戳(毫秒)
|
||||||
|
}
|
||||||
|
|
||||||
|
# 转换坐标
|
||||||
|
for obj in mock_data["objects"]:
|
||||||
|
lat, lon = obj["latitude"], obj["longitude"]
|
||||||
|
obj["X"], obj["Y"] = convert_to_cartesian(lat, lon, reference_point)
|
||||||
|
|
||||||
|
return json.dumps(mock_data, indent=4)
|
||||||
|
|
||||||
|
|
||||||
|
# MQTT 推送代码
|
||||||
|
broker = '192.168.36.234'
|
||||||
|
port = 37826
|
||||||
|
providerCode = "DP74b4ef9fb4aaf269"
|
||||||
|
deviceType = "5ga"
|
||||||
|
deviceId = "10580015"
|
||||||
|
topic = f"bridge/{providerCode}/device_data/{deviceType}/{deviceId}"
|
||||||
|
client_id = f'python-mqtt-{random.randint(0, 1000)}'
|
||||||
|
username = "cmlc"
|
||||||
|
password = "odD8#Ve7.B"
|
||||||
|
reference_point = (31.880000, 117.240000) # 经度和纬度
|
||||||
|
radius = 1500 # 半径,单位:米
|
||||||
|
|
||||||
|
|
||||||
|
def connect_mqtt():
|
||||||
|
"""连接 MQTT Broker"""
|
||||||
|
def on_connect(client, userdata, flags, rc):
|
||||||
|
if rc == 0:
|
||||||
|
print("Connected to MQTT Broker!")
|
||||||
|
else:
|
||||||
|
print(f"Failed to connect, return code {rc}")
|
||||||
|
|
||||||
|
client = mqtt_client.Client(client_id)
|
||||||
|
client.on_connect = on_connect
|
||||||
|
client.username_pw_set(username, password)
|
||||||
|
client.connect(broker, port)
|
||||||
|
return client
|
||||||
|
|
||||||
|
|
||||||
|
def publish(client):
|
||||||
|
"""推送生成的模拟数据"""
|
||||||
|
msg_count = 0
|
||||||
|
angle = 0
|
||||||
|
while True:
|
||||||
|
time.sleep(1)
|
||||||
|
msg = generate_simulated_data(reference_point, radius, angle)
|
||||||
|
result = client.publish(topic, msg)
|
||||||
|
status = result.rc
|
||||||
|
if status == 0:
|
||||||
|
print(f"Send `{msg_count}` to topic `{topic}`")
|
||||||
|
else:
|
||||||
|
print(f"Failed to send message to topic {topic}")
|
||||||
|
msg_count += 1
|
||||||
|
angle += 1
|
||||||
|
|
||||||
|
|
||||||
|
def run():
|
||||||
|
client = connect_mqtt()
|
||||||
|
client.loop_start()
|
||||||
|
publish(client)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
run()
|
||||||
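A quick sanity check for the generator above (a hypothetical sketch, not part of the original file): decode one simulated message and confirm that the generated point sits roughly radius metres from the reference point in the local frame. It assumes the snippet runs in the same module, after the globals defined above.

sample = json.loads(generate_simulated_data(reference_point, radius, 45))
obj = sample["objects"][0]
offset = (obj["X"] ** 2 + obj["Y"] ** 2) ** 0.5   # distance from the reference point in the local frame
print(f"offset from reference point: {offset:.1f} m (expected ~{radius} m)")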
@@ -0,0 +1,15 @@
# Build stage
FROM python:3.12.8-slim-bookworm as builder

WORKDIR /build
COPY requirements.txt .
RUN pip install --user -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple

# Runtime stage
FROM python:3.12.8-slim-bookworm

WORKDIR /app
COPY --from=builder /root/.local/lib/python3.12/site-packages /root/.local/lib/python3.12/site-packages
COPY . .

CMD ["python", "check.py"]
279  agent-common/SplitProject/ranjing-python-devfusion/KF_V2.py  Normal file
@@ -0,0 +1,279 @@
import datetime
from os import error
import numpy as np
from config import *


def calculate_euclidean_distances(A, B):
    # Euclidean distances between A and the rows of B
    distances = np.linalg.norm(A - B, axis=1)
    # Find the minimum distance and its index
    min_distance_index = np.argmin(distances)
    min_distance = distances[min_distance_index]
    return min_distance, min_distance_index


def are_lists_equal(listA, listB):
    # Sort the sub-lists of both lists before comparing
    if len(listA) == 0:
        return False
    sorted_listA = sorted(listA, key=lambda x: (x[0], x[1]))
    sorted_listB = sorted(listB, key=lambda x: (x[0], x[1]))
    # Compare the sorted lists for equality
    return sorted_listA == sorted_listB


def sigmoid(x, a=10, b=0.1):
    # Shifted sigmoid whose value is 0.5 at x = shift_value
    # a and b are tuning parameters that control the shape
    return 1 / (1 + np.exp(-a * (x - shift_value))) + b


class KalmanFilter:
    def __init__(self, measurement, com_id, measurement_variance=1, process_variance=1e-1):
        current_time = datetime.datetime.now()
        timestamp = int(current_time.timestamp() * 1000000)
        ms = measurement.tolist()
        self.m = np.array([ms[0], ms[1], ms[2], 0, 0, 0])  # state vector, 6 dimensions
        self.origin = [com_id]     # origin: the strongest response
        self.source = self.origin  # source: all associated observations
        self.survive = np.array(survive_initial)  # initial survival value
        self.duration = 0
        self.counter = 0
        self.id = str(timestamp % 3600000000 + np.random.randint(1000))
        self.F = [[1, 0, 0, 1, 0, 0],
                  [0, 1, 0, 0, 1, 0],
                  [0, 0, 1, 0, 0, 1],
                  [0, 0, 0, 1, 0, 0],
                  [0, 0, 0, 0, 1, 0],
                  [0, 0, 0, 0, 0, 1]]
        self.F = np.array(self.F)
        self.H = [[1, 0, 0, 0, 0, 0],
                  [0, 1, 0, 0, 0, 0],
                  [0, 0, 1, 0, 0, 0]]
        self.H = np.array(self.H)
        self.R = measurement_variance * np.eye(3)
        self.Q = process_variance * np.eye(6)
        self.Q[3, 3] = self.Q[3, 3] * 1e-3
        self.Q[4, 4] = self.Q[4, 4] * 1e-3
        self.Q[5, 5] = self.Q[5, 5] * 1e-3
        self.P = np.eye(6) * 0.1
        self.I = np.eye(6)
        self.expend = 1
        self.v = np.array([0, 0, 0])
        self.born_time = int(current_time.timestamp() * 1000)
        self.latest_update = self.born_time

        self.m_history = self.m
        self.s_history = []
        self.origin_set = [self.origin]

    def predict(self):
        F = self.F
        self.m = np.dot(F, self.m.T)  # simple one-step prediction model
        self.m = self.m.T
        self.P = np.dot(np.dot(F, self.P), F.T) + self.Q
        self.survive = self.survive * decay  # apply the decay factor
        self.origin_set = np.unique(np.array(self.origin_set), axis=0).tolist()  # rebuild the association set

    def update(self, res, run_timestamp, gate):
        self.duration += 0.6  # each update adds 0.6 to the track duration
        if len(res['distances']) == 0:
            mmd = 1e8
        else:
            min_distance_index = np.argmin(res['distances'])
            mmd = res['distances'][min_distance_index]
            measurement = res['measurements'][min_distance_index]

        # Perform the update
        if mmd < gate * self.expend:
            H = self.H
            I = self.I
            self.expend = max(self.expend * 0.8, 1)
            kalman_gain = np.dot(np.dot(self.P, H.T), np.linalg.pinv(np.dot(np.dot(H, self.P), H.T) + self.R))
            self.m += np.dot(kalman_gain, (measurement.T - np.dot(H, self.m.T)))
            self.m = self.m.T
            self.P = np.dot((I - np.dot(kalman_gain, H)), self.P)
            self.origin = [res['key_ids'][min_distance_index]]
            self.counter += 1
            self.survive = sigmoid(self.counter)  # new mapping function
            # Keep the filter from becoming over-confident about velocity
            self.P[3, 3] = max(1e-1, self.P[3, 3])
            self.P[4, 4] = max(1e-1, self.P[4, 4])
            self.P[5, 5] = max(1e-1, self.P[5, 5])
            # Slice out the velocity
            self.v = self.m[3:6]
            self.origin_set.append(self.origin)
            self.latest_update = run_timestamp  # bookkeeping for the update time
        else:
            self.expend = min(self.expend * 1.2, 1.5)  # no association: widen the gate and keep searching
            self.P[3, 3] = min(self.P[3, 3] * 1.1, 1)
            self.P[4, 4] = min(self.P[4, 4] * 1.1, 1)
            self.P[5, 5] = min(self.P[5, 5] * 1.1, 1)
            self.counter -= 1
            self.counter = max(self.counter, 0)

        self.m_history = np.vstack((self.m_history, self.m))
        self.s_history.append(self.survive)

    def one_correlation(self, data_matrix, id_list):
        # Distance between the current state and the rows of data_matrix
        min_distance, min_index = calculate_euclidean_distances(self.m[0:3], data_matrix)
        m_id = id_list[min_index]
        measurement = data_matrix[min_index, :]
        return m_id, min_distance, measurement

    def correlation(self, sensor_data):
        # Iterate over the sensors
        res = {'m_ids': [], 'distances': [], 'measurements': [], 'key_ids': []}
        for value in sensor_data:
            if len(value['id_list']) > 0:
                m_id, min_distance, measurement = self.one_correlation(value['data_matrix'], value['id_list'])
                key = value['deviceId']
                res['m_ids'].append(m_id)
                res['measurements'].append(measurement)
                res['key_ids'].append([key, m_id])
                # Give targets that were associated before a higher confidence
                if [key, m_id] in self.origin_set:
                    min_distance = min_distance * 0.2
                res['distances'].append(min_distance)
        return res


# Constructor of the fusion class
class DataFusion:
    def __init__(self, gate=25, interval=1, fusion_type=1,
                 measuremrnt_variance=1, process_variance=1e-1):
        """
        Initialize the DataFusion class.
        """
        # self.task_id = task_id
        self.interval = interval
        self.gate = gate
        self.targets = []
        self.fusion_type = fusion_type
        self.existence_thres = 0.01
        self.show_thres = show_thres
        self.process_variance = process_variance
        self.measuremrnt_variance = measuremrnt_variance

    def set_parameter(self, fusion_parms):
        print("GO!!!!!!!!!")
        print(fusion_parms)

    def obtain_priority(self, sensor_data):
        self.priority_dict = dict()
        for data in sensor_data:
            if data.get('priority'):
                self.priority_dict[data['deviceId']] = data['priority']
            else:
                self.priority_dict[data['deviceId']] = 1

    def out_transformer(self, target):
        out_former = {
            'objectId': target.id,
            'survive': target.survive.tolist(),
            'state': target.m.tolist(),
            'speed': np.linalg.norm(target.v).tolist() / self.interval,
            'source': target.source,
            'sigma': np.diag(target.P).tolist(),
            'X': target.m[0].tolist(),
            'Y': target.m[1].tolist(),
            'Z': target.m[2].tolist(),
            'Vx': target.v[0].tolist(),
            'Vy': target.v[1].tolist(),
            'Vz': target.v[2].tolist(),
            'born_time': str(target.born_time)
        }
        return out_former

    def run(self, sensor_data):
        current_time = datetime.datetime.now()
        run_timestamp = int(current_time.timestamp() * 1000)
        fusion_data = []
        selected_list = []
        self.obtain_priority(sensor_data)

        # Iterate over all known targets
        for target in self.targets:
            print(f"Fusion target id:{target.id} with survive: {target.survive} at :{target.m}\n")
            if target.survive < self.existence_thres:
                continue
            target.predict()
            res = target.correlation(sensor_data)
            target.update(res, run_timestamp, self.gate)
            # ==================================================
            now_id = []
            t_sum = 0
            for r, distance in enumerate(res['distances']):
                if distance < self.gate:
                    now_id.append(res['key_ids'][r])
                    selected_list.append(res['key_ids'][r])
                    D_Id = res['key_ids'][r][0]
                    t_sum += self.priority_dict[D_Id]
            target.source = now_id
            # ==================================================
            if self.fusion_type == 2 and t_sum < 2:
                target.survive = target.survive * 0.5

            out_former = self.out_transformer(target)
            if target.survive > self.show_thres:  # write the target out once its survival probability exceeds the threshold (0.4)
                fusion_data.append(out_former)

        # Filter the measurements according to the association result
        self.selected_list = selected_list
        for data in sensor_data:
            self.new_born(data)

        self.remove_duplicates()
        # ==================================================
        self.fusion_process_log(fusion_data)

        return fusion_data

    def new_born(self, value):
        for j, id in enumerate(value['id_list']):
            key = value['deviceId']
            if [key, id] not in self.selected_list:
                if self.fusion_type == 3:
                    if value['priority'] > 50:
                        self.targets.append(KalmanFilter(value['data_matrix'][j, :], [key, id], self.measuremrnt_variance, self.process_variance))
                else:
                    self.targets.append(KalmanFilter(value['data_matrix'][j, :], [key, id], self.measuremrnt_variance, self.process_variance))
                self.selected_list.append([key, id])  # add the newly created target to the set

    def remove_duplicates(self):
        # List of target ids scheduled for deletion
        to_delete = []

        # Iterate over all target indices
        for i in range(len(self.targets)):
            if self.targets[i].survive < self.existence_thres:
                to_delete.append(self.targets[i].id)
                continue
            if self.targets[i].survive < self.show_thres:
                continue
            for j in range(i + 1, len(self.targets)):
                # Check whether the two source lists are identical
                if are_lists_equal(self.targets[i].source, self.targets[j].source):
                    # If they are, mark the younger track for deletion
                    if self.targets[i].duration < self.targets[j].duration:
                        to_delete.append(self.targets[i].id)
                    else:
                        to_delete.append(self.targets[j].id)

        # Delete by id to keep target management simple
        for item_id in sorted(to_delete, reverse=True):
            for target in self.targets:
                if target.id == item_id:
                    self.targets.remove(target)
                    break

    def fusion_process_log(self, fusion_data):
        current_time = datetime.datetime.now()
        # Format the time as year-month-day hour:minute:second
        formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
        with open('process_log.txt', 'a') as log_file:  # open the log file in append mode
            log_file.write('=====================\n')    # separator
            log_file.write(f"time: {formatted_time}\n")  # timestamp
            log_file.write(f"data:\n {fusion_data}\n")   # message content
@@ -0,0 +1,53 @@
from KF_V2 import *
# ======================

sensor_id_list = ['AUV01', 'AUV02']

sensor_data = []
sensor_data.append({
    'data_matrix': np.array([[0, 0, 0], [100, 100, 100]]),
    'id_list': ['001', '002'],
    'deviceId': 'AUV01',
    'devicePs': [0.2],  # first value is the measurement error
    'latest_time': [0],
    'priority': 1
})
sensor_data.append({
    'data_matrix': np.array([[0, 0, 0], [100, 100, 100]]),
    'id_list': ['003', '004'],
    'deviceId': 'AUV02',
    'deivceProperties': [0.2],
    'latest_time': [0],
    'priority': 100
})


fusion_container = DataFusion(25, 1, 3)
for i in range(15):
    print(i)
    # Modify the data_matrix entries in sensor_data at the start of each iteration
    if i % 5 == 0:
        temp = {
            'data_matrix': np.array([]),
            'id_list': [],
            'deviceId': 'AUV01',
            'devicePs': [0.2],  # first value is the measurement error
            'latest_time': [0]
        }
        c_sensor_data = []
        c_sensor_data.append(temp)
        c_sensor_data.append(temp)
        filted_results = fusion_container.run(c_sensor_data)
    else:
        sensor_data[0]['data_matrix'][0, :] += 1  # add 1 to every element of the first row
        sensor_data[0]['data_matrix'][1, :] -= 1  # subtract 1 from every element of the second row
        sensor_data[1]['data_matrix'][0, :] += 1
        sensor_data[1]['data_matrix'][1, :] -= 1
        filted_results = fusion_container.run(sensor_data)


print("results:\n")
for d in filted_results:
    print(d)
142  agent-common/SplitProject/ranjing-python-devfusion/SensorTool.py  Normal file
@@ -0,0 +1,142 @@
import numpy as np
from scipy import signal


class AoAConverter:
    def __init__(self):
        self.p = [1e8, 1e8, 1e8]

    def to_cartesian(self, theta_rad, phi_rad):
        """Convert spherical coordinates to Cartesian coordinates."""
        # theta_rad = np.radians(theta)
        # phi_rad = np.radians(phi)
        # Note: the inputs are already in radians
        x = np.sin(theta_rad) * np.cos(phi_rad)
        y = np.sin(theta_rad) * np.sin(phi_rad)
        z = np.cos(theta_rad)
        pc = np.array([x, y, z])
        return pc

    def calc_error(self, pc, mc):
        # Squared differences between predicted and observed coordinates
        mc = np.expand_dims(mc, axis=1)
        diff_squared = (pc - mc) ** 2
        # Sum the squared differences to get the squared error
        error_squared = np.sum(diff_squared, axis=0)
        # Square root gives the error
        return np.sqrt(error_squared)

    def find_best_r(self, theta, phi, mc, r_range):
        """Search the given range for the best r value."""
        # Convert r_range to a NumPy array for vectorised operations
        r_values = np.array(r_range)
        # Unit direction vector in Cartesian coordinates
        pc = self.to_cartesian(theta, phi)
        # Expand dimensions for the matrix product
        r_values = np.expand_dims(r_values, axis=0)
        pc = np.expand_dims(pc, axis=1)
        # Errors for every candidate r
        # print([pc.shape, r_values.shape])
        D = np.dot(pc, r_values)
        errors = self.calc_error(D, mc)
        r_values = np.squeeze(r_values)

        # Minimum error and the corresponding r
        min_error = np.min(errors)
        best_r = r_values[np.argmin(errors)]  # r_values was squeezed back to 1-D above

        return [best_r, min_error]

    def projected_measure(self, theta, phi, r, p0):
        pc = self.to_cartesian(theta, phi)
        neo_p = r * pc + p0
        return np.array(neo_p)


converter = AoAConverter()


def calculate_euclidean_distances(A, BX):
    # Euclidean distances between A and the measurements in BX
    B = BX['data_matrix']
    N = B.shape[0]
    r_range = np.linspace(-5, 5, 100)
    if BX.get('AOA_pos'):
        # If the data comes from an AoA sensor, replace the bearings with projected positions
        sensor_pos = BX.get('AOA_pos')
        ob_pos = A - sensor_pos
        r0 = np.linalg.norm(ob_pos)
        B_new = []
        for i in range(N):
            theta = B[i, 0]
            phi = B[i, 1]
            [best_r, min_error] = converter.find_best_r(theta, phi, ob_pos, r0 + r_range)
            print(min_error)
            B_new.append(converter.projected_measure(theta, phi, best_r, sensor_pos))
        B_new = np.array(B_new)
    else:
        B_new = B

    distances = np.linalg.norm(A - B_new, axis=1)
    # Find the minimum distance and its index
    min_distance_index = np.argmin(distances)
    min_distance = distances[min_distance_index]
    return [min_distance, min_distance_index, B_new]


def are_lists_equal(listA, listB):
    # Sort the sub-lists of both lists before comparing
    if len(listA) == 0:
        return False
    sorted_listA = sorted(listA, key=lambda x: (x[0], x[1]))
    sorted_listB = sorted(listB, key=lambda x: (x[0], x[1]))
    # Compare the sorted lists for equality
    return sorted_listA == sorted_listB


def sigmoid(x, a=10, b=0.1):
    # Shifted sigmoid whose value is 0.5 at x = 1
    # a and b are tuning parameters that control the shape
    return 1 / (1 + np.exp(-a * (x - 1))) + b


def calculate_correlation(A, B):
    """
    Compute the maximum of the column-wise correlations of two arrays.

    Parameters:
    A -- first NumPy array
    B -- second NumPy array
    """
    A = np.exp(-1j * A / 50)
    B = np.exp(1j * B / 50)
    corr_res = []
    for col in range(3):
        a = A[:, col]
        b = B[:, col]
        convolution = signal.convolve(a, b[::-1])
        corr_res.append(convolution)
    max_corr = np.sum(np.abs(np.array(corr_res)), 0)
    max_corr = np.max(max_corr) / 3

    return max_corr


def calculate_history_distances(target, b):
    # Use both the backward history and a forward prediction
    A = target.m_history
    v = target.v
    # L2 norm (Euclidean distance) between each row and vector b
    if A.shape[0] < 10:
        return np.inf
    local_time = np.linspace(0, 10, 20)
    local_time = np.expand_dims(local_time, axis=1)
    v = np.expand_dims(v, axis=1)
    A_pre = A[-10:, 0:3]
    A_post = np.dot(local_time, v.T)
    A_all = np.vstack((A_pre, A_post))
    distances = np.linalg.norm(A_all - b, axis=1)
    # Minimum distance
    min_distance = np.min(distances)

    return min_distance
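A usage sketch for the AoA range search above (hypothetical, not part of the original file): build a bearing from a known sensor-relative position and check that find_best_r recovers a range close to the true one.

from SensorTool import converter
import numpy as np

target = np.array([30.0, 40.0, 50.0])        # true position relative to the sensor, metres
r_true = np.linalg.norm(target)              # ~70.7 m
theta = np.arccos(target[2] / r_true)        # polar angle from the z axis, radians
phi = np.arctan2(target[1], target[0])       # azimuth, radians
best_r, min_error = converter.find_best_r(theta, phi, target, np.linspace(60.0, 80.0, 201))
print(best_r, min_error)                     # best_r should land near r_true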
@@ -0,0 +1,26 @@
#!/bin/bash

# Usage: pick a suitable directory on the host, upload the latest project code,
# then place this script inside your project directory.


# Adjust the parameters below
if [[ $# -eq 0 ]]; then
    echo "tag version is null!"
    exit 233
fi

tag_version=$1

echo "start to build docker image tag is => ${tag_version}"
docker build -t harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:${tag_version} .
echo ""

echo "login to docker hub"
docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn

echo ""

echo "start to push image to hub!"
docker push harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:${tag_version}
374  agent-common/SplitProject/ranjing-python-devfusion/check.py  Normal file
@@ -0,0 +1,374 @@
import os
import subprocess
import paho.mqtt.client as mqtt
import json
import time
import threading
import logging
from config import *
import datetime
import schedule  # requires: pip install schedule
import yaml


# Load the YAML configuration
def load_mqtt_config():
    config_path = os.getenv('CONFIG_PATH', 'config.yaml')
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
    return config['mqtt'], config['topics']


# Read the MQTT and topics configuration
mqtt_config, topics_config = load_mqtt_config()

# Logging configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('check.log'),
        logging.StreamHandler()
    ]
)

# Running tasks and their configurations
running_tasks = {}
task_configs = {}

# Command template used to launch Dev_Fusion.py
fusion_command_template = f"nohup python Dev_Fusion.py -t {{task_id}} -g {DEV_FUSION_G} -i {DEV_FUSION_I} > /dev/null 2> error.log &"

# Log folder path
log_folder = "tasklog"
os.makedirs(log_folder, exist_ok=True)

# Global lock
task_lock = threading.Lock()


def compare_configs(old_config, new_config):
    """
    Compare two configurations for substantive differences.
    Returns True when they differ and a restart is needed,
    False when no restart is needed and forwarding is enough.
    """
    try:
        # 1. Check the devices list
        old_devices = old_config.get('devices', [])
        new_devices = new_config.get('devices', [])

        if len(old_devices) != len(new_devices):
            return True

        # Build a tuple of key fields per device for comparison
        def get_device_key(device):
            return (
                device.get('device_id'),
                device.get('device_topic'),
                device.get('device_type'),
                device.get('reference_point')
            )

        old_device_keys = {get_device_key(d) for d in old_devices}
        new_device_keys = {get_device_key(d) for d in new_devices}

        # Restart if any device key fields changed
        if old_device_keys != new_device_keys:
            return True

        # 2. Check the reference point
        old_ref = old_config.get('reference_point')
        new_ref = new_config.get('reference_point')

        if old_ref != new_ref:
            return True

        # 3. Changes to other parameters (e.g. sampling_rate) do not require a restart
        logging.info("No critical configuration changes detected")
        return False

    except Exception as e:
        logging.error(f"Error comparing configs: {str(e)}")
        return True  # treat errors as a difference and restart the instance to be safe


def stop_task(task_id):
    """Stop the given task instance."""
    try:
        if task_id in running_tasks:
            process = running_tasks[task_id]
            # Use pkill to terminate the matching Python process
            subprocess.run(f"pkill -f 'python.*Dev_Fusion.py.*-t {task_id}'", shell=True)
            process.wait(timeout=5)  # wait for the process to exit
            del running_tasks[task_id]
            del task_configs[task_id]
            logging.info(f"Task {task_id} stopped successfully")
    except Exception as e:
        logging.error(f"Error stopping task {task_id}: {str(e)}")


# Worker function executed in a thread
def handle_task(client, task_id, payload):
    try:
        with task_lock:  # protect shared state with the lock
            data = json.loads(payload)
            sensor_topic = topics_config['sensor_topic'].replace("+", task_id)

            # Record the configuration update
            log_file = os.path.join(log_folder, f"received_tasklog_{task_id}.txt")
            current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

            def log_config_update(action):
                with open(log_file, "a") as f:
                    f.write(f"\n=== Configuration Update at {current_time} ===\n")
                    f.write(f"Task ID: {task_id}\n")
                    f.write(f"MQTT_TOPIC: {topics_config['mqtt_topic']}\n")
                    f.write(f"Payload: {payload}\n")
                    f.write(f"Action: {action}\n")
                    f.write("=" * 50 + "\n")

            # Check whether the task is already running
            if task_id in running_tasks:
                # Check whether a configuration is stored
                if task_id in task_configs:
                    # Compare old and new configurations
                    if compare_configs(task_configs[task_id], data):
                        logging.info(f"Configuration changed for task {task_id}, restarting...")
                        stop_task(task_id)
                        log_config_update("Configuration changed, restarting instance")
                        start_new_instance(client, task_id, payload, data)
                    else:
                        # No configuration change: just forward the message
                        logging.info(f"No configuration change for task {task_id}, forwarding message")
                        log_config_update("Message forwarded (no critical changes)")
                        client.publish(sensor_topic, payload)
                else:
                    # No stored configuration: store the new one and forward
                    logging.info(f"No stored config for task {task_id}, storing first config")
                    task_configs[task_id] = data
                    log_config_update("First config stored and forwarded")
                    client.publish(sensor_topic, payload)
            else:
                # Task does not exist: start a new instance
                log_config_update("New instance started")
                start_new_instance(client, task_id, payload, data)

    except Exception as e:
        logging.error(f"Error handling task {task_id}: {str(e)}")


def start_new_instance(client, task_id, payload, config):
    """Start a new Dev_Fusion instance."""
    try:
        # Launch the Dev_Fusion.py instance
        fusion_command = fusion_command_template.format(task_id=task_id)
        process = subprocess.Popen(fusion_command, shell=True)
        running_tasks[task_id] = process
        task_configs[task_id] = config

        logging.info(f"Dev_Fusion.py started successfully for Task ID {task_id}")

        # Save the log in append mode
        log_file = os.path.join(log_folder, f"received_tasklog_{task_id}.txt")
        current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        with open(log_file, "a") as f:  # append with mode "a"
            f.write(f"\n=== Configuration Update at {current_time} ===\n")
            f.write(f"Task ID: {task_id}\n")
            f.write(f"MQTT_TOPIC: {topics_config['mqtt_topic']}\n")
            f.write(f"Payload: {payload}\n")
            # Record whether a restart was triggered
            f.write("Action: New instance started\n")
            f.write("=" * 50 + "\n")

        # Give the instance time to start
        time.sleep(0.5)

        # Send the configuration
        sensor_topic = topics_config['sensor_topic'].replace("+", task_id)
        client.publish(sensor_topic, payload)
        logging.info(f"Configuration sent to {sensor_topic}")

    except Exception as e:
        logging.error(f"Error starting new instance for task {task_id}: {str(e)}")
        if task_id in running_tasks:
            del running_tasks[task_id]
            del task_configs[task_id]


# MQTT callbacks
def on_connect(client, userdata, flags, rc):
    if rc == 0:
        logging.info("Connected to MQTT broker")
        client.subscribe(topics_config['mqtt_topic'])  # topic taken from the YAML config
    else:
        logging.error(f"Connection failed with code {rc}: {DISCONNECT_REASONS.get(rc, 'Unknown error')}")


def on_message(client, userdata, msg):
    try:
        payload = msg.payload.decode("utf-8")
        logging.info(f"Received message on topic {msg.topic}")

        data = json.loads(payload)
        task_id = data.get("task_id")

        if task_id:
            thread = threading.Thread(target=handle_task, args=(client, task_id, payload))
            thread.start()
        else:
            logging.warning("Received message without task_id")

    except json.JSONDecodeError:
        logging.error("Received message is not valid JSON")
    except Exception as e:
        logging.error(f"Error processing message: {str(e)}")


def check_running_instances():
    """Detect Dev_Fusion instances already running on the system."""
    try:
        # Use ps to find running Dev_Fusion.py instances
        result = subprocess.run("ps aux | grep 'python.*Dev_Fusion.py' | grep -v grep",
                                shell=True, capture_output=True, text=True)

        found_instances = []
        for line in result.stdout.splitlines():
            # Extract the task_id from the command line
            if '-t' in line:
                parts = line.split()
                for i, part in enumerate(parts):
                    if part == '-t' and i + 1 < len(parts):
                        task_id = parts[i + 1]
                        pid = parts[1]  # the PID is usually in the second column
                        found_instances.append((task_id, pid))

        for task_id, pid in found_instances:
            logging.info(f"Found running instance for task {task_id}, pid: {pid}")

            # Read the latest configuration of this task
            config = read_latest_config(task_id)
            if config:
                # Register the already-running instance in running_tasks
                running_tasks[task_id] = subprocess.Popen(['echo', ''], stdout=subprocess.PIPE)
                running_tasks[task_id].pid = int(pid)
                task_configs[task_id] = config
                logging.info(
                    f"Successfully loaded config for task {task_id} from tasklog/received_tasklog_{task_id}.txt")
            else:
                logging.warning(f"No valid config found for task {task_id}, stopping instance...")
                subprocess.run(f"pkill -f 'python.*Dev_Fusion.py.*-t {task_id}'", shell=True)
                logging.info(f"Stopped instance {task_id} due to missing config")

        logging.info(f"Finished checking instances. Loaded {len(running_tasks)} tasks with valid configs")

    except Exception as e:
        logging.error(f"Error checking running instances: {str(e)}")


def read_latest_config(task_id):
    """Read the latest stored configuration for the given task."""
    try:
        log_file = os.path.join(log_folder, f"received_tasklog_{task_id}.txt")
        if not os.path.exists(log_file):
            logging.error(f"No log file found for task {task_id}")
            return None

        with open(log_file, 'r') as f:
            content = f.read()

        # Split by configuration-update blocks
        updates = content.split('=== Configuration Update at')
        if not updates:
            return None

        # Take the last update block
        latest_update = updates[-1]

        # Extract the payload
        payload_start = latest_update.find('Payload: ') + len('Payload: ')
        payload_end = latest_update.find('\nAction:')
        if payload_end == -1:  # no Action line present
            payload_end = latest_update.find('\n===')

        if payload_start > 0 and payload_end > payload_start:
            payload = latest_update[payload_start:payload_end].strip()
            return json.loads(payload)

        return None
    except Exception as e:
        logging.error(f"Error reading latest config for task {task_id}: {str(e)}")
        return None


def restart_all_instances():
    """Restart all running instances."""
    logging.info("Scheduled restart: Beginning restart of all instances")

    # Copy the current task list because running_tasks is modified below
    tasks_to_restart = list(running_tasks.keys())

    for task_id in tasks_to_restart:
        try:
            # Read the latest configuration
            config = read_latest_config(task_id)
            if not config:
                logging.error(f"Could not find latest config for task {task_id}, skipping restart")
                continue

            # Stop the current instance
            logging.info(f"Stopping task {task_id} for scheduled restart")
            stop_task(task_id)

            # Serialise the configuration back to JSON
            payload = json.dumps(config)

            # Start a new instance
            logging.info(f"Starting new instance for task {task_id} with latest config")
            start_new_instance(mqtt_client, task_id, payload, config)

        except Exception as e:
            logging.error(f"Error restarting task {task_id}: {str(e)}")


def setup_scheduled_restart(restart_time="03:00"):
    """Set up the scheduled daily restart."""
    schedule.every().day.at(restart_time).do(restart_all_instances)

    def run_schedule():
        while True:
            schedule.run_pending()
            time.sleep(30)  # check every 30 seconds

    # Start the scheduler thread
    scheduler_thread = threading.Thread(target=run_schedule, daemon=True)
    scheduler_thread.start()


def main():
    global mqtt_client  # module-level handle, also used by the scheduled restart

    # Check for already-running instances at startup
    check_running_instances()

    # Create the MQTT client
    mqtt_client = mqtt.Client()
    mqtt_client.on_connect = on_connect
    mqtt_client.on_message = on_message
    mqtt_client.username_pw_set(mqtt_config['username'], mqtt_config['password'])

    # Schedule the daily restart (03:00 by default)
    setup_scheduled_restart()

    while True:
        try:
            mqtt_client.connect(mqtt_config['broker'], mqtt_config['port'], 60)
            mqtt_client.loop_forever()
        except Exception as e:
            logging.error(f"MQTT connection error: {str(e)}")
            time.sleep(5)


if __name__ == "__main__":
    main()
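For reference, a task-configuration message of the shape check.py and the Pipeline parameter extraction expect might look like the sketch below; the concrete values are illustrative only, not taken from a real deployment.

{
    "task_id": "554343465692430336",
    "reference_point": [104.08, 30.51],
    "gate": 40,
    "interval": 0.6,
    "devices": [
        {
            "device_id": "10580015",
            "device_topic": "bridge/DP74b4ef9fb4aaf269/device_data/5ga/10580015",
            "device_type": "5ga",
            "priority": 1,
            "properties": {"sampling_rate": 1}
        }
    ]
}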
@@ -0,0 +1,10 @@
mqtt:
  broker: "192.168.35.178"
  port: 31884
  username: "cmlc"
  password: "4YPk*DS%+5"

topics:
  mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
  sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
67  agent-common/SplitProject/ranjing-python-devfusion/config.py  Normal file
@@ -0,0 +1,67 @@
# # MQTT configuration
# broker = "192.168.35.178"  # broker address
# port = 31883  # port
# username = "cmlc"
# password = "odD8#Ve7.B"
#
# # Topic used by check.py
# MQTT_TOPIC = "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
#
# # Topic used by Dev_Fusion.py
# SENSOR_TOPIC = "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"

# Parameters for the Dev_Fusion.py launch command configured in check.py
DEV_FUSION_G = 40   # parameter g
DEV_FUSION_I = 0.6  # parameter i

# KF_V2 settings
shift_value = 1
survive_initial = 0.25
decay = 0.7
show_thres = 0.4
reference_point = (104.08, 30.51)

# Logging configuration
DISCONNECT_REASONS = {
    0: "Normal disconnect",
    1: "Protocol version mismatch",
    2: "Invalid client identifier",
    3: "Server unavailable",
    4: "Bad username or password",
    5: "Not authorized",
    6: "Message broker unavailable",
    7: "TLS error",
    8: "QoS not supported",
    9: "Client banned",
    10: "Server busy",
    11: "Client banned (certificate related)",
    128: "Unspecified error",
    129: "Malformed packet",
    130: "Protocol error",
    131: "Communication error",
    132: "Server keep-alive timeout",
    133: "Server internal error",
    134: "Server shutting down",
    135: "Server out of resources",
    136: "Client network socket error",
    137: "Server closing the connection",
    138: "Connection refused by the server",
    139: "Version not supported by the server",
    140: "Client ID already in use",
    141: "Connection rate limit exceeded",
    142: "Maximum number of connections exceeded",
    143: "Keep-alive timeout",
    144: "Session taken over",
    145: "Connection lost",
    146: "Invalid topic alias",
    147: "Packet too large",
    148: "Message rate too high",
    149: "Quota exceeded",
    150: "Administrative action",
    151: "Invalid payload format",
    152: "Retain not supported",
    153: "QoS not supported",
    154: "Use another server",
    155: "Server moved",
    156: "Connection not supported",
}
@@ -0,0 +1,10 @@
mqtt:
  broker: "192.168.35.178"
  port: 31883
  username: "cmlc"
  password: "odD8#Ve7.B"

topics:
  mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
  sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
@@ -0,0 +1,19 @@
try {
    $ErrorActionPreference = "Stop"

    Write-Host "1. Uploading binary exec..." -ForegroundColor Green
    ssh root@192.168.35.71 "mkdir -p /root/wdd/ranjing-python-devfusion/"
    scp C:\Users\wdd\IdeaProjects\ProjectOctopus\agent-common\SplitProject\ranjing-python-devfusion\* root@192.168.35.71:/root/wdd/ranjing-python-devfusion/

    Write-Host "2. Exec the command ..." -ForegroundColor Blue
    Write-Host ""
    Write-Host ""
    ssh root@192.168.35.71 "cd /root/wdd/ranjing-python-devfusion/ && docker build -t ranjing/dev-fusion:v1.0 ."
    Write-Host ""
    Write-Host ""
} catch {
    Write-Host "Operation failed: $_" -ForegroundColor Red
    exit 1
}
Binary file not shown.
@@ -0,0 +1,8 @@
#!/bin/bash


docker run --name devfusion \
    -d \
    --rm \
    -v /root/wdd/ranjing-python-devfusion/config-dev.yaml:/dev-fusion/config.yaml \
    harbor.cdcdyy.com.cn/cmii/cmii-uavms-pyfusion:6.2.0
@@ -0,0 +1,62 @@
from math import radians, degrees, sin, cos, atan2, sqrt


def convert_to_cartesian(lat, lon, reference_point):
    """Convert latitude/longitude to Cartesian coordinates relative to a reference point, using the Earth ellipsoid model."""
    # Earth ellipsoid parameters (WGS84)
    a = 6378137.0  # semi-major axis, metres
    f = 1 / 298.257223563  # flattening
    e2 = 2 * f - f ** 2  # first eccentricity squared

    # Unpack the reference point
    ref_lat, ref_lon = reference_point

    # Convert to radians
    lat_rad = radians(lat)
    lon_rad = radians(lon)
    ref_lat_rad = radians(ref_lat)
    ref_lon_rad = radians(ref_lon)

    # Radius of curvature
    N = a / sqrt(1 - e2 * sin(ref_lat_rad) ** 2)  # prime-vertical radius of curvature at the reference point

    # Planar Cartesian coordinates relative to the reference point
    delta_lon = lon_rad - ref_lon_rad
    X = (N + 0) * cos(ref_lat_rad) * delta_lon
    Y = (a * (1 - e2)) / (1 - e2 * sin(ref_lat_rad) ** 2) * (lat_rad - ref_lat_rad)

    return X, Y


def convert_to_geodetic(x, y, reference_point):
    """Convert Cartesian coordinates back to latitude/longitude, using the Earth ellipsoid model."""
    # Earth ellipsoid parameters (WGS84)
    a = 6378137.0  # semi-major axis, metres
    f = 1 / 298.257223563  # flattening
    e2 = 2 * f - f ** 2  # first eccentricity squared

    # Unpack the reference point
    ref_lat, ref_lon = reference_point

    # Convert to radians
    ref_lat_rad = radians(ref_lat)
    ref_lon_rad = radians(ref_lon)

    # Radius of curvature
    N = a / sqrt(1 - e2 * sin(ref_lat_rad) ** 2)  # prime-vertical radius of curvature at the reference point

    # Latitude
    lat_rad = y * (1 - e2 * sin(ref_lat_rad) ** 2) / (a * (1 - e2)) + ref_lat_rad

    # Longitude
    if cos(ref_lat_rad) == 0:
        lon_rad = 0
    else:
        lon_rad = x / ((N + 0) * cos(ref_lat_rad)) + ref_lon_rad

    # Back to degrees
    lat = degrees(lat_rad)
    lon = degrees(lon_rad)

    return lat, lon
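A round-trip check for the two conversions above (hypothetical, not part of the original file): project a nearby point into the local Cartesian frame and back. Since convert_to_geodetic inverts convert_to_cartesian for the same reference point, the residuals should be essentially zero.

from transformation import convert_to_cartesian, convert_to_geodetic

ref = (31.88, 117.24)                  # (lat, lon) reference point, illustrative
x, y = convert_to_cartesian(31.89, 117.25, ref)
lat, lon = convert_to_geodetic(x, y, ref)
print(x, y, lat - 31.89, lon - 117.25)  # X/Y in metres, residuals ~0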
423  agent-common/SplitProject/ranjing-python-devfusion/utils.py  Normal file
@@ -0,0 +1,423 @@
|
|||||||
|
import datetime
|
||||||
|
from transformation import *
|
||||||
|
import json
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
|
||||||
|
class Pipeline:
|
||||||
|
def __init__(self, fusion_parameters_topic,reference_point):
|
||||||
|
self.fusion_parameters_topic = fusion_parameters_topic
|
||||||
|
self.task_id = '554343465692430336'
|
||||||
|
self.reference_point = reference_point
|
||||||
|
# self.deviceId = deviceId
|
||||||
|
self.sensor_id_list = ["10000000000"]
|
||||||
|
self.fusionCode = 'DPZYLY'
|
||||||
|
self.publish_topic = f"bridge/{self.fusionCode}/device_data/fusion/{self.task_id}"
|
||||||
|
self.priority_dict = {"10000000000":1}
|
||||||
|
self.uavInfo_bucket = dict()
|
||||||
|
self.target_bowl = dict()
|
||||||
|
self.device_info_dict = dict()
|
||||||
|
self.device_type_mapping = {
|
||||||
|
"5ga": 0,
|
||||||
|
"radar": 1,
|
||||||
|
"spec": 2,
|
||||||
|
"oe": 3,
|
||||||
|
"cm": 4,
|
||||||
|
"dec": 5,
|
||||||
|
"ifr": 6,
|
||||||
|
"cv": 7,
|
||||||
|
"isrs": 8,
|
||||||
|
"aoa": 9,
|
||||||
|
"tdoa": 10,
|
||||||
|
"dcd": 11,
|
||||||
|
"direct": 100,
|
||||||
|
"rtk": 101,
|
||||||
|
"rid": 102,
|
||||||
|
"fusion": 1000,
|
||||||
|
"other": 999 # 假设 'other' 对应于未知设备类型
|
||||||
|
}
|
||||||
|
self.device_type_speedrank = {
|
||||||
|
"radar": 1,
|
||||||
|
"spec": 2,
|
||||||
|
"oe": 3,
|
||||||
|
"cm": 4,
|
||||||
|
"dec": 5,
|
||||||
|
"ifr": 6,
|
||||||
|
"cv": 7,
|
||||||
|
"isrs": 8,
|
||||||
|
"aoa": 9,
|
||||||
|
"tdoa": 10,
|
||||||
|
"dcd": 13,
|
||||||
|
"direct": 12,
|
||||||
|
"5ga": 11,
|
||||||
|
"rid": 14,
|
||||||
|
"rtk": 15,
|
||||||
|
"other": 0 # 假设 'other' 对应于未知设备类型
|
||||||
|
}
|
||||||
|
|
||||||
|
import json
|
||||||
|
|
||||||
|
def process_json_data(self, json_data):
|
||||||
|
"""
|
||||||
|
将 JSON 数据转换为字典,并添加 X 和 Y 属性。
|
||||||
|
"""
|
||||||
|
data_dict = json.loads(json_data)
|
||||||
|
|
||||||
|
# 安全访问 'ptTime' 键
|
||||||
|
pt_time = data_dict.get('ptTime')
|
||||||
|
if pt_time is not None:
|
||||||
|
print(pt_time)
|
||||||
|
else:
|
||||||
|
print("Key 'ptTime' not found in data_dict.")
|
||||||
|
|
||||||
|
# 安全访问 'objects' 键
|
||||||
|
objects = data_dict.get('objects')
|
||||||
|
if objects is None:
|
||||||
|
print("Key 'objects' not found in data_dict.")
|
||||||
|
return data_dict # 如果 'objects' 键不存在,直接返回原始字典或根据需要进行其他处理
|
||||||
|
|
||||||
|
# 如果 'objects' 键存在,继续处理
|
||||||
|
for record in objects:
|
||||||
|
# 检查 'latitude' 和 'longitude' 键是否存在于 record 中
|
||||||
|
if 'latitude' in record and 'longitude' in record:
|
||||||
|
lat = record['latitude']
|
||||||
|
lon = record['longitude']
|
||||||
|
X, Y = convert_to_cartesian(lat, lon, self.reference_point)
|
||||||
|
record['X'] = X
|
||||||
|
record['Y'] = Y
|
||||||
|
else:
|
||||||
|
print("Record is missing 'latitude' or 'longitude' keys.")
|
||||||
|
|
||||||
|
return data_dict
|
||||||
|
|
||||||
|
def data_encoder(self, data_list):
|
||||||
|
"""
|
||||||
|
生成数据矩阵和 ID 列表。
|
||||||
|
"""
|
||||||
|
sensor_data = []
|
||||||
|
for sensor_id in self.sensor_id_list:
|
||||||
|
temp = {'data_matrix': [],
|
||||||
|
'id_list': [],
|
||||||
|
'deviceId': sensor_id,
|
||||||
|
'latest_time': [],
|
||||||
|
'priority':1}
|
||||||
|
for record in data_list:
|
||||||
|
if record.get('noteData'):
|
||||||
|
obj = record['noteData']
|
||||||
|
obj['objectId'] = obj['uasId']
|
||||||
|
obj['deviceId'] = obj["extension"]['deviceId']
|
||||||
|
record['objects'] = [obj]
|
||||||
|
|
||||||
|
if record['deviceId'] == sensor_id:
|
||||||
|
temp['priority'] = self.priority_dict[sensor_id]
|
||||||
|
if record.get('objects'):
|
||||||
|
for obj in record['objects']:
|
||||||
|
if obj['objectId'] in temp['id_list']:
|
||||||
|
position = temp['id_list'].index(obj['objectId'])
|
||||||
|
if int(record['ptTime']) > int(temp['latest_time'][position]):
|
||||||
|
temp['data_matrix'][position] = [obj['X'], obj['Y'], obj['altitude']]
|
||||||
|
else:
|
||||||
|
temp['data_matrix'].append([obj['X'], obj['Y'], obj['altitude']])
|
||||||
|
temp['id_list'].append(obj['objectId'])
|
||||||
|
temp['latest_time'].append(record['ptTime'])
|
||||||
|
# 把扩展地段写入
|
||||||
|
if obj.get('extension'):
|
||||||
|
B_id = str(record['deviceId'])+str(obj['objectId'])
|
||||||
|
self.uavInfo_bucket[B_id] = obj['extension']
|
||||||
|
# 如果对象有speed字段,将其添加到extension中
|
||||||
|
if obj.get('speed'):
|
||||||
|
self.uavInfo_bucket[B_id]['speed'] = obj['speed']
|
||||||
|
# 如果对象有height字段,也存储它
|
||||||
|
if obj.get('height'):
|
||||||
|
self.uavInfo_bucket[B_id]['height'] = obj['height']
|
||||||
|
|
||||||
|
# 写入到数据字典中
|
||||||
|
temp['data_matrix'] = np.array(temp['data_matrix'])
|
||||||
|
sensor_data.append(temp)
|
||||||
|
return sensor_data
|
||||||
|
|
||||||
|
def process_extension(self, target):
|
||||||
|
# 定义一个字典,包含给定的键值对
|
||||||
|
extension = {
|
||||||
|
"objectType": 30,
|
||||||
|
"uavSN": "Un-known",
|
||||||
|
"uavModel": "Un-known",
|
||||||
|
"pilotLat": 0.0,
|
||||||
|
"pilotLon": 0.0,
|
||||||
|
"speedX": 0.0,
|
||||||
|
"speedY": 0.0,
|
||||||
|
"speedZ": 0.0,
|
||||||
|
"time": 0.0,
|
||||||
|
"born_time": 0.0
|
||||||
|
}
|
||||||
|
|
||||||
|
# 从target_bowl获取历史值
|
||||||
|
if target['objectId'] in self.target_bowl.keys():
|
||||||
|
extension = self.target_bowl[target['objectId']]
|
||||||
|
|
||||||
|
result_source = target['source']
|
||||||
|
# 对数据进行更新
|
||||||
|
for source in result_source:
|
||||||
|
id = str(source[0]) + str(source[1])
|
||||||
|
if self.uavInfo_bucket.get(id):
|
||||||
|
for key, value in self.uavInfo_bucket[id].items():
|
||||||
|
# 只有当新值是有效值时才更新
|
||||||
|
if value not in ["Un-known", 0.0, None, "Unknown", "DJI Mavic"]:
|
||||||
|
extension[key] = value
|
||||||
|
|
||||||
|
extension['born_time'] = int(target['born_time'])
|
||||||
|
|
||||||
|
# 更新target_bowl以保持状态
|
||||||
|
self.target_bowl[target['objectId']] = extension
|
||||||
|
|
||||||
|
return extension
|
||||||
|
|
||||||
|
def data_decoder(self, filtered_results):
|
||||||
|
"""
|
||||||
|
解码过滤后的结果。
|
||||||
|
"""
|
||||||
|
current_time = datetime.datetime.now()
|
||||||
|
timestamp = int(current_time.timestamp() * 1000)
|
||||||
|
combined_objects = []
|
||||||
|
for target in filtered_results:
|
||||||
|
X = target['X']
|
||||||
|
Y = target['Y']
|
||||||
|
Z = target['Z'] # 这里的Z实际上是altitude
|
||||||
|
lat, lon = convert_to_geodetic(X, Y, self.reference_point)
|
||||||
|
extension = self.process_extension(target)
|
||||||
|
extension['time'] = int(timestamp)
|
||||||
|
extension['born_time'] = int(int(target['born_time']) / 1000) # 毫秒单位数据
|
||||||
|
|
||||||
|
new_origin_source = []
|
||||||
|
for source in target['source']:
|
||||||
|
device_id, object_id = source
|
||||||
|
# 从 device_info_dict 获取设备缩写
|
||||||
|
device_abbreviation = self.device_info_dict.get(device_id, {}).get('device_type', 'other')
|
||||||
|
# 使用映射字典获取设备类型
|
||||||
|
device_type = self.device_type_mapping.get(device_abbreviation, 999)
|
||||||
|
new_origin_source.append(f"{device_type}_{device_id}_{object_id}")
|
||||||
|
|
||||||
|
# 根据优先级顺序选择速度
|
||||||
|
highest_priority_speed = None
|
||||||
|
highest_priority = float('inf')
|
||||||
|
|
||||||
|
for source in target['source']:
|
||||||
|
device_id, object_id = source
|
||||||
|
B_id = str(device_id) + str(object_id)
|
||||||
|
if self.uavInfo_bucket.get(B_id):
|
||||||
|
device_type = self.device_info_dict.get(device_id, {}).get('device_type', 'other')
|
||||||
|
priority = self.device_type_speedrank.get(device_type, float('inf'))
|
||||||
|
|
||||||
|
if priority < highest_priority:
|
||||||
|
highest_priority = priority
|
||||||
|
# 获取速度并进行单位转换
|
||||||
|
speed = self.uavInfo_bucket[B_id].get('speed', target['speed'])
|
||||||
|
if device_type == "5ga": # 如果设备类型是5ga,进行转换
|
||||||
|
speed = speed / 3.6 # 从 km/h 转换为 m/s
|
||||||
|
highest_priority_speed = speed
|
||||||
|
|
||||||
|
# 确保 highest_priority_speed 是从设备获取的速度
|
||||||
|
if highest_priority_speed is None:
|
||||||
|
# 如果没有找到当前速度,查找历史记录中的速度
|
||||||
|
for obj in reversed(combined_objects):
|
||||||
|
if obj["objectId"] == target['objectId']:
|
||||||
|
highest_priority_speed = obj.get("speed")
|
||||||
|
break
|
||||||
|
|
||||||
|
if highest_priority_speed is None:
|
||||||
|
print(f"Warning: No speed found for target {target['objectId']}, using default target speed.")
|
||||||
|
new_speed = target['speed']
|
||||||
|
else:
|
||||||
|
new_speed = highest_priority_speed
|
||||||
|
else:
|
||||||
|
new_speed = highest_priority_speed
|
||||||
|
|
||||||
|
# Debug 输出,检查速度来源
|
||||||
|
print(f"Selected speed for target {target['objectId']}: {new_speed} from device with priority {highest_priority}")
|
||||||
|
|
||||||
|
# 获取height字段
|
||||||
|
height = None
|
||||||
|
for source in target['source']:
|
||||||
|
device_id, object_id = source
|
||||||
|
B_id = str(device_id) + str(object_id)
|
||||||
|
if self.uavInfo_bucket.get(B_id):
|
||||||
|
if self.uavInfo_bucket[B_id].get('height'):
|
||||||
|
height = self.uavInfo_bucket[B_id]['height']
|
||||||
|
break
|
||||||
|
|
||||||
|
# 如果当前没有获取到height,查找历史记录中的height
|
||||||
|
if height is None:
|
||||||
|
for obj in reversed(combined_objects):
|
||||||
|
if obj["objectId"] == target['objectId']:
|
||||||
|
prev_height = obj.get("height")
|
||||||
|
if prev_height is not None: # 如果找到有效的历史height
|
||||||
|
height = prev_height
|
||||||
|
break
|
||||||
|
|
||||||
|
# 如果仍然没有找到height,保持上一次的最新历史height
|
||||||
|
if height is None and combined_objects:
|
||||||
|
for obj in reversed(combined_objects):
|
||||||
|
if obj["objectId"] == target['objectId']:
|
||||||
|
height = obj.get("height")
|
||||||
|
break
|
||||||
|
|
||||||
|
temp = {
|
||||||
|
# "msg_cnt":result['msg_cnt'],#增加msg_cnt用于检测有无丢包
|
||||||
|
"objectId": target['objectId'],
|
||||||
|
"X": X,
|
||||||
|
"Y": Y,
|
||||||
|
"height": height, # 使用当前height或历史height
|
||||||
|
"altitude": Z,
|
||||||
|
"speed": new_speed, # 使用优先级最高的速度
|
||||||
|
'latitude': lat,
|
||||||
|
'longitude': lon,
|
||||||
|
'sigma': target['sigma'],
|
||||||
|
"extension": {
|
||||||
|
"origin_source": new_origin_source, # 更新后的 origin_source
|
||||||
|
# 其他extension字段...
|
||||||
|
"objectType": extension.get('objectType', 0),
|
||||||
|
"uavSN": extension.get("uavSN", "Un-known"),
|
||||||
|
"uavModel": extension.get("uavModel", "Un-known"),
|
||||||
|
"pilotLat": extension.get("pilotLat", 0.0),
|
||||||
|
"pilotLon": extension.get("pilotLon", 0.0),
|
||||||
|
"speedX": 0.0, # 不再使用速度分量
|
||||||
|
"speedY": 0.0,
|
||||||
|
"speedZ": 0.0,
|
||||||
|
"time": int(timestamp),
|
||||||
|
"born_time": int(int(target['born_time']) / 1000),
|
||||||
|
},
|
||||||
|
"time": int(timestamp),
|
||||||
|
}
|
||||||
|
|
||||||
|
# 检查extension中的objectType是否已经被设置为非0值,如果是,则不再覆盖.
|
||||||
|
if extension.get('objectType', 0) != 0 or target['objectId'] not in [obj['objectId'] for obj in
|
||||||
|
combined_objects]:
|
||||||
|
temp["extension"]["objectType"] = extension.get('objectType', 0)
|
||||||
|
else:
|
||||||
|
# 查找combined_objects中相同objectId的objectType,如果不存在则使用0
|
||||||
|
        existing_object_types = [obj["extension"].get('objectType', 0) for obj in combined_objects
                                 if obj["objectId"] == target['objectId']]
        if existing_object_types and existing_object_types[0] != 0:
            temp["extension"]["objectType"] = existing_object_types[0]
        else:
            temp["extension"]["objectType"] = 0

        # Check and update uavSN and uavModel
        invalid_values = ["Un-known", 0.0, None, "Unknown", "DJI Mavic"]

        # Check that uavSN is a letter-digit combination, to keep odd values introduced elsewhere out
        current_sn = extension.get('uavSN', "Un-known")
        if isinstance(current_sn, str):
            has_letter = any(c.isalpha() for c in current_sn)
            has_digit = any(c.isdigit() for c in current_sn)
            if not (has_letter and has_digit):
                # First look for a historical valid SN with the same objectId
                for obj in reversed(combined_objects):
                    if obj["objectId"] == target['objectId']:
                        prev_sn = obj["extension"].get("uavSN", "Un-known")
                        if isinstance(prev_sn, str):
                            has_letter = any(c.isalpha() for c in prev_sn)
                            has_digit = any(c.isdigit() for c in prev_sn)
                            if has_letter and has_digit:
                                current_sn = prev_sn
                                break
        temp["extension"]["uavSN"] = current_sn
        temp["extension"]["uavModel"] = extension.get('uavModel', "Un-known")

        combined_objects.append(temp)

        data_processed = {
            "deviceType": 1000,
            "providerCode": "DPZYLY",
            "deviceId": self.task_id,
            "objects": combined_objects,
            "ptTime": int(timestamp)
        }
        # Keep only meaningful data
        if data_processed and data_processed.get("objects") and len(data_processed["objects"]) > 0:
            formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
            with open('PB_log.txt', 'a') as log_file:  # open the log file in append mode
                log_file.write('=====================\n')  # write a separator
                log_file.write(f"time: {formatted_time}\n")  # write the timestamp
                log_file.write(f"data: {data_processed}\n")
            return data_processed

    def extract_parms(self, parm_data):
        """
        Extract task parameters.
        """
        id_list = []  # device IDs
        priority_dict = {}  # device priorities
        device_info_dict = {}  # new: dict of detailed device info for later lookups

        data_dict = json.loads(parm_data)
        print(data_dict)
        self.task_id = data_dict['task_id']
        new_topics = [("fromcheck/DPZYLY/fly_data/rtk/#", 0)]

        devices = data_dict['devices']
        for device in devices:
            device_id = device['device_id']
            if device_id:
                id_list.append(device_id)
                new_topics.append((device["device_topic"], 0))

                # Store the device priority; the default priority is 1
                if device.get('priority'):
                    priority_dict[device_id] = device['priority']
                else:
                    priority_dict[device_id] = 1

                # Store the device details (topic, type, sampling_rate) to support one-to-many mapping
                device_info_dict[device_id] = {
                    'device_topic': device['device_topic'],
                    'device_type': device['device_type'],
                    'sampling_rate': device['properties'].get('sampling_rate', 1)  # defaults to 1 if not provided
                }

        self.priority_dict = priority_dict
        self.device_info_dict = device_info_dict  # keep the device info dict on the instance
        self.sensor_id_list = id_list

        # Handle the reference point
        if data_dict.get('reference_point'):
            try:
                original_reference_point = data_dict['reference_point']
                if len(original_reference_point) == 2:  # make sure it is a tuple or list with two elements
                    self.reference_point = (
                        float(original_reference_point[0]) + 0,
                        float(original_reference_point[1]) + 0
                    )
                else:
                    raise ValueError("Invalid reference_point structure. Must be a tuple or list with two elements.")
            except Exception as e:
                print(f"Error processing reference_point: {e}")
                self.reference_point = None  # or fall back to some default value

        return new_topics

    def extract_fusion_parms(self, parm_data):
        data_dict = json.loads(parm_data)
        # Define fusion_dict with the keys to be extracted from data_dict
        fusion_dict = {
            "fusion_type": 1,
            "gate": 1,
            "interval": 1,
            "show_thres": 0.4
        }

        # Update fusion_dict with any matching keys present in data_dict
        if "fusion_type" in data_dict:
            fusion_dict["fusion_type"] = data_dict["fusion_type"]
        if "gate" in data_dict:
            fusion_dict["gate"] = data_dict["gate"]
        if "interval" in data_dict:
            fusion_dict["interval"] = data_dict["interval"]
        if "show_thres" in data_dict:
            fusion_dict["show_thres"] = data_dict["show_thres"]

        # Return the updated fusion_dict
        return fusion_dict
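The two helpers above are easiest to read next to a concrete payload. The sketch below is illustrative only: the task JSON (task_id, device entries, reference_point, show_thres) is made up, and it re-derives the subscription list and fusion defaults the same way extract_parms and extract_fusion_parms do, rather than calling the class that owns them.

import json

# Hypothetical task payload with the fields extract_parms() reads; all values are invented.
sample_task = {
    "task_id": "task-001",
    "devices": [
        {
            "device_id": "radar-01",
            "device_topic": "fromcheck/DPZYLY/radar/radar-01",
            "device_type": "radar",
            "priority": 2,
            "properties": {"sampling_rate": 2},
        },
    ],
    "reference_point": [104.06, 30.67],
    "show_thres": 0.5,
}

data_dict = json.loads(json.dumps(sample_task))

# Mirror of the subscription list extract_parms() builds:
# the fixed RTK wildcard topic plus one topic per configured device, all at QoS 0.
topics = [("fromcheck/DPZYLY/fly_data/rtk/#", 0)]
topics += [(d["device_topic"], 0) for d in data_dict["devices"] if d["device_id"]]
print(topics)

# Mirror of extract_fusion_parms(): defaults overridden by any matching keys in the payload.
fusion = {"fusion_type": 1, "gate": 1, "interval": 1, "show_thres": 0.4}
fusion.update({k: data_dict[k] for k in fusion if k in data_dict})
print(fusion)  # show_thres becomes 0.5, the rest keep their defaults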
@@ -0,0 +1,71 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: fe-configmap
  namespace: doriscluster
  labels:
    app.kubernetes.io/component: fe
data:
  fe.conf: |
    CUR_DATE=`date +%Y%m%d-%H%M%S`

    # the output dir of stderr and stdout
    LOG_DIR = ${DORIS_HOME}/log

    JAVA_OPTS="-Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UseMembar -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xloggc:$DORIS_HOME/log/fe.gc.log.$CUR_DATE"

    # For jdk 9+, this JAVA_OPTS will be used as default JVM options
    JAVA_OPTS_FOR_JDK_9="-Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xlog:gc*:$DORIS_HOME/log/fe.gc.log.$CUR_DATE:time"

    # INFO, WARN, ERROR, FATAL
    sys_log_level = INFO

    # NORMAL, BRIEF, ASYNC
    sys_log_mode = NORMAL

    # Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
    # jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers

    http_port = 8030
    arrow_flight_sql_port = 9090
    rpc_port = 9020
    query_port = 9030
    edit_log_port = 9010

    enable_fqdn_mode = true
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: be-configmap
  namespace: doriscluster
  labels:
    app.kubernetes.io/component: be
data:
  be.conf: |
    CUR_DATE=`date +%Y%m%d-%H%M%S`

    PPROF_TMPDIR="$DORIS_HOME/log/"

    JAVA_OPTS="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xloggc:$DORIS_HOME/log/be.gc.log.$CUR_DATE -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -DJDBC_MIN_POOL=1 -DJDBC_MAX_POOL=100 -DJDBC_MAX_IDLE_TIME=300000 -DJDBC_MAX_WAIT_TIME=5000"

    # For jdk 9+, this JAVA_OPTS will be used as default JVM options
    JAVA_OPTS_FOR_JDK_9="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xlog:gc:$DORIS_HOME/log/be.gc.log.$CUR_DATE -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -DJDBC_MIN_POOL=1 -DJDBC_MAX_POOL=100 -DJDBC_MAX_IDLE_TIME=300000 -DJDBC_MAX_WAIT_TIME=5000"

    # since 1.2, the JAVA_HOME need to be set to run BE process.
    # JAVA_HOME=/path/to/jdk/

    # https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
    # https://jemalloc.net/jemalloc.3.html
    JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,lg_tcache_max:20,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
    JEMALLOC_PROF_PRFIX=""

    # INFO, WARNING, ERROR, FATAL
    sys_log_level = INFO

    # ports for admin, web, heartbeat service
    be_port = 9060
    webserver_port = 8040
    heartbeat_service_port = 9050
    arrow_flight_sql_port = 39091
    brpc_port = 8060
@@ -0,0 +1,94 @@
apiVersion: doris.selectdb.com/v1
kind: DorisCluster
metadata:
  labels:
    app.kubernetes.io/name: doriscluster
  name: doriscluster-helm
  namespace: doriscluster
spec:
  feSpec:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: uavcloud.env
                  operator: In
                  values:
                    - doris
    replicas: 3
    image: 172.16.100.55:8033/cmii/doris.fe-ubuntu:2.1.6
    limits:
      cpu: 8
      memory: 16Gi
    requests:
      cpu: 2
      memory: 6Gi
    configMapInfo:
      # use kubectl create configmap fe-configmap --from-file=fe.conf
      configMapName: fe-configmap
      resolveKey: fe.conf
    persistentVolumes:
      - mountPath: /opt/apache-doris/fe/doris-meta
        name: doris-fe-2000g
        persistentVolumeClaimSpec:
          # when using a specific storageclass, the storageClassName should be reconfigured; see the annotation example.
          storageClassName: doris-nfs-sc-56
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 2000Gi
      - mountPath: /opt/apache-doris/fe/jdbc_drivers
        name: doriscluster-storage-fe-jdbc-drivers
        persistentVolumeClaimSpec:
          # when using a specific storageclass, the storageClassName should be reconfigured; see the annotation example.
          storageClassName: doris-nfs-sc-58
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 100Gi
  beSpec:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: uavcloud.env
                  operator: In
                  values:
                    - doris
    replicas: 3
    image: 172.16.100.55:8033/cmii/doris.be-ubuntu:2.1.6
    limits:
      cpu: 8
      memory: 24Gi
    requests:
      cpu: 2
      memory: 6Gi
    configMapInfo:
      # use kubectl create configmap be-configmap --from-file=be.conf
      configMapName: be-configmap
      resolveKey: be.conf
    persistentVolumes:
      - mountPath: /opt/apache-doris/be/storage
        name: doris-1-9000g-pvc
        persistentVolumeClaimSpec:
          # when using a specific storageclass, the storageClassName should be reconfigured; see the annotation example.
          storageClassName: doris-nfs-sc-57
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 9000Gi
      - mountPath: /opt/apache-doris/be/jdbc_drivers
        name: doriscluster-storage-be-jdbc-drivers
        persistentVolumeClaimSpec:
          # when using a specific storageclass, the storageClassName should be reconfigured; see the annotation example.
          storageClassName: doris-nfs-sc-58
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 1000Gi
119
agent-common/SplitProject/监管平台-Doris-集群部署/doris-deplyment.yaml
Normal file
@@ -0,0 +1,119 @@
apiVersion: doris.selectdb.com/v1
kind: DorisCluster
metadata:
  labels:
    app.kubernetes.io/name: doriscluster
  name: doriscluster-helm
  namespace: doriscluster
spec:
  feSpec:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: doris-deploy
                  operator: In
                  values:
                    - "true"
    replicas: 1
    image: 172.16.100.55:8033/cmii/doris.fe-ubuntu:2.1.6
    limits:
      cpu: 8
      memory: 16Gi
    requests:
      cpu: 2
      memory: 6Gi
    configMapInfo:
      # use kubectl create configmap fe-configmap --from-file=fe.conf
      configMapName: fe-configmap
      resolveKey: fe.conf
    nodeSelector:
      uavcloud.env: demo
    persistentVolumes:
      - mountPath: /opt/apache-doris/fe/doris-meta
        name: doriscluster-storage0
        persistentVolumeClaimSpec:
          # when using a specific storageclass, the storageClassName should be reconfigured; see the annotation example.
          storageClassName: doris-static-storage
          accessModes:
            - ReadWriteOnce
          resources:
            # notice: if the storage size is less than 5G, fe will not start normally.
            requests:
              storage: 500Gi
      - mountPath: /opt/apache-doris/fe/jdbc_drivers
        name: doriscluster-storage-fe-jdbc-drivers
        persistentVolumeClaimSpec:
          # when using a specific storageclass, the storageClassName should be reconfigured; see the annotation example.
          storageClassName: cmlc-nfs-storage
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 10Gi
  beSpec:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: doris-deploy
                  operator: In
                  values:
                    - "true"
    replicas: 3
    image: 172.16.100.55:8033/cmii/doris.be-ubuntu:2.1.6
    limits:
      cpu: 8
      memory: 24Gi
    requests:
      cpu: 2
      memory: 6Gi
    configMapInfo:
      # use kubectl create configmap be-configmap --from-file=be.conf
      configMapName: be-configmap
      resolveKey: be.conf
    nodeSelector:
      uavcloud.env: demo
    persistentVolumes:
      - mountPath: /opt/apache-doris/be/storage
        name: doriscluster-storage1
        persistentVolumeClaimSpec:
          # when using a specific storageclass, the storageClassName should be reconfigured; see the annotation example.
          storageClassName: doris-static-storage
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 9000Gi
      - mountPath: /opt/apache-doris/be/storage
        name: doriscluster-storage2
        persistentVolumeClaimSpec:
          # when using a specific storageclass, the storageClassName should be reconfigured; see the annotation example.
          storageClassName: doris-static-storage
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 9000Gi
      - mountPath: /opt/apache-doris/be/log
        name: doriscluster-storage3
        persistentVolumeClaimSpec:
          # when using a specific storageclass, the storageClassName should be reconfigured; see the annotation example.
          storageClassName: doris-static-storage
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 9000Gi
      - mountPath: /opt/apache-doris/be/jdbc_drivers
        name: doriscluster-storage-be-jdbc-drivers
        persistentVolumeClaimSpec:
          # when using a specific storageclass, the storageClassName should be reconfigured; see the annotation example.
          storageClassName: cmlc-nfs-storage
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 10Gi
7443
agent-common/SplitProject/监管平台-Doris-集群部署/doris-kind.yaml
Normal file
File diff suppressed because it is too large
340
agent-common/SplitProject/监管平台-Doris-集群部署/doris-operator.yaml
Normal file
@@ -0,0 +1,340 @@
# Source: doris-operator/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/name: serviceaccount
    app.kubernetes.io/instance: controller-doris-operator-sa
    app.kubernetes.io/component: rbac
    app.kubernetes.io/created-by: doris-operator
    app.kubernetes.io/part-of: doris-operator
    app.kubernetes.io/managed-by: Helm
  name: doris-operator
  namespace: doriscluster
---
# Source: doris-operator/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  creationTimestamp: null
  name: doris-operator
rules:
  - apiGroups:
      - apps
    resources:
      - statefulsets
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - apps
    resources:
      - statefulsets/status
    verbs:
      - get
  - apiGroups:
      - autoscaling
    resources:
      - horizontalpodautoscalers
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - persistentvolumeclaims
    verbs:
      - get
      - list
      - watch
      - update
      - patch
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - serviceaccounts
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - doris.selectdb.com
    resources:
      - dorisclusters
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - doris.selectdb.com
    resources:
      - dorisclusters/finalizers
    verbs:
      - update
  - apiGroups:
      - doris.selectdb.com
    resources:
      - dorisclusters/status
    verbs:
      - get
      - patch
      - update
  - apiGroups:
      - rbac.authorization.k8s.io
    resources:
      - clusterrolebindings
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - rbac.authorization.k8s.io
    resources:
      - rolebindings
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
---
# Source: doris-operator/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: clusterrolebinding
    app.kubernetes.io/instance: doris-operator-rolebinding
    app.kubernetes.io/component: rbac
    app.kubernetes.io/created-by: doris-operator
    app.kubernetes.io/part-of: doris-operator
    app.kubernetes.io/managed-by: Helm
  name: doris-operator-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: doris-operator
subjects:
  - kind: ServiceAccount
    name: doris-operator
    namespace: doriscluster
---
# Source: doris-operator/templates/leader-election-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/name: role
    app.kubernetes.io/instance: leader-election-role
    app.kubernetes.io/component: rbac
    app.kubernetes.io/created-by: doris-operator
    app.kubernetes.io/part-of: doris-operator
    app.kubernetes.io/managed-by: Helm
  name: leader-election-role
  namespace: doriscluster
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - patch
      - delete
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
---
# Source: doris-operator/templates/leader-election-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/name: rolebinding
    app.kubernetes.io/instance: leader-election-rolebinding
    app.kubernetes.io/component: rbac
    app.kubernetes.io/created-by: doris-operator
    app.kubernetes.io/part-of: doris-operator
    app.kubernetes.io/managed-by: Helm
  name: leader-election-rolebinding
  namespace: doriscluster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: leader-election-role
subjects:
  - kind: ServiceAccount
    name: doris-operator
    namespace: doriscluster
---
# Source: doris-operator/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: doris-operator
  namespace: doriscluster
  labels:
    control-plane: doris-operator
    app.kubernetes.io/name: deployment
    app.kubernetes.io/instance: doris-operator
    app.kubernetes.io/component: doris-operator
    app.kubernetes.io/created-by: doris-operator
    app.kubernetes.io/part-of: doris-operator
spec:
  selector:
    matchLabels:
      control-plane: doris-operator
  replicas: 1
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/default-container: doris-operator
      labels:
        control-plane: doris-operator
    spec:
      # TODO(user): Uncomment the following code to configure the nodeAffinity expression
      # according to the platforms which are supported by your solution.
      # It is considered best practice to support multiple architectures. You can
      # build your manager image using the makefile target docker-buildx.
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: doris-deploy
                    operator: In
                    values:
                      - "true"
      securityContext:
        runAsNonRoot: true
        # TODO(user): For common cases that do not require escalating privileges
        # it is recommended to ensure that all your Pods/Containers are restrictive.
        # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
        # Please uncomment the following code if your project does NOT have to work on old Kubernetes
        # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
        # seccompProfile:
        #   type: RuntimeDefault
      containers:
        - command:
            - /dorisoperator
          args:
            - --leader-elect
          image: 172.16.100.55:8033/cmii/doris.k8s-operator:1.3.1
          name: dorisoperator
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - "ALL"
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8081
            initialDelaySeconds: 15
            periodSeconds: 20
          readinessProbe:
            httpGet:
              path: /readyz
              port: 8081
            initialDelaySeconds: 5
            periodSeconds: 10
          # TODO(user): Configure the resources accordingly based on the project requirements.
          # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
          resources:
            requests:
              cpu: 2
              memory: 4Gi
            limits:
              cpu: 2
              memory: 4Gi
      serviceAccountName: doris-operator
      terminationGracePeriodSeconds: 10
@@ -1,5 +1,341 @@
package real_project

var Cmii620ArmImageList = []string{
    "harbor.cdcyy.com.cn/cmii/cmii-uav-integration:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-sync:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-open-gateway:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-mission:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-notice:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-user:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-bridge:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-brain:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-tower:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-iot-dispatcher:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-developer:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-device:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-advanced5g:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-process:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-sky-converge:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-user:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-fwdd:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-app-release:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-cms:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-logger:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uavms-security-center:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uavms-platform-security-center:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-pilot2-to-cloud:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-uavms-platform-manager:6.2.0-szgz-arm",
    "harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
    "harbor.cdcyy.com.cn/cmii/srs:v5.0.195",
    "harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL",
}

var Cmii620ImageList = []string{
    "harbor.cdcyy.com.cn/cmii/cmii-open-gateway:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-integration:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-process:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-bridge:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-logger:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-mission:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-app-release:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-cms:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-developer:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-iot-dispatcher:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-device:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-sync:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-brain:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-notice:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-tower:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-advanced5g:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-user:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uavms-security-center:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-fwdd:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-user:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uavms-platform-security-center:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-pilot2-to-cloud:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:6.2.0-demo",
    "harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
    "harbor.cdcyy.com.cn/cmii/srs:v5.0.195",
    "harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL",
}

var Cmii611ImageList = []string{
    "harbor.cdcyy.com.cn/cmii/cmii-uav-brain:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-bridge:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-logger:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-notice:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-app-release:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-developer:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-device:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uavms-security-center:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-user:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-cms:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-process:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-tower:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-sync:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-iot-dispatcher:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-mission:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-fwdd:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-open-gateway:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-integration:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-user:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uavms-platform-security-center:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-pilot2-to-cloud:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:6.1.1",
    "harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
    "harbor.cdcyy.com.cn/cmii/srs:v5.0.195",
    "harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA-skip-CHL",
}

var Cmii600ImageList = []string{
    "harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-ruoyi:2024102802",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-user:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-brain:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.8.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-sense-adapter:6.0.0-snapshot-1026-db-confidence-bird",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:6.0.0-31369-102401",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-logger:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:6.0.0-31369-yunnan-092402",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-iam-gateway:5.6.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-device:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.7.0-32108-0930",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-advanced5g:6.0.0-102001",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-mission:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-notice:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:5.5.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-multilink:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-app-release:4.2.0-validation",
    "harbor.cdcyy.com.cn/cmii/cmii-open-gateway:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-developer:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-process:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:5.2.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-admin-user:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-cms:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uas-lifecycle:6.0.0-102901",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.2.0-beta",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:6.0.0-32443-102201",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:5.7.0-hjltt",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.7.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-pilot2-to-cloud:6.0.0-092502",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:5.6.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.6.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:5.2.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uas:6.0.0-102301",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.6.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.6.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:5.7.0-29766-0815",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform:6.0.0-master600",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:5.2.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.6.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-dispatchh5:5.6.0-0708",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-platform-uasms:6.0.0-31981",
    "harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:6.0.0",
    "harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA",
    "harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0",
    "harbor.cdcyy.com.cn/cmii/ossrs/srs:v5.0.195",
}

var Cmii570ImageList = []string{
    "harbor.cdcyy.com.cn/cmii/cmii-uas-gateway:5.6.0",
    "harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:5.5.0",
@@ -2,7 +2,7 @@ kind: Deployment
 apiVersion: apps/v1
 metadata:
   name: cmii-uav-iot-dispatcher
-  namespace: ynejpt
+  namespace: hbyd
   labels:
     app.kubernetes.io/app-version: 5.7.0
     app.kubernetes.io/managed-by: octopus-control
@@ -28,7 +28,7 @@ spec:
             claimName: nfs-backend-log-pvc
       containers:
         - name: cmii-uav-iot-dispatcher
-          image: '192.168.118.14:8033/cmii/cmii-uav-iot-dispatcher:5.7.0'
+          image: '192.168.0.10:8033/cmii/cmii-uav-iot-dispatcher:6.1.0'
           ports:
             - name: pod-port
               containerPort: 8080
@@ -37,7 +37,7 @@ spec:
             - name: ENV
               value: develop
             - name: VERSION
-              value: 5.7.0
+              value: 6.0.0
             - name: NACOS_SYSTEM_CONFIG_NAME
               value: cmii-backend-system
             - name: NACOS_SERVICE_CONFIG_NAME
@@ -53,7 +53,7 @@ spec:
            - name: SVC_NAME
              value: cmlc-uav-iot-dispatcher-svc
            - name: K8S_NAMESPACE
-             value: ynejpt
+             value: hbyd
            - name: APPLICATION_NAME
              value: cmii-uav-iot-dispatcher
            - name: CUST_JAVA_OPTS
@@ -68,11 +68,11 @@ spec:
            - name: NACOS_DISCOVERY_PORT
              value: '8080'
            - name: BIZ_CONFIG_GROUP
-             value: 5.7.0
+             value: 6.0.0
            - name: SYS_CONFIG_GROUP
-             value: 5.7.0
+             value: 6.0.0
            - name: IMAGE_VERSION
-             value: 5.7.0
+             value: 6.0.0
           resources:
             limits:
               cpu: '2'
@@ -107,7 +107,7 @@ kind: Service
 apiVersion: v1
 metadata:
   name: cmii-uav-iot-dispatcher
-  namespace: ynejpt
+  namespace: hbyd
   labels:
     app.kubernetes.io/app-version: 5.7.0
     app.kubernetes.io/managed-by: octopus-control
148
agent-common/real_project/cmii-uav-watchdog-agent-test.yaml
Normal file
@@ -0,0 +1,148 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-watchdog-agent
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-watchdog-agent
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 6.2.0
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: backend
      cmii.app: cmii-uav-watchdog-agent
  template:
    metadata:
      labels:
        cmii.type: backend
        cmii.app: cmii-uav-watchdog-agent
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: uavcloud.env
                    operator: In
                    values:
                      - devflight
      imagePullSecrets:
        - name: harborsecret
      containers:
        - name: cmii-uav-watchdog-agent
          image: harbor.cdcyy.com.cn/cmii/cmii-uav-watchdog-agent-java:2025-03-26-17-09-09
          imagePullPolicy: Always
          env:
            - name: K8S_NAMESPACE
              value: uavcloud-devflight
            - name: APPLICATION_NAME
              value: cmii-uav-watchdog-agent
            - name: CUST_JAVA_OPTS
              value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
            - name: NACOS_REGISTRY
              value: "helm-nacos:8848"
            - name: NACOS_DISCOVERY_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NACOS_DISCOVERY_PORT
              value: "8080"
            - name: BIZ_CONFIG_GROUP
              value: 6.2.0
            - name: SYS_CONFIG_GROUP
              value: 6.2.0
            - name: IMAGE_VERSION
              value: 6.2.0
            - name: NACOS_USERNAME
              value: "developer"
            - name: NACOS_PASSWORD
              value: "Deve@9128201"
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: NODE_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: LIMIT_CPU
              valueFrom:
                resourceFieldRef:
                  containerName: cmii-uav-watchdog-agent
                  resource: limits.cpu
            - name: LIMIT_MEMORY
              valueFrom:
                resourceFieldRef:
                  containerName: cmii-uav-watchdog-agent
                  resource: limits.memory
            - name: REQUEST_CPU
              valueFrom:
                resourceFieldRef:
                  containerName: cmii-uav-watchdog-agent
                  resource: requests.cpu
            - name: REQUEST_MEMORY
              valueFrom:
                resourceFieldRef:
                  containerName: cmii-uav-watchdog-agent
                  resource: requests.memory
            - name: WATCHDOG_HEARTBEAT_URL
              value: http://cmii-uav-watchdog:8080/api/heartbeat
          ports:
            - name: pod-port
              containerPort: 8080
              protocol: TCP
          resources:
            limits:
              memory: 2Gi
              cpu: "2"
            requests:
              memory: 200Mi
              cpu: 200m
          volumeMounts:
            - name: nfs-backend-log-volume
              mountPath: /cmii/logs
              readOnly: false
              subPath: uavcloud-devflight/cmii-uav-watchdog-agent
      volumes:
        - name: nfs-backend-log-volume
          persistentVolumeClaim:
            claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: cmii-uav-watchdog-agent
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-watchdog-agent
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 6.2.0
spec:
  type: ClusterIP
  selector:
    cmii.type: backend
    cmii.app: cmii-uav-watchdog-agent
  ports:
    - name: backend-tcp
      port: 8080
      protocol: TCP
      targetPort: 8080
---
178
agent-common/real_project/cmii-uav-watchdog-test.yaml
Normal file
178
agent-common/real_project/cmii-uav-watchdog-test.yaml
Normal file
@@ -0,0 +1,178 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-watchdog
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-watchdog
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 6.2.0
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: backend
      cmii.app: cmii-uav-watchdog
  template:
    metadata:
      labels:
        cmii.type: backend
        cmii.app: cmii-uav-watchdog
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: uavcloud.env
                operator: In
                values:
                - devflight
      imagePullSecrets:
      - name: harborsecret
      containers:
      - name: cmii-uav-watchdog
        image: harbor.cdcyy.com.cn/cmii/cmii-uav-watchdog:2025-03-26-17-33-09
        imagePullPolicy: Always
        env:
        - name: K8S_NAMESPACE
          value: uavcloud-devflight
        - name: APPLICATION_NAME
          value: cmii-uav-watchdog
        - name: CUST_JAVA_OPTS
          value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
        - name: NACOS_REGISTRY
          value: "helm-nacos:8848"
        - name: NACOS_DISCOVERY_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: NACOS_DISCOVERY_PORT
          value: "8080"
        - name: BIZ_CONFIG_GROUP
          value: 6.2.0
        - name: SYS_CONFIG_GROUP
          value: 6.2.0
        - name: IMAGE_VERSION
          value: 6.2.0
        - name: NACOS_USERNAME
          value: "developer"
        - name: NACOS_PASSWORD
          value: "Deve@9128201"
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: NODE_IP
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        - name: LIMIT_CPU
          valueFrom:
            resourceFieldRef:
              containerName: cmii-uav-watchdog
              resource: limits.cpu
        - name: LIMIT_MEMORY
          valueFrom:
            resourceFieldRef:
              containerName: cmii-uav-watchdog
              resource: limits.memory
        - name: REQUEST_CPU
          valueFrom:
            resourceFieldRef:
              containerName: cmii-uav-watchdog
              resource: requests.cpu
        - name: REQUEST_MEMORY
          valueFrom:
            resourceFieldRef:
              containerName: cmii-uav-watchdog
              resource: requests.memory
        ports:
        - name: pod-port
          containerPort: 8080
          protocol: TCP
        resources:
          limits:
            memory: 2Gi
            cpu: "2"
          requests:
            memory: 200Mi
            cpu: 200m
        volumeMounts:
        - name: nfs-backend-log-volume
          mountPath: /cmii/cmii-uav-watchdog/
          readOnly: false
          subPath: uavcloud-devflight/cmii-uav-watchdog
        - name: cmii-uav-watchdog-conf
          mountPath: /cmii/cmii-uav-watchdog/config.yaml
          subPath: config.yaml
      volumes:
      - name: nfs-backend-log-volume
        persistentVolumeClaim:
          claimName: nfs-backend-log-pvc
      - name: cmii-uav-watchdog-conf
        configMap:
          name: cmii-uav-watchdog-configmap
          items:
          - key: config.yaml
            path: config.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: cmii-uav-watchdog
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-watchdog
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 6.2.0
spec:
  type: NodePort
  selector:
    cmii.type: backend
    cmii.app: cmii-uav-watchdog
  ports:
  - name: backend-tcp
    port: 8080
    protocol: TCP
    targetPort: 8080
    nodePort: 34567
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: cmii-uav-watchdog-configmap
  namespace: uavcloud-devflight
data:
  config.yaml: |-
    server:
      port: "8080"  # server port

    tier_one_auth:
      tier_one_secret: "NK537TIWSUOFIS7SYCUJ6A7FPOGFVM3UH67TJRX3IYQAHKZXK2X7SBAA6JOXZVSV3U6K5YZUX7Q6TWOPK6YCRU6MIML33ZJFBN55I2Q"  # TOTP secret
      time_offset_allowed: 30  # allowed time offset (seconds)

    watchdog_center:
      url: "https://watchdog-center.example.com"  # tier-one authorization center URL

    project:
      project_namespace: "uavcloud-devflight"  # project namespace

    tier_two_auth:
      tier_two_secret: "your_tier_two_secret_here"  # tier-two authorization secret
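The config.yaml above carries a base32 TOTP secret (tier_one_secret) and a permitted clock skew (time_offset_allowed). A minimal Go sketch of such a check is shown below, assuming standard RFC 6238 parameters (30-second step, 6 digits, HMAC-SHA1); the real cmii-uav-watchdog validation logic is not part of this diff, and the TIER_ONE_SECRET environment variable used in main is hypothetical.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base32"
	"encoding/binary"
	"fmt"
	"os"
	"strings"
	"time"
)

// totpAt computes the 6-digit code for one 30-second counter step.
func totpAt(secret string, step int64) (string, error) {
	key, err := base32.StdEncoding.WithPadding(base32.NoPadding).
		DecodeString(strings.ToUpper(strings.TrimSpace(secret)))
	if err != nil {
		return "", err
	}
	var msg [8]byte
	binary.BigEndian.PutUint64(msg[:], uint64(step))
	mac := hmac.New(sha1.New, key)
	mac.Write(msg[:])
	sum := mac.Sum(nil)
	off := sum[len(sum)-1] & 0x0f
	code := (binary.BigEndian.Uint32(sum[off:off+4]) & 0x7fffffff) % 1_000_000
	return fmt.Sprintf("%06d", code), nil
}

// verify accepts the code if it matches the current step or any step within
// time_offset_allowed seconds on either side (30 s => +/- one step here).
func verify(secret, code string, offsetAllowed time.Duration) bool {
	now := time.Now().Unix()
	steps := int64(offsetAllowed/time.Second) / 30
	for d := -steps; d <= steps; d++ {
		if c, err := totpAt(secret, now/30+d); err == nil && hmac.Equal([]byte(c), []byte(code)) {
			return true
		}
	}
	return false
}

func main() {
	secret := os.Getenv("TIER_ONE_SECRET") // e.g. the tier_one_secret value from config.yaml
	fmt.Println(verify(secret, "123456", 30*time.Second))
}
```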
agent-common/real_project/cmii-uavms-pyfusion.yaml (new file, 138 lines)
@@ -0,0 +1,138 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: pyfusion-configmap
  namespace: uavcloud-devflight
data:
  config.yaml: |-
    mqtt:
      broker: "helm-emqxs"
      port: 1883
      username: "cmlc"
      password: "4YPk*DS%+5"

    topics:
      mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
      sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: cmii-uavms-pyfusion
  namespace: uavcloud-devflight
  labels:
    app.kubernetes.io/app-version: 6.2.0
    app.kubernetes.io/managed-by: octopus-control
    cmii.app: cmii-uavms-pyfusion
    cmii.type: backend
    octopus/control: backend-app-1.0.0
spec:
  replicas: 1
  selector:
    matchLabels:
      cmii.app: cmii-uavms-pyfusion
      cmii.type: backend
  template:
    metadata:
      creationTimestamp: null
      labels:
        cmii.app: cmii-uavms-pyfusion
        cmii.type: backend
    spec:
      volumes:
      - name: nfs-backend-log-volume
        persistentVolumeClaim:
          claimName: nfs-backend-log-pvc
      - name: pyfusion-conf
        configMap:
          name: pyfusion-configmap
          items:
          - key: config.yaml
            path: config.yaml
      containers:
      - name: cmii-uavms-pyfusion
        image: 'harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:6.2.0'
        ports:
        - name: pod-port
          containerPort: 8080
          protocol: TCP
        env:
        - name: VERSION
          value: 6.2.0
        - name: NACOS_SYSTEM_CONFIG_NAME
          value: cmii-backend-system
        - name: NACOS_SERVICE_CONFIG_NAME
          value: cmii-uavms-pyfusion
        - name: NACOS_SERVER_ADDRESS
          value: 'helm-nacos:8848'
        - name: K8S_NAMESPACE
          value: uavcloud-devflight
        - name: APPLICATION_NAME
          value: cmii-uavms-pyfusion
        - name: NACOS_DISCOVERY_PORT
          value: '8080'
        - name: BIZ_CONFIG_GROUP
          value: 6.2.0
        - name: SYS_CONFIG_GROUP
          value: 6.2.0
        - name: IMAGE_VERSION
          value: 6.2.0
        resources:
          limits:
            cpu: '2'
            memory: 3Gi
          requests:
            cpu: 200m
            memory: 500Mi
        volumeMounts:
        - name: nfs-backend-log-volume
          mountPath: /cmii/logs
          subPath: uavcloud-devflight/cmii-uavms-pyfusion
        - name: pyfusion-conf
          mountPath: /app/config.yaml
          subPath: config.yaml
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: { }
      imagePullSecrets:
      - name: harborsecret
      affinity: { }
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
  name: cmii-uavms-pyfusion
  namespace: uavcloud-devflight
  labels:
    app.kubernetes.io/app-version: 6.2.0
    app.kubernetes.io/managed-by: octopus-control
    cmii.app: cmii-uavms-pyfusion
    cmii.type: backend
    octopus/control: backend-app-1.0.0
spec:
  ports:
  - name: backend-tcp
    protocol: TCP
    port: 8080
    targetPort: 8080
  selector:
    cmii.app: cmii-uavms-pyfusion
    cmii.type: backend
  type: ClusterIP
  sessionAffinity: None
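pyfusion-configmap points the service at the helm-emqxs broker and two wildcard device-data topics. The sketch below is an illustrative Go subscriber for that exact configuration using the Eclipse Paho client; the cmii-uavms-pyfusion image presumably implements the equivalent internally, so the client ID and QoS level chosen here are assumptions.

```go
package main

import (
	"log"

	mqtt "github.com/eclipse/paho.mqtt.golang"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://helm-emqxs:1883"). // mqtt.broker + mqtt.port from config.yaml
		SetUsername("cmlc").
		SetPassword("4YPk*DS%+5").
		SetClientID("pyfusion-sketch") // hypothetical client ID

	client := mqtt.NewClient(opts)
	if tok := client.Connect(); tok.Wait() && tok.Error() != nil {
		log.Fatalf("connect: %v", tok.Error())
	}

	handler := func(_ mqtt.Client, m mqtt.Message) {
		log.Printf("topic=%s payload=%d bytes", m.Topic(), len(m.Payload()))
	}
	// Subscribe to both wildcard topics from the ConfigMap.
	for _, topic := range []string{
		"bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+",
		"fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+",
	} {
		if tok := client.Subscribe(topic, 0, handler); tok.Wait() && tok.Error() != nil {
			log.Fatalf("subscribe %s: %v", topic, tok.Error())
		}
	}
	select {} // block forever while messages arrive
}
```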
agent-common/real_project/cmii_external/任一珂前端.yaml (new file, 91 lines)
@@ -0,0 +1,91 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-platform-renyike
  namespace: uavcloud-devoperation
  labels:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-renyike
    octopus.control: frontend-app-wdd
    app.kubernetes.io/app-version: 5.7.0
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: frontend
      cmii.app: cmii-uav-platform-renyike
  template:
    metadata:
      labels:
        cmii.type: frontend
        cmii.app: cmii-uav-platform-renyike
    spec:
      imagePullSecrets:
      - name: harborsecret
      containers:
      - name: cmii-uav-platform-renyike
        image: harbor.cdcyy.com.cn/cmii/cmii-uav-platform-renyike:6.0.0-20241202
        imagePullPolicy: Always
        env:
        - name: K8S_NAMESPACE
          value: uavcloud-devoperation
        - name: APPLICATION_NAME
          value: cmii-uav-platform-renyike
        ports:
        - name: platform-9528
          containerPort: 9528
          protocol: TCP
        resources:
          limits:
            cpu: "1"
            memory: 1Gi
          requests:
            cpu: 50m
            memory: 50Mi
        volumeMounts:
        - name: nginx-conf
          mountPath: /etc/nginx/conf.d/nginx.conf
          subPath: nginx.conf
        - name: tenant-prefix
          subPath: ingress-config.js
          mountPath: /home/cmii-platform/dist/ingress-config.js
      volumes:
      - name: nginx-conf
        configMap:
          name: nginx-cm
          items:
          - key: nginx.conf
            path: nginx.conf
      - name: tenant-prefix
        configMap:
          name: tenant-prefix-splice
          items:
          - key: ingress-config.js
            path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
  name: cmii-uav-platform-renyike
  namespace: uavcloud-devoperation
  labels:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-renyike
    octopus.control: frontend-app-wdd
    app.kubernetes.io/version: 5.7.0
spec:
  type: NodePort
  selector:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-renyike
  ports:
  - name: web-svc-port
    port: 9528
    protocol: TCP
    targetPort: 9528
    nodePort: 33333
---
agent-common/real_project/cmii_external/军哥区块链-前端.yaml (new file, 271 lines)
@@ -0,0 +1,271 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-platform-classification
  namespace: uavcloud-devflight
  labels:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-classification
    octopus.control: frontend-app-wdd
    app.kubernetes.io/app-version: 5.7.0
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: frontend
      cmii.app: cmii-uav-platform-classification
  template:
    metadata:
      labels:
        cmii.type: frontend
        cmii.app: cmii-uav-platform-classification
    spec:
      imagePullSecrets:
      - name: harborsecret
      containers:
      - name: cmii-uav-platform-classification
        image: harbor.cdcyy.com.cn/cmii/cmii-uav-platform-classification:5.6.0
        imagePullPolicy: Always
        env:
        - name: K8S_NAMESPACE
          value: uavcloud-devflight
        - name: APPLICATION_NAME
          value: cmii-uav-platform-classification
        ports:
        - name: platform-9528
          containerPort: 9528
          protocol: TCP
        resources:
          limits:
            cpu: "1"
            memory: 1Gi
          requests:
            cpu: 50m
            memory: 50Mi
        volumeMounts:
        - name: nginx-conf
          mountPath: /etc/nginx/conf.d/nginx.conf
          subPath: nginx.conf
        - name: tenant-prefix
          subPath: ingress-config.js
          mountPath: /home/cmii-platform/dist/ingress-config.js
      volumes:
      - name: nginx-conf
        configMap:
          name: nginx-cm
          items:
          - key: nginx.conf
            path: nginx.conf
      - name: tenant-prefix
        configMap:
          name: tenant-prefix-splice
          items:
          - key: ingress-config.js
            path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
  name: cmii-uav-platform-classification
  namespace: uavcloud-devflight
  labels:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-classification
    octopus.control: frontend-app-wdd
    app.kubernetes.io/version: 5.7.0
spec:
  type: ClusterIP
  selector:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-classification
  ports:
  - name: web-svc-port
    port: 9528
    protocol: TCP
    targetPort: 9528
---
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-platform-scanner
  namespace: uavcloud-devflight
  labels:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-scanner
    octopus.control: frontend-app-wdd
    app.kubernetes.io/app-version: 5.7.0
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: frontend
      cmii.app: cmii-uav-platform-scanner
  template:
    metadata:
      labels:
        cmii.type: frontend
        cmii.app: cmii-uav-platform-scanner
    spec:
      imagePullSecrets:
      - name: harborsecret
      containers:
      - name: cmii-uav-platform-scanner
        image: harbor.cdcyy.com.cn/cmii/cmii-uav-platform-scanner:5.6.0
        imagePullPolicy: Always
        env:
        - name: K8S_NAMESPACE
          value: uavcloud-devflight
        - name: APPLICATION_NAME
          value: cmii-uav-platform-scanner
        ports:
        - name: platform-9528
          containerPort: 9528
          protocol: TCP
        resources:
          limits:
            cpu: "1"
            memory: 1Gi
          requests:
            cpu: 50m
            memory: 50Mi
        volumeMounts:
        - name: nginx-conf
          mountPath: /etc/nginx/conf.d/nginx.conf
          subPath: nginx.conf
        - name: tenant-prefix
          subPath: ingress-config.js
          mountPath: /home/cmii-platform/dist/ingress-config.js
      volumes:
      - name: nginx-conf
        configMap:
          name: nginx-cm
          items:
          - key: nginx.conf
            path: nginx.conf
      - name: tenant-prefix
        configMap:
          name: tenant-prefix-splice
          items:
          - key: ingress-config.js
            path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
  name: cmii-uav-platform-scanner
  namespace: uavcloud-devflight
  labels:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-scanner
    octopus.control: frontend-app-wdd
    app.kubernetes.io/version: 5.7.0
spec:
  type: ClusterIP
  selector:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-scanner
  ports:
  - name: web-svc-port
    port: 9528
    protocol: TCP
    targetPort: 9528
---

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-platform-blockchain
  namespace: uavcloud-devflight
  labels:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-blockchain
    octopus.control: frontend-app-wdd
    app.kubernetes.io/app-version: 5.7.0
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: frontend
      cmii.app: cmii-uav-platform-blockchain
  template:
    metadata:
      labels:
        cmii.type: frontend
        cmii.app: cmii-uav-platform-blockchain
    spec:
      imagePullSecrets:
      - name: harborsecret
      containers:
      - name: cmii-uav-platform-blockchain
        image: harbor.cdcyy.com.cn/cmii/cmii-uav-platform-blockchain:5.6.0
        imagePullPolicy: Always
        env:
        - name: K8S_NAMESPACE
          value: uavcloud-devflight
        - name: APPLICATION_NAME
          value: cmii-uav-platform-blockchain
        ports:
        - name: platform-9528
          containerPort: 9528
          protocol: TCP
        resources:
          limits:
            cpu: "1"
            memory: 1Gi
          requests:
            cpu: 50m
            memory: 50Mi
        volumeMounts:
        - name: nginx-conf
          mountPath: /etc/nginx/conf.d/nginx.conf
          subPath: nginx.conf
        - name: tenant-prefix
          subPath: ingress-config.js
          mountPath: /home/cmii-platform/dist/ingress-config.js
      volumes:
      - name: nginx-conf
        configMap:
          name: nginx-cm
          items:
          - key: nginx.conf
            path: nginx.conf
      - name: tenant-prefix
        configMap:
          name: tenant-prefix-splice
          items:
          - key: ingress-config.js
            path: ingress-config.js
---
apiVersion: v1
kind: Service
metadata:
  name: cmii-uav-platform-blockchain
  namespace: uavcloud-devflight
  labels:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-blockchain
    octopus.control: frontend-app-wdd
    app.kubernetes.io/version: 5.7.0
spec:
  type: ClusterIP
  selector:
    cmii.type: frontend
    cmii.app: cmii-uav-platform-blockchain
  ports:
  - name: web-svc-port
    port: 9528
    protocol: TCP
    targetPort: 9528
---
agent-common/real_project/cmii_external/军哥区块链-后端.yaml (new file, 561 lines)
@@ -0,0 +1,561 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-blockchain
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-blockchain
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 5.7.0
spec:
  replicas: 0
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: backend
      cmii.app: cmii-uav-blockchain
  template:
    metadata:
      labels:
        cmii.type: backend
        cmii.app: cmii-uav-blockchain
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: uavcloud.env
                operator: In
                values:
                - devflight
      imagePullSecrets:
      - name: harborsecret
      containers:
      - name: cmii-uav-blockchain
        image: harbor.cdcyy.com.cn/cmii/cmii-uav-blockchain:3.2.2-snapshot
        imagePullPolicy: Always
        env:
        - name: K8S_NAMESPACE
          value: uavcloud-devflight
        - name: APPLICATION_NAME
          value: cmii-uav-blockchain
        - name: CUST_JAVA_OPTS
          value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
        - name: NACOS_REGISTRY
          value: "helm-nacos:8848"
        - name: NACOS_DISCOVERY_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: NACOS_DISCOVERY_PORT
          value: "8080"
        - name: BIZ_CONFIG_GROUP
          value: 5.7.0
        - name: SYS_CONFIG_GROUP
          value: 5.7.0
        - name: IMAGE_VERSION
          value: 5.7.0
        - name: NACOS_USERNAME
          value: "developer"
        - name: NACOS_PASSWORD
          value: "Deve@9128201"
        ports:
        - name: pod-port
          containerPort: 8080
          protocol: TCP
        resources:
          limits:
            memory: 2Gi
            cpu: "2"
          requests:
            memory: 200Mi
            cpu: 200m
        livenessProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 3
        readinessProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 3
        startupProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 3
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 5
        volumeMounts:
        - name: nfs-backend-log-volume
          mountPath: /cmii/logs
          readOnly: false
          subPath: uavcloud-devflight/cmii-uav-blockchain
      volumes:
      - name: nfs-backend-log-volume
        persistentVolumeClaim:
          claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: cmii-uav-blockchain
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-blockchain
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 5.7.0
spec:
  type: ClusterIP
  selector:
    cmii.type: backend
    cmii.app: cmii-uav-blockchain
  ports:
  - name: backend-tcp
    port: 8080
    protocol: TCP
    targetPort: 8080
---

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-container-scanner
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-container-scanner
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 5.7.0
spec:
  replicas: 0
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: backend
      cmii.app: cmii-uav-container-scanner
  template:
    metadata:
      labels:
        cmii.type: backend
        cmii.app: cmii-uav-container-scanner
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: uavcloud.env
                operator: In
                values:
                - devflight
      imagePullSecrets:
      - name: harborsecret
      containers:
      - name: cmii-uav-container-scanner
        image: harbor.cdcyy.com.cn/cmii/cmii-uav-container-scanner:5.6.0
        imagePullPolicy: Always
        env:
        - name: K8S_NAMESPACE
          value: uavcloud-devflight
        - name: APPLICATION_NAME
          value: cmii-uav-container-scanner
        - name: CUST_JAVA_OPTS
          value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
        - name: NACOS_REGISTRY
          value: "helm-nacos:8848"
        - name: NACOS_DISCOVERY_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: NACOS_DISCOVERY_PORT
          value: "8080"
        - name: BIZ_CONFIG_GROUP
          value: 5.7.0
        - name: SYS_CONFIG_GROUP
          value: 5.7.0
        - name: IMAGE_VERSION
          value: 5.7.0
        - name: NACOS_USERNAME
          value: "developer"
        - name: NACOS_PASSWORD
          value: "Deve@9128201"
        ports:
        - name: pod-port
          containerPort: 8080
          protocol: TCP
        resources:
          limits:
            memory: 2Gi
            cpu: "2"
          requests:
            memory: 200Mi
            cpu: 200m
        livenessProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 3
        readinessProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 3
        startupProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 3
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 5
        volumeMounts:
        - name: nfs-backend-log-volume
          mountPath: /cmii/logs
          readOnly: false
          subPath: uavcloud-devflight/cmii-uav-container-scanner
      volumes:
      - name: nfs-backend-log-volume
        persistentVolumeClaim:
          claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: cmii-uav-container-scanner
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-container-scanner
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 5.7.0
spec:
  type: ClusterIP
  selector:
    cmii.type: backend
    cmii.app: cmii-uav-container-scanner
  ports:
  - name: backend-tcp
    port: 8080
    protocol: TCP
    targetPort: 8080
---
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-container-scanner-go
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-container-scanner-go
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 5.7.0
spec:
  replicas: 0
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: backend
      cmii.app: cmii-uav-container-scanner-go
  template:
    metadata:
      labels:
        cmii.type: backend
        cmii.app: cmii-uav-container-scanner-go
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: uavcloud.env
                operator: In
                values:
                - devflight
      imagePullSecrets:
      - name: harborsecret
      containers:
      - name: cmii-uav-container-scanner-go
        image: harbor.cdcyy.com.cn/cmii/cmii-uav-container-scanner-go:5.6.0
        imagePullPolicy: Always
        env:
        - name: K8S_NAMESPACE
          value: uavcloud-devflight
        - name: APPLICATION_NAME
          value: cmii-uav-container-scanner-go
        - name: CUST_JAVA_OPTS
          value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
        - name: NACOS_REGISTRY
          value: "helm-nacos:8848"
        - name: NACOS_DISCOVERY_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: NACOS_DISCOVERY_PORT
          value: "8080"
        - name: BIZ_CONFIG_GROUP
          value: 5.7.0
        - name: SYS_CONFIG_GROUP
          value: 5.7.0
        - name: IMAGE_VERSION
          value: 5.7.0
        - name: NACOS_USERNAME
          value: "developer"
        - name: NACOS_PASSWORD
          value: "Deve@9128201"
        ports:
        - name: pod-port
          containerPort: 8080
          protocol: TCP
        resources:
          limits:
            memory: 2Gi
            cpu: "2"
          requests:
            memory: 200Mi
            cpu: 200m
        livenessProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 3
        readinessProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 3
        startupProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 3
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 5
        volumeMounts:
        - name: nfs-backend-log-volume
          mountPath: /cmii/logs
          readOnly: false
          subPath: uavcloud-devflight/cmii-uav-container-scanner-go
      volumes:
      - name: nfs-backend-log-volume
        persistentVolumeClaim:
          claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: cmii-uav-container-scanner-go
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-container-scanner-go
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 5.7.0
spec:
  type: ClusterIP
  selector:
    cmii.type: backend
    cmii.app: cmii-uav-container-scanner-go
  ports:
  - name: backend-tcp
    port: 8080
    protocol: TCP
    targetPort: 8080
---
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmii-uav-data-classification
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-data-classification
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 5.7.0
spec:
  replicas: 0
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      cmii.type: backend
      cmii.app: cmii-uav-data-classification
  template:
    metadata:
      labels:
        cmii.type: backend
        cmii.app: cmii-uav-data-classification
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: uavcloud.env
                operator: In
                values:
                - devflight
      imagePullSecrets:
      - name: harborsecret
      containers:
      - name: cmii-uav-data-classification
        image: harbor.cdcyy.com.cn/cmii/cmii-uav-data-classification:5.6.0
        imagePullPolicy: Always
        env:
        - name: K8S_NAMESPACE
          value: uavcloud-devflight
        - name: APPLICATION_NAME
          value: cmii-uav-data-classification
        - name: CUST_JAVA_OPTS
          value: "-Xms200m -Xmx1500m -Dlog4j2.formatMsgNoLookups=true"
        - name: NACOS_REGISTRY
          value: "helm-nacos:8848"
        - name: NACOS_DISCOVERY_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: NACOS_DISCOVERY_PORT
          value: "8080"
        - name: BIZ_CONFIG_GROUP
          value: 5.7.0
        - name: SYS_CONFIG_GROUP
          value: 5.7.0
        - name: IMAGE_VERSION
          value: 5.7.0
        - name: NACOS_USERNAME
          value: "developer"
        - name: NACOS_PASSWORD
          value: "Deve@9128201"
        ports:
        - name: pod-port
          containerPort: 8080
          protocol: TCP
        resources:
          limits:
            memory: 2Gi
            cpu: "2"
          requests:
            memory: 200Mi
            cpu: 200m
        livenessProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 3
        readinessProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 3
        startupProbe:
          httpGet:
            path: /cmii/health
            port: pod-port
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 3
          periodSeconds: 20
          successThreshold: 1
          failureThreshold: 5
        volumeMounts:
        - name: nfs-backend-log-volume
          mountPath: /cmii/logs
          readOnly: false
          subPath: uavcloud-devflight/cmii-uav-data-classification
      volumes:
      - name: nfs-backend-log-volume
        persistentVolumeClaim:
          claimName: nfs-backend-log-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: cmii-uav-data-classification
  namespace: uavcloud-devflight
  labels:
    cmii.type: backend
    cmii.app: cmii-uav-data-classification
    octopus/control: backend-app-1.0.0
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/app-version: 5.7.0
spec:
  type: ClusterIP
  selector:
    cmii.type: backend
    cmii.app: cmii-uav-data-classification
  ports:
  - name: backend-tcp
    port: 8080
    protocol: TCP
    targetPort: 8080
---
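Every backend above wires its liveness, readiness, and startup probes to GET /cmii/health on pod-port. These services ship as Java images; the Go handler below is only a minimal sketch of the probe contract they have to satisfy (any 2xx returned within timeoutSeconds counts as healthy), and the JSON body is an assumption.

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/cmii/health", func(w http.ResponseWriter, r *http.Request) {
		// kubelet only checks the status code; the body is informational.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(`{"status":"UP"}`))
	})
	log.Fatal(http.ListenAndServe(":8080", mux)) // containerPort / targetPort 8080
}
```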
@@ -0,0 +1,110 @@
┌──────────────────────────────────────────────────────────────────────┐
│                         ? MobaXterm 12.4 ?                            │
│            (SSH client, X-server and networking tools)                │
│                                                                       │
│ ? SSH session to root@192.168.35.71                                   │
│ ? SSH compression : ?                                                 │
│ ? SSH-browser     : ?                                                 │
│ ? X11-forwarding  : ?  (disabled or not supported by server)          │
│ ? DISPLAY         : 10.250.0.14:0.0                                   │
│                                                                       │
│ ? For more info, ctrl+click on help or visit our website              │
└──────────────────────────────────────────────────────────────────────┘

Authorized users only. All activity may be monitored and reported!
Last login: Tue Dec 17 14:26:14 2024 from 192.168.103.36
root@onetools-2 ~ » ls
1.py                     all-gzip-image-list.txt          go     logs                 node-p.tar.gz  port_linux_amd64   wdd
2.sh                     Clash.Verge_2.0.1_x64-setup.exe  image  nethogs              nohup.out      Postman-win64.exe
all-cmii-image-list.txt  cmii_third                       k0s    node-p-0.8.7.tar.gz  octopus_image  test.sh
root@onetools-2 ~ » cd image
root@onetools-2 ~/image » ls
2.sh
'cmii-live-operator=v5.7.0=2024-12-11=206.tar.gz'
'cmii-suav-supervision=6.1.1=2024-12-10=418.tar.gz'
'cmii-uav-advanced5g=6.1.1=2024-12-11=915.tar.gz'
'cmii-uav-bridge=5.7.0-xzga-121001=2024-12-10=210.tar.gz'
'cmii-uav-bridge=5.7.0-xzga-1210=2024-12-10=522.tar.gz'
'cmii-uav-cloud-live=5.7.0-szga=2024-12-13=816.tar.gz'
'cmii-uav-device=5.6.0-szga-1212-arm=2024-12-12=354.tar.gz'
'cmii-uav-device=5.6.0-szga-1216-arm=2024-12-16=178.tar.gz'
'cmii-uav-device=6.1.0-szga-1210=2024-12-10=908.tar.gz'
'cmii-uav-integration=6.1.0-xzga-1211=2024-12-11=277.tar.gz'
'cmii-uav-integration=6.1.0-xzga-1212=2024-12-12=223.tar.gz'
'cmii-uav-integration=6.1.0-xzga-1212=2024-12-12=770.tar.gz'
'cmii-uav-mission=5.4.0-zyga-1216=2024-12-16=288.tar.gz'
'cmii-uav-mission=6.1.1-xzga=2024-12-11=578.tar.gz'
'cmii-uav-mqtthandler=6.1.0-1217-shbj-arm=2024-12-17=514.tar.gz'
'cmii-uav-oauth=6.0.0-33992-120601=2024-12-13=980.tar.gz'
'cmii-uav-platform=5.4.0-27971-zyga-1217=2024-12-17=845.tar.gz'
'cmii-uav-platform=5.7.0-32124-121101-arm=2024-12-11=423.tar.gz'
'cmii-uav-platform=5.7.0-32124-121201-arm=2024-12-12=915.tar.gz'
'cmii-uav-platform=5.7.0-32124-121301-arm=2024-12-13=302.tar.gz'
'cmii-uav-platform=5.7.0-32124-121601-arm=2024-12-16=674.tar.gz'
'cmii-uav-platform=5.7.0-32124-121602-arm=2024-12-16=611.tar.gz'
'cmii-uav-platform=5.7.0-32124-121701-arm=2024-12-17=867.tar.gz'
'cmii-uav-platform=6.0.0-33992-121301=2024-12-13=435.tar.gz'
'cmii-uav-platform=6.1.0-32124-shbj-1217-arm=2024-12-17=736.tar.gz'
'cmii-uav-platform=6.1.0-33579-121110=2024-12-11=944.tar.gz'
'cmii-uav-platform=6.1.0-33579-1211=2024-12-11=358.tar.gz'
'cmii-uav-platform=6.1.1-33579=2024-12-10=863.tar.gz'
'cmii-uav-platform=6.1.1-33579=2024-12-10=866.tar.gz'
'cmii-uav-platform-share=6.1.1=2024-12-10=429.tar.gz'
'cmii-uav-surveillance=5.6.0-szga-1211-arm=2024-12-11=866.tar.gz'
'cmii-uav-surveillance=5.6.0-szga-1217-arm=2024-12-17=394.tar.gz'
'cmii-uav-surveillance=5.7.0-xzga-121101=2024-12-11=439.tar.gz'
'cmii-uav-surveillance=5.7.0-xzga-121101=2024-12-11=939.tar.gz'
'cmii-zlm-oss-adaptor=v2.7.3=2024-12-11=473.tar.gz'
'cmii-zlm-oss-adaptor=v2.7.3=2024-12-11=485.tar.gz'
'cmlc-live=v2.7.3=2024-12-11=256.tar.gz'
'cmlc-live=v2.7.3=2024-12-11=354.tar.gz'
download_and_compress.sh
image-clean.sh
image-sync.sh
kubectl-1.30.4-amd64
'nginx=1.27.0=2024-12-11=538.tar.gz'
nohup.out
rke-1.30.4
yaml
root@onetools-2 ~/image » bash image-sync.sh -h 172.26.0.31:8033 -u harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1
[Upload] - image to be processed => harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1

[Upload] - starting image download!

Download - image downloaded successfully! => harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1

[Upload] - the image will be compressed to => cmii-admin-data=6.1.1=2024-12-18=140.tar.gz
[Upload] - compression succeeded! cmii-admin-data=6.1.1=2024-12-18=140.tar.gz

[Upload] - starting upload to OSS!
...4-12-18=140.tar.gz: 204.79 MiB / 204.79 MiB ████████████████████████ 24.61 MiB/s 8s
[Upload] - upload to OSS succeeded => [2024-12-18 09:13:52 CST] 205MiB STANDARD cmii-admin-data=6.1.1=2024-12-18=140.tar.gz

[Upload] - run the following command on the target master host ↓↓↓↓↓↓

source <(curl -sL https://b2.107421.xyz/image-sync.sh) -d cmii-admin-data=6.1.1=2024-12-18=140.tar.gz

[Update] - to update the microservice tag in one step, run the following command ↓↓↓↓↓↓

wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/cmii-admin-data=6.1.1=2024-12-18=140.tar.gz && bash ./cmii-update.sh cmii-admin-data=6.1.1=2024-12-18=140.tar.gz

[Upload] - manual commands are as follows; the full target image address is => 172.26.0.31:8033/cmii/cmii-admin-data:6.1.1

wget https://oss.demo.uavcmlc.com/cmlc-installation/tmp/cmii-admin-data=6.1.1=2024-12-18=140.tar.gz && docker load < cmii-admin-data=6.1.1=2024-12-18=140.tar.gz && docker tag harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1 172.26.0.31:8033/cmii/cmii-admin-data:6.1.1 && docker push 172.26.0.31:8033/cmii/cmii-admin-data:6.1.1

root@onetools-2 ~/image »
Network error: Software caused connection abort

Session stopped
- Press <return> to exit tab
- Press R to restart session
- Press S to save terminal output to file
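The session ends with image-sync.sh printing a manual fallback: load the downloaded tarball, re-tag it for the private registry at 172.26.0.31:8033, and push. Below is a small Go sketch that wraps the same docker sequence with os/exec; the hard-coded names mirror the log above, and the helper itself is hypothetical.

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

// run executes a command and streams its output, aborting on the first failure.
func run(name string, args ...string) {
	cmd := exec.Command(name, args...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("%s %v: %v", name, args, err)
	}
}

func main() {
	tarball := "cmii-admin-data=6.1.1=2024-12-18=140.tar.gz"
	src := "harbor.cdcyy.com.cn/cmii/cmii-admin-data:6.1.1"
	dst := "172.26.0.31:8033/cmii/cmii-admin-data:6.1.1"

	run("docker", "load", "-i", tarball) // equivalent of `docker load < tarball`
	run("docker", "tag", src, dst)
	run("docker", "push", dst)
}
```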