Add Doris deployment content

This commit is contained in:
zeaslity
2025-03-05 14:46:36 +08:00
parent 3cf5e369c1
commit 2e96490926
40 changed files with 27731 additions and 17182 deletions

View File

@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="CMII镜像同步-11.8-ARM" type="GoTestRunConfiguration" factoryName="Go Test">
<module name="ProjectOctopus" />
<target name="root@192.168.11.8:22" />
<working_directory value="$PROJECT_DIR$/agent-operator" />
<kind value="PACKAGE" />
<package value="wdd.io/agent-operator" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$" />
<option name="build_on_remote_target" value="true" />
<framework value="gotest" />
<pattern value="^\QTestPullFromEntityAndSyncConditionally\E$" />
<method v="2" />
</configuration>
</component>

View File

@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Cmii镜像同步-35.70" type="GoTestRunConfiguration" factoryName="Go Test">
<module name="ProjectOctopus" />
<target name="wdd-dev-35.70" />
<working_directory value="$PROJECT_DIR$/agent-operator" />
<kind value="PACKAGE" />
<package value="wdd.io/agent-operator" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$" />
<option name="build_on_remote_target" value="true" />
<framework value="gotest" />
<pattern value="^\QTestPullFromEntityAndSyncConditionally\E$" />
<method v="2" />
</configuration>
</component>

View File

@@ -1,15 +1,15 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="DEMO更新-3570" type="GoTestRunConfiguration" factoryName="Go Test">
<module name="ProjectOctopus"/>
<target name="wdd-dev-35.70"/>
<working_directory value="$PROJECT_DIR$/agent-operator"/>
<kind value="PACKAGE"/>
<package value="wdd.io/agent-operator"/>
<directory value="$PROJECT_DIR$"/>
<filePath value="$PROJECT_DIR$"/>
<option name="build_on_remote_target" value="true"/>
<framework value="gotest"/>
<pattern value="^\QTestUpdateCmiiImageTagFromNameTagMap\E$"/>
<method v="2"/>
</configuration>
<configuration default="false" name="DEMO更新-3570" type="GoTestRunConfiguration" factoryName="Go Test" singleton="false">
<module name="ProjectOctopus" />
<target name="wdd-dev-35.70" />
<working_directory value="$PROJECT_DIR$/agent-operator" />
<kind value="PACKAGE" />
<package value="wdd.io/agent-operator" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$" />
<option name="build_on_remote_target" value="true" />
<framework value="gotest" />
<pattern value="^\QTestUpdateCmiiImageTagFromNameTagMap\E$" />
<method v="2" />
</configuration>
</component>

View File

@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Middle镜像-35.70" type="GoTestRunConfiguration" factoryName="Go Test">
<module name="ProjectOctopus" />
<target name="wdd-dev-35.70" />
<working_directory value="$PROJECT_DIR$/agent-operator" />
<kind value="PACKAGE" />
<package value="wdd.io/agent-operator" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$" />
<option name="build_on_remote_target" value="true" />
<framework value="gotest" />
<pattern value="^\QTestFetchDependencyRepos_Middle\E$" />
<method v="2" />
</configuration>
</component>

View File

@@ -0,0 +1,15 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Middle镜像-ARM-11.8" type="GoTestRunConfiguration" factoryName="Go Test">
<module name="ProjectOctopus" />
<target name="root@192.168.11.8:22" />
<working_directory value="$PROJECT_DIR$/agent-operator" />
<kind value="PACKAGE" />
<package value="wdd.io/agent-operator" />
<directory value="$PROJECT_DIR$" />
<filePath value="$PROJECT_DIR$" />
<option name="build_on_remote_target" value="true" />
<framework value="gotest" />
<pattern value="^\QTestFetchDependencyRepos_Middle\E$" />
<method v="2" />
</configuration>
</component>

View File

@@ -1,5 +1,5 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="院内镜像清理-35.70" type="GoTestRunConfiguration" factoryName="Go Test">
<configuration default="false" name="院内Harbor清理-35.70" type="GoTestRunConfiguration" factoryName="Go Test">
<module name="ProjectOctopus" />
<target name="wdd-dev-35.70" />
<working_directory value="$PROJECT_DIR$/agent-operator/image" />

View File

@@ -0,0 +1,387 @@
import random
import threading
from queue import Queue
from paho.mqtt import client as mqtt_client
import numpy as np
import time
import logging
import os
import datetime
from KF_V2 import *
from utils import *
from config import *
import argparse
import json
import yaml
# Load the YAML configuration first
def load_mqtt_config():
config_path = os.getenv('CONFIG_PATH', 'config.yaml')
with open(config_path, 'r') as f:
config = yaml.safe_load(f)
return config['mqtt'], config['topics']
# Fetch the MQTT and topics configuration
mqtt_config, topics_config = load_mqtt_config()
## =======================
# MQTT broker address (legacy values kept commented out below)
# broker = '192.168.36.234'
# port = 37826
# username = "cmlc"
# password = "odD8#Ve7.B"
client_id = f'python-mqtt-{random.randint(0, 100)}'
# Create the ArgumentParser
parser = argparse.ArgumentParser(description='Parse command-line arguments')
# Argument task_id (-t): str, default "+"
parser.add_argument('-t', '--task_id', type=str, default="+", help='Task ID')
# Argument gate (-g): int, default 30
parser.add_argument('-g', '--gate', type=int, default=30, help='Gate threshold')
# Argument interval (-i): float, default 1.0
parser.add_argument('-i', '--interval', type=float, default=1.0, help='Time interval')
# Parse the command-line arguments
args = parser.parse_args()
# Instantiate the DataFusion class
fusion_instance = DataFusion(
gate=args.gate,
interval=args.interval,
)
global task_id
task_id = "10087"
# Extract the base path from mqtt_topic in the YAML
base_path = topics_config['mqtt_topic'].split('/')[0] # yields "bridge"
# Topic format for data reporting
providerCode = "DP74b4ef9fb4aaf269"
fusionCode = "DPZYLY"
deviceType = "5ga"
fusionType = "fusion"
deviceId = "10580005"
fusionId = "554343465692430336"
sensor_id_list = ["80103"]
# Build the topic from base_path
topic = f"{base_path}/{providerCode}/device_data/{deviceType}/{deviceId}"
# Extract the base path from sensor_topic in the YAML
base_topic = topics_config['sensor_topic'].split('FU_PAM')[0] # yields "fromcheck/DP74b4ef9fb4aaf269/device_data/"
# Subscription topic, built from the YAML format
subscribe_topic = f"{base_topic}5ga/10000000000000" # FU_PAM replaced with 5ga, + replaced with a concrete ID
# Topic for publishing fusion results
# fusionId comes from the ID assigned when the task was dispatched
publish_topic = f"fromcheck/{fusionCode}/device_data/{fusionType}/{task_id}"
# Topic for run-parameter updates
fusion_parameters_topic = topics_config['sensor_topic']
# (A unique client_id was generated above)
# Data pool
data_pool = Queue()
run_parameter = None
interval = args.interval
# Define reference point P0 (latitude, longitude)
global reference_point
reference_point = (104.08, 30.51) # reference point coordinates
# Initialize the data-processing pipeline
pipe = Pipeline(fusion_parameters_topic=topics_config['sensor_topic'], reference_point=reference_point)
fusion_code = "FU_PAM/"+args.task_id
# Configure logging
def setup_logging():
# Create the logs directory if it does not exist
if not os.path.exists('logs'):
os.makedirs('logs')
# The log file name includes the date
current_time = datetime.datetime.now()
error_log_filename = f'logs/mqtt_connection_{current_time.strftime("%Y%m%d")}_error.log'
# Configure the root logger
logging.basicConfig(
level=logging.INFO, # record everything at INFO and above
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.StreamHandler() # also print to the console
]
)
# Configure the error logger
error_logger = logging.getLogger('error_logger')
error_logger.setLevel(logging.ERROR)
# Create the file handler
error_handler = logging.FileHandler(error_log_filename)
error_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
# Attach the handler to the error logger
error_logger.addHandler(error_handler)
def connect_mqtt() -> mqtt_client:
def on_connect(client, userdata, flags, rc):
if rc == 0:
logging.info("Successfully connected to MQTT Broker")
logging.info(f"Client ID: {client_id}")
logging.info(f"Broker: {mqtt_config['broker']}:{mqtt_config['port']}")
# Resubscribe to topics
client.subscribe(fusion_parameters_topic)
logging.info(f"Subscribed to fusion parameters topic: {fusion_parameters_topic}")
if hasattr(pipe, 'topics'):
for topic in pipe.topics:
client.subscribe(topic)
logging.info(f"Subscribed to topic: {topic}")
else:
logging.error(f"Failed to connect, return code: {rc} ({DISCONNECT_REASONS.get(rc, 'unknown error')})")
def on_disconnect(client, userdata, rc):
current_time = datetime.datetime.now()
reason = DISCONNECT_REASONS.get(rc, "unknown error")
logging.warning(f"Disconnected from MQTT Broker at {current_time.strftime('%Y-%m-%d %H:%M:%S')}")
logging.warning(f"Disconnect reason code: {rc} - {reason}")
if rc != 0:
logging.error("Unexpected disconnection. Attempting to reconnect...")
try:
client.reconnect()
logging.info("Reconnection successful")
except Exception as e:
current_time = datetime.datetime.now()
logging.error(f"Reconnection failed at {current_time.strftime('%Y-%m-%d %H:%M:%S')}: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error("Stack trace:", exc_info=True)
client = mqtt_client.Client(client_id, clean_session=True)
client.username_pw_set(mqtt_config['username'], mqtt_config['password'])
# Set the keep-alive time and retry intervals
client.keepalive = 60 # 60-second keep-alive
client.socket_timeout = 30 # 30-second socket timeout
client.reconnect_delay_set(min_delay=1, max_delay=60) # reconnect delay between 1 and 60 seconds
# Configure the last-will message
will_topic = f"fromcheck/{fusionCode}/status/{task_id}"
will_payload = "offline"
client.will_set(will_topic, will_payload, qos=1, retain=True)
# Register the callbacks
client.on_connect = on_connect
client.on_disconnect = on_disconnect
try:
client.connect(mqtt_config['broker'], mqtt_config['port'])
except Exception as e:
logging.error(f"Initial connection failed: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error("Stack trace:", exc_info=True)
time.sleep(5)
return connect_mqtt()
# Publish the online status
client.publish(will_topic, "online", qos=1, retain=True)
return client
def subscribe(client: mqtt_client):
def on_message(client, userdata, msg):
try:
global run_parameter
global task_id
logging.info(f"Received message on topic: {msg.topic}")
logging.info(f"Message payload: {msg.payload.decode()}")
if "FU_PAM" in msg.topic:
if args.task_id == '+' or fusion_code in msg.topic:
new_run_parameter = msg.payload.decode()
if run_parameter != new_run_parameter:
logging.info(f"Run parameter updated from {run_parameter} to {new_run_parameter}")
run_parameter = new_run_parameter
new_topics = pipe.extract_parms(run_parameter)
logging.info(f"Extracted topics: {new_topics}")
client.subscribe(new_topics) # refresh the data subscriptions
logging.info(f"Subscribed to new topics: {new_topics}")
logging.info('===========new run_parameter!===============')
current_time = datetime.datetime.now()
task_id = pipe.task_id
else:
data_pool.put((msg.topic, msg.payload))
except Exception as e:
logging.error(f"Error processing message: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error("Stack trace:", exc_info=True)
subscribe_topics = [(subscribe_topic, 0), (fusion_parameters_topic, 0)] # default QoS 0
client.subscribe(subscribe_topics)
client.on_message = on_message
def publish(client, message):
global task_id
global fusionCode
max_retries = 3
retry_delay = 1 # initial retry delay (seconds)
def do_publish():
publish_topic = f"bridge/{fusionCode}/device_data/fusion/{task_id}"
try:
result = client.publish(publish_topic, message)
status = result.rc
if status == 0:
current_time = datetime.datetime.now()
formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
with open('log.txt', 'a') as log_file:
log_file.write('=====================\n')
log_file.write(f"Send message to topic {publish_topic}\n")
log_file.write(f"time: {formatted_time}\n")
log_file.write(f"{message}\n")
return True
else:
logging.error(f"Failed to send message to topic {publish_topic}, status: {status}")
return False
except Exception as e:
logging.error(f"Error publishing message: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error("Stack trace:", exc_info=True)
return False
# Retry with exponential backoff
for attempt in range(max_retries):
if do_publish():
return
if attempt < max_retries - 1: # not the last attempt
retry_delay *= 2 # exponential backoff
logging.warning(f"Retrying publish in {retry_delay} seconds...")
time.sleep(retry_delay)
logging.error(f"Failed to publish message after {max_retries} attempts")
def data_fusion(fusion_container):
global data_pool
data_list = []
# Drain the data pool
while not data_pool.empty():
data_now = data_pool.get()
processed_data = pipe.process_json_data(data_now[1])
# Keep only meaningful data
if processed_data and processed_data.get("objects"): # only records that contain objects
data_list.append(processed_data)
if data_list: # only write the log when there is data
current_time = datetime.datetime.now()
formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
with open('Data_log.txt', 'a') as log_file: # open the log file in append mode
log_file.write('=====================\n') # separator
log_file.write(f"Get message \n")
log_file.write(f"time: {formatted_time}\n") # timestamp
log_file.write(f"{data_list}\n") # message content
sensor_data = pipe.data_encoder(data_list)
logging.info(sensor_data)
filtered_results = fusion_container.run(sensor_data)
processed_data = pipe.data_decoder(filtered_results)
processed_data = json.dumps(processed_data, indent=4)
return processed_data # return the processed JSON string
def fusion_runner(client):
global run_parameter
pre_run_parameter = run_parameter
last_run_time = time.time()
last_health_check = time.time()
health_check_interval = 30 # health check every 30 seconds
fusion_container = DataFusion(args.gate, args.interval)
def check_connection():
if not client.is_connected():
logging.warning("MQTT client disconnected during fusion_runner")
try:
client.reconnect()
logging.info("Successfully reconnected in fusion_runner")
return True
except Exception as e:
logging.error(f"Reconnection failed in fusion_runner: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error("Stack trace:", exc_info=True)
return False
return True
while True:
try:
current_time = time.time()
# Periodic health check
if current_time - last_health_check >= health_check_interval:
if not check_connection():
time.sleep(5) # wait 5 seconds after a failed connection, then continue
continue
last_health_check = current_time
# Data processing and publishing
if current_time - last_run_time >= interval:
if not check_connection():
continue
last_run_time = current_time
if run_parameter != pre_run_parameter:
fusion_parms = pipe.extract_fusion_parms(run_parameter)
fusion_container.set_parameter(fusion_parms)
pre_run_parameter = run_parameter
processed_data = data_fusion(fusion_container)
if processed_data:
publish(client, processed_data)
except Exception as e:
logging.error(f"Error in fusion_runner: {str(e)}")
logging.error(f"Exception type: {type(e).__name__}")
logging.error("Stack trace:", exc_info=True)
time.sleep(1)
def run():
# Initialize logging
setup_logging()
logging.info("Starting MQTT client application")
while True: # outer loop handles complete disconnections
try:
client = connect_mqtt()
subscribe(client)
logging.info("Starting fusion_runner thread")
fusion_runner_thread = threading.Thread(target=fusion_runner, args=(client,), daemon=True)
fusion_runner_thread.start()
logging.info("Starting MQTT loop")
client.loop_forever()
except Exception as e:
logging.critical(f"Critical error in main loop: {str(e)}")
logging.critical(f"Exception type: {type(e).__name__}")
logging.critical("Stack trace:", exc_info=True)
logging.info("Restarting in 5 seconds...")
time.sleep(5)
if __name__ == '__main__':
run()
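
For reference, check.py (further down in this commit) launches one instance of this script per task through its fusion_command_template; an equivalent manual run, using the -g/-i values that check.py takes from config.py (the task id 10087 is purely illustrative), would be:

python Dev_Fusion.py -t 10087 -g 40 -i 0.6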

View File

@@ -0,0 +1,145 @@
import json
import time
import random
from math import radians, degrees, sin, cos
from paho.mqtt import client as mqtt_client
import datetime
import numpy as np
from math import atan2, sqrt
# Coordinate conversion helper
def convert_to_cartesian(lat, lon, reference_point):
"""Convert latitude/longitude to Cartesian coordinates relative to a reference point, using the Earth ellipsoid model"""
# Earth ellipsoid parameters (WGS84)
a = 6378137.0 # semi-major axis, in meters
f = 1 / 298.257223563 # flattening
e2 = 2 * f - f ** 2 # first eccentricity squared
# Unpack the reference point
ref_lat, ref_lon = reference_point
# Convert to radians
lat_rad = radians(lat)
lon_rad = radians(lon)
ref_lat_rad = radians(ref_lat)
ref_lon_rad = radians(ref_lon)
# Radius of curvature
N = a / sqrt(1 - e2 * sin(ref_lat_rad) ** 2) # prime-vertical radius at the reference point
# Planar Cartesian coordinates relative to the reference point
delta_lon = lon_rad - ref_lon_rad
X = (N + 0) * cos(ref_lat_rad) * delta_lon
Y = (a * (1 - e2)) / (1 - e2 * sin(ref_lat_rad) ** 2) * (lat_rad - ref_lat_rad)
return X, Y
# Mock data generator
def generate_simulated_data(reference_point, radius_km, angle):
"""Generate mock data in the shape the Pipeline expects"""
R = 6371000 # Earth radius (meters)
# Convert the radius to radians
radius = radius_km / R
# Reference point latitude/longitude
lat0, lon0 = reference_point
# Latitude and longitude of the new point
new_lat = lat0 + degrees(radius * cos(radians(angle)))
new_lon = lon0 + degrees(radius * sin(radians(angle)) / cos(radians(lat0)))
# Build the mock JSON payload
mock_data = {
"deviceId": "80103",
"deviceType": 10,
"objects": [
{
"altitude": 150.0, # mock altitude
"extension": {
"traceId": "00000000000001876",
"channel": "5756500000",
"objectType": 30,
"uavId": "UAS123456", # new field, matches the Pipeline
"uavModel": "DJI Mini 3 Pro", # mock UAV model
"deviceId": "80103" # source device ID
},
"height": 120.0, # height
"latitude": new_lat,
"longitude": new_lon,
"X": 0.0, # placeholder, filled by the conversion below
"Y": 0.0, # placeholder, filled by the conversion below
"speed": 15.0, # mock speed
"objectId": "AX0009", # mock object ID
"time": int(time.time() * 1000), # current timestamp (ms)
"source": [["sensor1", "UAS123456"]] # mock source
}
],
"providerCode": "ZYLYTEST",
"ptTime": int(time.time() * 1000) # current timestamp (ms)
}
# Convert the coordinates
for obj in mock_data["objects"]:
lat, lon = obj["latitude"], obj["longitude"]
obj["X"], obj["Y"] = convert_to_cartesian(lat, lon, reference_point)
return json.dumps(mock_data, indent=4)
# MQTT publishing
broker = '192.168.36.234'
port = 37826
providerCode = "DP74b4ef9fb4aaf269"
deviceType = "5ga"
deviceId = "10580015"
topic = f"bridge/{providerCode}/device_data/{deviceType}/{deviceId}"
client_id = f'python-mqtt-{random.randint(0, 1000)}'
username = "cmlc"
password = "odD8#Ve7.B"
reference_point = (31.880000, 117.240000) # (latitude, longitude)
radius = 1500 # radius, in meters
def connect_mqtt():
"""Connect to the MQTT broker"""
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected to MQTT Broker!")
else:
print(f"Failed to connect, return code {rc}")
client = mqtt_client.Client(client_id)
client.on_connect = on_connect
client.username_pw_set(username, password)
client.connect(broker, port)
return client
def publish(client):
"""Publish the generated mock data"""
msg_count = 0
angle = 0
while True:
time.sleep(1)
msg = generate_simulated_data(reference_point, radius, angle)
result = client.publish(topic, msg)
status = result.rc
if status == 0:
print(f"Send `{msg_count}` to topic `{topic}`")
else:
print(f"Failed to send message to topic {topic}")
msg_count += 1
angle += 1
def run():
client = connect_mqtt()
client.loop_start()
publish(client)
if __name__ == '__main__':
run()

View File

@@ -0,0 +1,15 @@
# Build stage
FROM python:3.12.8-slim-bookworm as builder
WORKDIR /build
COPY requirements.txt .
RUN pip install --user -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
# Runtime stage
FROM python:3.12.8-slim-bookworm
WORKDIR /app
COPY --from=builder /root/.local/lib/python3.12/site-packages /root/.local/lib/python3.12/site-packages
COPY . .
CMD ["python", "check.py"]

View File

@@ -0,0 +1,279 @@
import datetime
from os import error
import numpy as np
from config import *
def calculate_euclidean_distances(A, B):
# Euclidean distances between A and B
distances = np.linalg.norm(A - B, axis=1)
# Find the minimum distance and its index
min_distance_index = np.argmin(distances)
min_distance = distances[min_distance_index]
return min_distance, min_distance_index
def are_lists_equal(listA, listB):
# Sort the sub-lists of both lists
if len(listA) == 0:
return False
sorted_listA = sorted(listA, key=lambda x: (x[0], x[1]))
sorted_listB = sorted(listB, key=lambda x: (x[0], x[1]))
# Compare the sorted lists for equality
return sorted_listA == sorted_listB
def sigmoid(x, a=10, b=0.1):
# Shifted sigmoid: the raw sigmoid reaches 0.5 at x = shift_value
# a and b are shape parameters
return 1 / (1 + np.exp(-a * (x - shift_value))) + b
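# Worked reading of the mapping above, assuming the defaults from config.py
# (a = 10, b = 0.1, shift_value = 1):
#   s(x) = 1 / (1 + exp(-10 * (x - 1))) + 0.1
# so s(0) ~= 0.1, s(1) = 0.6, and s(x) -> 1.1 for large x; between successful
# updates, predict() shrinks the value geometrically via survive *= decay (0.7).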
class KalmanFilter:
def __init__(self, measurement, com_id, measurement_variance=1,process_variance=1e-1):
current_time = datetime.datetime.now()
timestamp = int(current_time.timestamp() * 1000000)
ms = measurement.tolist()
self.m = np.array([ms[0],ms[1],ms[2],0,0,0]) # state vector, 6-dimensional
self.origin = [com_id] # origin holds the strongest response
self.source = self.origin # source holds all associated observations
self.survive = np.array(survive_initial) # initial survival value
self.duration = 0
self.counter = 0
self.id = str(timestamp % 3600000000 + np.random.randint(1000))
self.F = [[1,0,0,1,0,0],
[0,1,0,0,1,0],
[0,0,1,0,0,1],
[0,0,0,1,0,0],
[0,0,0,0,1,0],
[0,0,0,0,0,1]]
self.F = np.array(self.F)
self.H = [[1,0,0,0,0,0],
[0,1,0,0,0,0],
[0,0,1,0,0,0]]
self.H = np.array(self.H)
self.R = measurement_variance * np.eye(3)
self.Q = process_variance * np.eye(6)
self.Q[3, 3] = self.Q[3, 3] * 1e-3
self.Q[4, 4] = self.Q[4, 4] * 1e-3
self.Q[5, 5] = self.Q[5, 5] * 1e-3
self.P = np.eye(6)*0.1
self.I = np.eye(6)
self.expend = 1
self.v = np.array([0,0,0])
self.born_time = int(current_time.timestamp() * 1000)
self.latest_update = self.born_time
self.m_history = self.m
self.s_history = []
self.origin_set = [self.origin]
def predict(self):
F = self.F
self.m = np.dot(F,self.m.T) # simple one-step prediction model
self.m = self.m.T
self.P = np.dot(np.dot(F,self.P),F.T) + self.Q
self.survive = self.survive * decay # apply the decay factor
self.origin_set = np.unique(np.array(self.origin_set), axis=0).tolist() # deduplicate the association set
def update(self, res, run_timestamp, gate):
self.duration += 0.6 # each update adds 0.6 to the duration
if len(res['distances']) == 0:
mmd = 1e8
else:
min_distance_index = np.argmin(res['distances'])
mmd = res['distances'][min_distance_index]
measurement = res['measurements'][min_distance_index]
# Perform the update
if mmd < gate * self.expend:
H = self.H
I = self.I
self.expend = max(self.expend * 0.8, 1)
kalman_gain = np.dot(np.dot(self.P,H.T),np.linalg.pinv(np.dot(np.dot(H,self.P),H.T)+self.R))
self.m += np.dot(kalman_gain,(measurement.T - np.dot(H,self.m.T)))
self.m = self.m.T
self.P = np.dot((I - np.dot(kalman_gain,H)),self.P)
self.origin = [res['key_ids'][min_distance_index]]
self.counter += 1
self.survive = sigmoid(self.counter) # new survival mapping
# Avoid over-confidence in the velocity estimate
self.P[3, 3] = max(1e-1, self.P[3, 3])
self.P[4, 4] = max(1e-1, self.P[4, 4])
self.P[5, 5] = max(1e-1, self.P[5, 5])
# Extract the velocity
self.v = self.m[3:6]
self.origin_set.append(self.origin)
self.latest_update = run_timestamp # record the update time
else:
self.expend = min(self.expend*1.2,1.5) # widen the gate and keep searching when association fails
self.P[3, 3] = min(self.P[3, 3]*1.1,1)
self.P[4, 4] = min(self.P[4, 4]*1.1,1)
self.P[5, 5] = min(self.P[5, 5]*1.1,1)
self.counter -= 1
self.counter = max(self.counter,0)
self.m_history = np.vstack((self.m_history, self.m))
self.s_history.append(self.survive)
def one_correlation(self, data_matrix, id_list):
# Distance between the current state and data_matrix
min_distance, min_index = calculate_euclidean_distances(self.m[0:3], data_matrix)
m_id = id_list[min_index]
measurement = data_matrix[min_index, :]
return m_id, min_distance, measurement
def correlation(self, sensor_data):
# Iterate over the sensors
res = {'m_ids':[], 'distances':[], 'measurements':[], 'key_ids':[]}
for value in sensor_data:
if len(value['id_list']) > 0:
m_id, min_distance, measurement = self.one_correlation(value['data_matrix'], value['id_list'])
key = value['deviceId']
res['m_ids'].append(m_id)
res['measurements'].append(measurement)
res['key_ids'].append([key, m_id])
# Give previously associated targets higher confidence
if [key, m_id] in self.origin_set:
min_distance = min_distance * 0.2
res['distances'].append(min_distance)
return res
# Constructor of the fusion class
class DataFusion:
def __init__(self,gate=25,interval = 1,fusion_type = 1,
measuremrnt_variance=1,process_variance =1e-1):
"""
Initialize the DataFusion class.
"""
# self.task_id = task_id
self.interval = interval
self.gate = gate
self.targets = []
self.fusion_type = fusion_type
self.existence_thres = 0.01
self.show_thres = show_thres
self.process_variance = process_variance
self.measuremrnt_variance = measuremrnt_variance
def set_parameter(self,fusion_parms):
print("GO!!!!!!!!!")
print(fusion_parms)
def obtain_priority(self,sensor_data):
self.priority_dict = dict()
for data in sensor_data:
if data.get('priority'):
self.priority_dict[data['deviceId']] = data['priority']
else:
self.priority_dict[data['deviceId']] = 1
def out_transformer(self,target):
out_former = {
'objectId': target.id,
'survive': target.survive.tolist(),
'state': target.m.tolist(),
'speed': np.linalg.norm(target.v).tolist() / self.interval,
'source': target.source,
'sigma': np.diag(target.P).tolist(),
'X': target.m[0].tolist(),
'Y': target.m[1].tolist(),
'Z': target.m[2].tolist(),
'Vx': target.v[0].tolist(),
'Vy': target.v[1].tolist(),
'Vz': target.v[2].tolist(),
'born_time': str(target.born_time)
}
return out_former
def run(self, sensor_data):
current_time = datetime.datetime.now()
run_timestamp = int(current_time.timestamp() * 1000)
fusion_data = []
selected_list = []
self.obtain_priority(sensor_data)
# Iterate over all known targets
for target in self.targets:
print(f"Fusion target id:{target.id} with survive: {target.survive} at :{target.m}\n")
if target.survive < self.existence_thres:
continue
target.predict()
res = target.correlation(sensor_data)
target.update(res,run_timestamp,self.gate)
# ==================================================
now_id = []
t_sum = 0
for r, distance in enumerate(res['distances']):
if distance < self.gate:
now_id.append(res['key_ids'][r])
selected_list.append(res['key_ids'][r])
D_Id = res['key_ids'][r][0]
t_sum += self.priority_dict[D_Id]
target.source = now_id
# ==================================================
if self.fusion_type == 2 and t_sum < 2:
target.survive = target.survive * 0.5
out_former = self.out_transformer(target)
if target.survive > self.show_thres: # write out only when survival exceeds the display threshold (0.4 by default)
fusion_data.append(out_former)
# Filter values according to the association results
self.selected_list = selected_list
for data in sensor_data:
self.new_born(data)
self.remove_duplicates()
# ==================================================
self.fusion_process_log(fusion_data)
return fusion_data
def new_born(self,value,):
for j, id in enumerate(value['id_list']):
key = value['deviceId']
if [key, id] not in self.selected_list:
if self.fusion_type == 3:
if value['priority'] > 50:
self.targets.append(KalmanFilter(value['data_matrix'][j, :], [key, id],self.measuremrnt_variance,self.process_variance))
else:
self.targets.append(KalmanFilter(value['data_matrix'][j, :], [key, id],self.measuremrnt_variance,self.process_variance))
self.selected_list.append([key, id]) # add the newly created target to the set
def remove_duplicates(self):
# Collect the IDs of targets scheduled for deletion
to_delete = []
# Iterate over all targets
for i in range(len(self.targets)):
if self.targets[i].survive < self.existence_thres:
to_delete.append(self.targets[i].id)
continue
if self.targets[i].survive < self.show_thres:
continue
for j in range(i + 1, len(self.targets)):
# Compare the two source lists
if are_lists_equal(self.targets[i].source, self.targets[j].source):
# If they match, keep the longer-lived target and mark the other for deletion
if self.targets[i].duration < self.targets[j].duration:
to_delete.append(self.targets[i].id)
else:
to_delete.append(self.targets[j].id)
# Deleting marked targets keeps target management efficient
for item_id in sorted(to_delete, reverse=True):
for target in self.targets:
if target.id == item_id:
self.targets.remove(target)
break
def fusion_process_log(self,fusion_data):
current_time = datetime.datetime.now()
# Format the time as year-month-day hour:minute:second
formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
with open('process_log.txt', 'a') as log_file: # open the log file in append mode
log_file.write('=====================\n') # separator
log_file.write(f"time: {formatted_time}\n") # timestamp
log_file.write(f"data:\n {fusion_data}\n") # message content

View File

@@ -0,0 +1,53 @@
from KF_V2 import *
# ======================
sensor_id_list = ['AUV01','AUV02']
sensor_data = []
sensor_data.append({
'data_matrix': np.array([[0, 0, 0], [100, 100, 100]]),
'id_list': ['001','002'],
'deviceId': 'AUV01',
'devicePs':[0.2], # the first value is the measurement error
'latest_time': [0],
'priority':1
})
sensor_data.append({
'data_matrix': np.array([[0, 0, 0], [100, 100, 100]]),
'id_list': ['003','004'],
'deviceId': 'AUV02',
'deviceProperties':[0.2],
'latest_time': [0],
'priority':100
})
fusion_container = DataFusion(25,1,3)
for i in range(15):
print(i)
# At the start of each loop, modify the data_matrix entries in sensor_data
if i%5 == 0:
temp = {
'data_matrix': np.array([]),
'id_list': [],
'deviceId': 'AUV01',
'devicePs': [0.2], # the first value is the measurement error
'latest_time': [0]
}
c_sensor_data = []
c_sensor_data.append(temp)
c_sensor_data.append(temp)
filtered_results = fusion_container.run(c_sensor_data)
else:
sensor_data[0]['data_matrix'][0, :] += 1 # add 1 to each element of the first row
sensor_data[0]['data_matrix'][1, :] -= 1 # subtract 1 from each element of the second row
sensor_data[1]['data_matrix'][0, :] += 1 # add 1 to each element of the first row
sensor_data[1]['data_matrix'][1, :] -= 1 # subtract 1 from each element of the second row
filtered_results = fusion_container.run(sensor_data)
print("results:\n")
for d in filtered_results:
print(d)

View File

@@ -0,0 +1,142 @@
import numpy as np
from scipy import signal
class AoAConverter:
def __init__(self):
self.p = [1e8, 1e8, 1e8]
def to_cartesian(self, theta_rad, phi_rad):
# theta_rad = np.radians(theta)
# phi_rad = np.radians(phi)
# Note: the inputs are in radians
"""Convert spherical coordinates to Cartesian"""
x = np.sin(theta_rad) * np.cos(phi_rad)
y = np.sin(theta_rad) * np.sin(phi_rad)
z = np.cos(theta_rad)
pc = np.array([x,y,z])
return pc
def calc_error(self, pc, mc):
# Squared difference between predicted and observed coordinates
mc = np.expand_dims(mc, axis=1)
diff_squared = (pc - mc) ** 2
# Sum the squared differences to get the squared error
error_squared = np.sum(diff_squared, axis=0)
# Square root gives the error
return np.sqrt(error_squared)
def find_best_r(self, theta, phi, mc, r_range):
"""Search the given range for the optimal r"""
# Convert r_range to a NumPy array for vectorized operations
r_values = np.array(r_range)
# Cartesian unit direction for the angles
pc = self.to_cartesian(theta, phi)
# Expand dimensions for the outer product
r_values = np.expand_dims(r_values, axis=0)
pc = np.expand_dims(pc, axis=1)
# Errors for every candidate r
# print([pc.shape,r_values.shape])
D = np.dot(pc, r_values)
errors = self.calc_error(D, mc)
r_values = np.squeeze(r_values)
# Minimum error and the corresponding r
min_error = np.min(errors)
best_r = r_values[np.argmin(errors)] # index back into the squeezed array, since a dimension was added above
return [best_r,min_error]
def projected_measure(self,theta, phi, r,p0):
pc = self.to_cartesian(theta, phi)
neo_p = r*pc + p0
return np.array(neo_p)
converter = AoAConverter()
def calculate_euclidean_distances(A, BX):
# Euclidean distances between A and B
B = BX['data_matrix']
N = B.shape[0]
r_range = np.linspace(-5, 5, 100)
if BX.get('AOA_pos'):
# For AoA data, project the measurements into Cartesian space first
sensor_pos = BX.get('AOA_pos')
ob_pos = A - sensor_pos
r0 = np.linalg.norm(ob_pos)
B_new = []
for i in range(N):
theta = B[i,0]
phi = B[i,1]
[best_r,min_error] = converter.find_best_r(theta, phi,ob_pos, r0+r_range)
print(min_error)
B_new.append(converter.projected_measure(theta, phi,best_r,sensor_pos))
B_new = np.array(B_new)
else:
B_new = B
distances = np.linalg.norm(A - B_new, axis=1)
# Find the minimum distance and its index
min_distance_index = np.argmin(distances)
min_distance = distances[min_distance_index]
return [min_distance, min_distance_index, B_new]
def are_lists_equal(listA, listB):
# Sort the sub-lists of both lists
if len(listA) == 0:
return False
sorted_listA = sorted(listA, key=lambda x: (x[0], x[1]))
sorted_listB = sorted(listB, key=lambda x: (x[0], x[1]))
# Compare the sorted lists for equality
return sorted_listA == sorted_listB
def sigmoid(x, a=10, b=0.1):
# Shifted sigmoid: the raw sigmoid reaches 0.5 at x = 1
# a and b are shape parameters
return 1 / (1 + np.exp(-a * (x - 1))) + b
def calculate_correlation(A, B):
"""
Compute the maximum correlation over all columns of two array matrices.
Arguments:
A -- the first NumPy array
B -- the second NumPy array
"""
A = np.exp(-1j*A/50)
B = np.exp(1j*B/50)
corr_res = []
for col in range(3):
a = A[:, col]
b = B[:, col]
convolution = signal.convolve(a, b[::-1])
corr_res.append(convolution)
max_corr = np.sum(np.abs(np.array(corr_res)),0)
max_corr = np.max(max_corr)/3
return max_corr
def calculate_history_distances(target, b):
# Combine the backward history with a forward prediction
A = target.m_history
v = target.v
# L2 norm (Euclidean distance) between each row and the vector b
if A.shape[0] < 10:
return np.inf
local_time = np.linspace(0, 10, 20)
local_time = np.expand_dims(local_time, axis=1)
v = np.expand_dims(v, axis=1)
A_pre = A[-10:,0:3]
A_post = np.dot(local_time,v.T)
A_all = np.vstack((A_pre, A_post))
distances = np.linalg.norm(A_all - b, axis=1)
# Find the minimum distance
min_distance = np.min(distances)
return min_distance
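
For reference, the AoA handling above can be read as follows: with unit direction $u(\theta,\varphi) = (\sin\theta\cos\varphi,\ \sin\theta\sin\varphi,\ \cos\theta)$, sensor-relative target position $d = p - p_{\mathrm{sensor}}$ and $r_0 = \lVert d \rVert$, find_best_r grid-searches

$$\hat{r} = \operatorname*{arg\,min}_{r \in [r_0 - 5,\ r_0 + 5]} \lVert r\,u(\theta,\varphi) - d \rVert_2$$

over 100 samples, and projected_measure maps the bearing back to Cartesian space as $\hat{p} = \hat{r}\,u + p_{\mathrm{sensor}}$, which is then compared against the track position by Euclidean distance.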

View File

@@ -0,0 +1,26 @@
#!/bin/bash
# Usage: pick a suitable directory on the host,
# upload the latest project code, then place this script in the project directory
# and adjust the parameters below
if [[ $# -eq 0 ]]; then
echo "tag version is null!"
exit 233
fi
tag_version=$1
echo "start to build docker image tag is => ${tag_version}"
docker build -t harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:${tag_version} .
echo ""
echo "login to docker hub"
docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn
echo ""
echo "start to push image to hub!"
docker push harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:${tag_version}

View File

@@ -0,0 +1,374 @@
import os
import subprocess
import paho.mqtt.client as mqtt
import json
import time
import threading
import logging
from config import *
import datetime
import schedule # requires: pip install schedule
import yaml
# Load the YAML configuration
def load_mqtt_config():
config_path = os.getenv('CONFIG_PATH', 'config.yaml')
with open(config_path, 'r') as f:
config = yaml.safe_load(f)
return config['mqtt'], config['topics']
# Fetch the MQTT and topics configuration
mqtt_config, topics_config = load_mqtt_config()
# Logging configuration
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('check.log'),
logging.StreamHandler()
]
)
# Running tasks and their configurations
running_tasks = {}
task_configs = {}
# Command template for launching Dev_Fusion.py
fusion_command_template = f"nohup python Dev_Fusion.py -t {{task_id}} -g {DEV_FUSION_G} -i {DEV_FUSION_I} > /dev/null 2> error.log &"
# Log folder path
log_folder = "tasklog"
os.makedirs(log_folder, exist_ok=True)
# Global lock
task_lock = threading.Lock()
def compare_configs(old_config, new_config):
"""
Compare two configurations for substantive differences.
Returns True when they differ and a restart is needed,
False when they match and the message only needs forwarding.
"""
try:
# 1. Check the devices list
old_devices = old_config.get('devices', [])
new_devices = new_config.get('devices', [])
if len(old_devices) != len(new_devices):
return True
# Build a key tuple per device for comparison
def get_device_key(device):
return (
device.get('device_id'),
device.get('device_topic'),
device.get('device_type'),
device.get('reference_point')
)
old_device_keys = {get_device_key(d) for d in old_devices}
new_device_keys = {get_device_key(d) for d in new_devices}
# Restart when the key device information changed
if old_device_keys != new_device_keys:
return True
# 2. Check the reference point
old_ref = old_config.get('reference_point')
new_ref = new_config.get('reference_point')
if old_ref != new_ref:
return True
# 3. Changes to other parameters (e.g. sampling_rate) do not require a restart
logging.info("No critical configuration changes detected")
return False
except Exception as e:
logging.error(f"Error comparing configs: {str(e)}")
return True # treat errors as a difference and restart, to be safe
def stop_task(task_id):
"""Stop the specified task instance"""
try:
if task_id in running_tasks:
process = running_tasks[task_id]
# Kill the matching Python process via pkill
subprocess.run(f"pkill -f 'python.*Dev_Fusion.py.*-t {task_id}'", shell=True)
process.wait(timeout=5) # wait for the process to exit
del running_tasks[task_id]
del task_configs[task_id]
logging.info(f"Task {task_id} stopped successfully")
except Exception as e:
logging.error(f"Error stopping task {task_id}: {str(e)}")
# Task handler (runs in a worker thread)
def handle_task(client, task_id, payload):
try:
with task_lock: # protect shared state with the lock
data = json.loads(payload)
sensor_topic = topics_config['sensor_topic'].replace("+", task_id)
# Log the configuration update
log_file = os.path.join(log_folder, f"received_tasklog_{task_id}.txt")
current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def log_config_update(action):
with open(log_file, "a") as f:
f.write(f"\n=== Configuration Update at {current_time} ===\n")
f.write(f"Task ID: {task_id}\n")
f.write(f"MQTT_TOPIC: {topics_config['mqtt_topic']}\n")
f.write(f"Payload: {payload}\n")
f.write(f"Action: {action}\n")
f.write("=" * 50 + "\n")
# Is the task already running?
if task_id in running_tasks:
# Is there a stored configuration?
if task_id in task_configs:
# Compare the old and new configurations
if compare_configs(task_configs[task_id], data):
logging.info(f"Configuration changed for task {task_id}, restarting...")
stop_task(task_id)
log_config_update("Configuration changed, restarting instance")
start_new_instance(client, task_id, payload, data)
else:
# No configuration change: just forward the message
logging.info(f"No configuration change for task {task_id}, forwarding message")
log_config_update("Message forwarded (no critical changes)")
client.publish(sensor_topic, payload)
else:
# No stored configuration: store the new one and forward
logging.info(f"No stored config for task {task_id}, storing first config")
task_configs[task_id] = data
log_config_update("First config stored and forwarded")
client.publish(sensor_topic, payload)
else:
# Task not running: start a new instance
log_config_update("New instance started")
start_new_instance(client, task_id, payload, data)
except Exception as e:
logging.error(f"Error handling task {task_id}: {str(e)}")
def start_new_instance(client, task_id, payload, config):
"""Start a new Dev_Fusion instance"""
try:
# Launch a Dev_Fusion.py instance
fusion_command = fusion_command_template.format(task_id=task_id)
process = subprocess.Popen(fusion_command, shell=True)
running_tasks[task_id] = process
task_configs[task_id] = config
logging.info(f"Dev_Fusion.py started successfully for Task ID {task_id}")
# Save the log, in append mode
log_file = os.path.join(log_folder, f"received_tasklog_{task_id}.txt")
current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
with open(log_file, "a") as f: # "a" mode appends
f.write(f"\n=== Configuration Update at {current_time} ===\n")
f.write(f"Task ID: {task_id}\n")
f.write(f"MQTT_TOPIC: {topics_config['mqtt_topic']}\n")
f.write(f"Payload: {payload}\n")
# Record whether a restart was triggered
f.write("Action: New instance started\n")
f.write("=" * 50 + "\n")
# Give the instance time to start
time.sleep(0.5)
# Send the configuration
sensor_topic = topics_config['sensor_topic'].replace("+", task_id)
client.publish(sensor_topic, payload)
logging.info(f"Configuration sent to {sensor_topic}")
except Exception as e:
logging.error(f"Error starting new instance for task {task_id}: {str(e)}")
if task_id in running_tasks:
del running_tasks[task_id]
del task_configs[task_id]
# MQTT callbacks
def on_connect(client, userdata, flags, rc):
if rc == 0:
logging.info("Connected to MQTT broker")
client.subscribe(topics_config['mqtt_topic']) # topic from the YAML config
else:
logging.error(f"Connection failed with code {rc}: {DISCONNECT_REASONS.get(rc, 'Unknown error')}")
def on_message(client, userdata, msg):
try:
payload = msg.payload.decode("utf-8")
logging.info(f"Received message on topic {msg.topic}")
data = json.loads(payload)
task_id = data.get("task_id")
if task_id:
thread = threading.Thread(target=handle_task, args=(client, task_id, payload))
thread.start()
else:
logging.warning("Received message without task_id")
except json.JSONDecodeError:
logging.error("Received message is not valid JSON")
except Exception as e:
logging.error(f"Error processing message: {str(e)}")
def check_running_instances():
"""Detect Dev_Fusion instances already running on the system"""
try:
# Find running Dev_Fusion.py instances via ps
result = subprocess.run("ps aux | grep 'python.*Dev_Fusion.py' | grep -v grep",
shell=True, capture_output=True, text=True)
found_instances = []
for line in result.stdout.splitlines():
# Extract the task_id from the command-line arguments
if '-t' in line:
parts = line.split()
for i, part in enumerate(parts):
if part == '-t' and i + 1 < len(parts):
task_id = parts[i + 1]
pid = parts[1] # the PID is usually in the second column
found_instances.append((task_id, pid))
for task_id, pid in found_instances:
logging.info(f"Found running instance for task {task_id}, pid: {pid}")
# Load the task's latest configuration
config = read_latest_config(task_id)
if config:
# Register the already-running instance in running_tasks
running_tasks[task_id] = subprocess.Popen(['echo', ''], stdout=subprocess.PIPE)
running_tasks[task_id].pid = int(pid)
task_configs[task_id] = config
logging.info(
f"Successfully loaded config for task {task_id} from tasklog/received_tasklog_{task_id}.txt")
else:
logging.warning(f"No valid config found for task {task_id}, stopping instance...")
subprocess.run(f"pkill -f 'python.*Dev_Fusion.py.*-t {task_id}'", shell=True)
logging.info(f"Stopped instance {task_id} due to missing config")
logging.info(f"Finished checking instances. Loaded {len(running_tasks)} tasks with valid configs")
except Exception as e:
logging.error(f"Error checking running instances: {str(e)}")
def read_latest_config(task_id):
"""Read the latest configuration for the given task"""
try:
log_file = os.path.join(log_folder, f"received_tasklog_{task_id}.txt")
if not os.path.exists(log_file):
logging.error(f"No log file found for task {task_id}")
return None
with open(log_file, 'r') as f:
content = f.read()
# Split into configuration-update blocks
updates = content.split('=== Configuration Update at')
if not updates:
return None
# Take the last update block
latest_update = updates[-1]
# Extract the payload
payload_start = latest_update.find('Payload: ') + len('Payload: ')
payload_end = latest_update.find('\nAction:')
if payload_end == -1: # no Action line
payload_end = latest_update.find('\n===')
if payload_start > 0 and payload_end > payload_start:
payload = latest_update[payload_start:payload_end].strip()
return json.loads(payload)
return None
except Exception as e:
logging.error(f"Error reading latest config for task {task_id}: {str(e)}")
return None
def restart_all_instances():
"""Restart all running instances"""
logging.info("Scheduled restart: Beginning restart of all instances")
# Copy the current task list, since running_tasks is mutated below
tasks_to_restart = list(running_tasks.keys())
for task_id in tasks_to_restart:
try:
# Load the latest configuration
config = read_latest_config(task_id)
if not config:
logging.error(f"Could not find latest config for task {task_id}, skipping restart")
continue
# Stop the current instance
logging.info(f"Stopping task {task_id} for scheduled restart")
stop_task(task_id)
# Serialize the configuration as JSON
payload = json.dumps(config)
# Start a new instance
logging.info(f"Starting new instance for task {task_id} with latest config")
start_new_instance(mqtt_client, task_id, payload, config)
except Exception as e:
logging.error(f"Error restarting task {task_id}: {str(e)}")
def setup_scheduled_restart(restart_time="03:00"):
"""Schedule the daily restart"""
schedule.every().day.at(restart_time).do(restart_all_instances)
def run_schedule():
while True:
schedule.run_pending()
time.sleep(30) # check every 30 seconds
# Start the scheduler thread
scheduler_thread = threading.Thread(target=run_schedule, daemon=True)
scheduler_thread.start()
def main():
global mqtt_client # global so restart_all_instances can reuse the client
# Check for already-running instances at startup
check_running_instances()
# Create the MQTT client
mqtt_client = mqtt.Client()
mqtt_client.on_connect = on_connect
mqtt_client.on_message = on_message
mqtt_client.username_pw_set(mqtt_config['username'], mqtt_config['password'])
# Schedule the daily restart (03:00 by default)
setup_scheduled_restart()
while True:
try:
mqtt_client.connect(mqtt_config['broker'], mqtt_config['port'], 60)
mqtt_client.loop_forever()
except Exception as e:
logging.error(f"MQTT connection error: {str(e)}")
time.sleep(5)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,10 @@
mqtt:
broker: "192.168.35.178"
port: 31884
username: "cmlc"
password: "4YPk*DS%+5"
topics:
mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"

View File

@@ -0,0 +1,67 @@
# # MQTT configuration
# broker = "192.168.35.178" # broker address
# port = 31883 # port
# username = "cmlc"
# password = "odD8#Ve7.B"
#
# # Topic used by check.py
# MQTT_TOPIC = "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
#
# # Topic used by Dev_Fusion.py
# SENSOR_TOPIC = "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
# Parameters for the Dev_Fusion.py launch command assembled in check.py
DEV_FUSION_G = 40 # argument g
DEV_FUSION_I = 0.6 # argument i
# KF_V2 settings
shift_value = 1
survive_initial = 0.25
decay = 0.7
show_thres = 0.4
reference_point = (104.08, 30.51)
# Logging configuration
DISCONNECT_REASONS = {
0: "Normal disconnection",
1: "Protocol version mismatch",
2: "Invalid client identifier",
3: "Server unavailable",
4: "Bad username or password",
5: "Not authorized",
6: "Message broker unavailable",
7: "TLS error",
8: "QoS not supported",
9: "Client has been banned",
10: "Server busy",
11: "Client has been banned (certificate related)",
128: "Unspecified error",
129: "Malformed packet",
130: "Protocol error",
131: "Communication error",
132: "Server keep-alive timeout",
133: "Server internal error",
134: "Server shutting down",
135: "Server out of resources",
136: "Client network socket error",
137: "Server closing the connection",
138: "Server refused the connection",
139: "Version not supported by the server",
140: "Client ID already in use",
141: "Connection rate exceeded",
142: "Maximum connections exceeded",
143: "Keep-alive timeout",
144: "Session taken over",
145: "Connection lost",
146: "Invalid topic alias",
147: "Packet too large",
148: "Message rate too high",
149: "Quota exceeded",
150: "Administrative action",
151: "Invalid payload format",
152: "Retain not supported",
153: "QoS not supported",
154: "Use another server",
155: "Server has moved",
156: "Connection not supported",
}

View File

@@ -0,0 +1,10 @@
mqtt:
broker: "192.168.35.178"
port: 31883
username: "cmlc"
password: "odD8#Ve7.B"
topics:
mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"

View File

@@ -0,0 +1,19 @@
try {
$ErrorActionPreference = "Stop"
Write-Host "1. Uploading binary exec..." -ForegroundColor Green
ssh root@192.168.35.71 "mkdir -p /root/wdd/ranjing-python-devfusion/"
scp C:\Users\wdd\IdeaProjects\ProjectOctopus\agent-common\SplitProject\ranjing-python-devfusion\* root@192.168.35.71:/root/wdd/ranjing-python-devfusion/
Write-Host "2. Exec the command ..." -ForegroundColor Blue
Write-Host ""
Write-Host ""
ssh root@192.168.35.71 "cd /root/wdd/ranjing-python-devfusion/ && docker build -t ranjing/dev-fusion:v1.0 ."
Write-Host ""
Write-Host ""
} catch {
Write-Host "操作失败: $_" -ForegroundColor Red
exit 1
}

View File

@@ -0,0 +1,8 @@
#!/bin/bash
docker run --name devfusion \
-d \
--rm \
-v /root/wdd/ranjing-python-devfusion/config-dev.yaml:/dev-fusion/config.yaml \
harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:6.2.0

View File

@@ -0,0 +1,62 @@
from math import radians, degrees, sin, cos, atan2, sqrt
def convert_to_cartesian(lat, lon, reference_point):
"""Convert latitude/longitude to Cartesian coordinates relative to a reference point, using the Earth ellipsoid model"""
# Earth ellipsoid parameters (WGS84)
a = 6378137.0 # semi-major axis, in meters
f = 1 / 298.257223563 # flattening
e2 = 2 * f - f ** 2 # first eccentricity squared
# Unpack the reference point
ref_lat, ref_lon = reference_point
# Convert to radians
lat_rad = radians(lat)
lon_rad = radians(lon)
ref_lat_rad = radians(ref_lat)
ref_lon_rad = radians(ref_lon)
# Radius of curvature
N = a / sqrt(1 - e2 * sin(ref_lat_rad) ** 2) # prime-vertical radius at the reference point
# Planar Cartesian coordinates relative to the reference point
delta_lon = lon_rad - ref_lon_rad
X = (N + 0) * cos(ref_lat_rad) * delta_lon
Y = (a * (1 - e2)) / (1 - e2 * sin(ref_lat_rad) ** 2) * (lat_rad - ref_lat_rad)
return X, Y
def convert_to_geodetic(x, y, reference_point):
"""Convert Cartesian coordinates back to latitude/longitude, using the Earth ellipsoid model"""
# Earth ellipsoid parameters (WGS84)
a = 6378137.0 # semi-major axis, in meters
f = 1 / 298.257223563 # flattening
e2 = 2 * f - f ** 2 # first eccentricity squared
# Unpack the reference point
ref_lat, ref_lon = reference_point
# Convert to radians
ref_lat_rad = radians(ref_lat)
ref_lon_rad = radians(ref_lon)
# Radius of curvature
N = a / sqrt(1 - e2 * sin(ref_lat_rad) ** 2) # prime-vertical radius at the reference point
# Latitude
lat_rad = y * (1 - e2 * sin(ref_lat_rad) ** 2) / (a * (1 - e2)) + ref_lat_rad
# Longitude
if cos(ref_lat_rad) == 0:
lon_rad = 0
else:
lon_rad = x / ((N + 0) * cos(ref_lat_rad)) + ref_lon_rad
# Convert back to degrees
lat = degrees(lat_rad)
lon = degrees(lon_rad)
return lat, lon
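
In equation form, with $\varphi_0, \lambda_0$ the reference latitude and longitude in radians, $e^2 = 2f - f^2$, and the prime-vertical radius $N = a / \sqrt{1 - e^2 \sin^2 \varphi_0}$, the two functions above evaluate

$$
X = N \cos\varphi_0 \,(\lambda - \lambda_0), \qquad
Y = \frac{a\,(1 - e^2)}{1 - e^2 \sin^2 \varphi_0}\,(\varphi - \varphi_0),
$$

and convert_to_geodetic inverts exactly these two linear relations around the reference point.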

View File

@@ -0,0 +1,423 @@
import datetime
from transformation import *
import json
import numpy as np
class Pipeline:
def __init__(self, fusion_parameters_topic,reference_point):
self.fusion_parameters_topic = fusion_parameters_topic
self.task_id = '554343465692430336'
self.reference_point = reference_point
# self.deviceId = deviceId
self.sensor_id_list = ["10000000000"]
self.fusionCode = 'DPZYLY'
self.publish_topic = f"bridge/{self.fusionCode}/device_data/fusion/{self.task_id}"
self.priority_dict = {"10000000000":1}
self.uavInfo_bucket = dict()
self.target_bowl = dict()
self.device_info_dict = dict()
self.device_type_mapping = {
"5ga": 0,
"radar": 1,
"spec": 2,
"oe": 3,
"cm": 4,
"dec": 5,
"ifr": 6,
"cv": 7,
"isrs": 8,
"aoa": 9,
"tdoa": 10,
"dcd": 11,
"direct": 100,
"rtk": 101,
"rid": 102,
"fusion": 1000,
"other": 999 # 假设 'other' 对应于未知设备类型
}
self.device_type_speedrank = {
"radar": 1,
"spec": 2,
"oe": 3,
"cm": 4,
"dec": 5,
"ifr": 6,
"cv": 7,
"isrs": 8,
"aoa": 9,
"tdoa": 10,
"dcd": 13,
"direct": 12,
"5ga": 11,
"rid": 14,
"rtk": 15,
"other": 0 # 假设 'other' 对应于未知设备类型
}
def process_json_data(self, json_data):
"""
Convert the JSON payload to a dict, adding X and Y attributes.
"""
data_dict = json.loads(json_data)
# Access the 'ptTime' key safely
pt_time = data_dict.get('ptTime')
if pt_time is not None:
print(pt_time)
else:
print("Key 'ptTime' not found in data_dict.")
# Access the 'objects' key safely
objects = data_dict.get('objects')
if objects is None:
print("Key 'objects' not found in data_dict.")
return data_dict # return the original dict when 'objects' is missing, or handle it otherwise as needed
# The 'objects' key exists: process the records
for record in objects:
# Check that 'latitude' and 'longitude' exist in the record
if 'latitude' in record and 'longitude' in record:
lat = record['latitude']
lon = record['longitude']
X, Y = convert_to_cartesian(lat, lon, self.reference_point)
record['X'] = X
record['Y'] = Y
else:
print("Record is missing 'latitude' or 'longitude' keys.")
return data_dict
def data_encoder(self, data_list):
"""
Build the data matrix and the ID list.
"""
sensor_data = []
for sensor_id in self.sensor_id_list:
temp = {'data_matrix': [],
'id_list': [],
'deviceId': sensor_id,
'latest_time': [],
'priority':1}
for record in data_list:
if record.get('noteData'):
obj = record['noteData']
obj['objectId'] = obj['uasId']
obj['deviceId'] = obj["extension"]['deviceId']
record['objects'] = [obj]
if record['deviceId'] == sensor_id:
temp['priority'] = self.priority_dict[sensor_id]
if record.get('objects'):
for obj in record['objects']:
if obj['objectId'] in temp['id_list']:
position = temp['id_list'].index(obj['objectId'])
if int(record['ptTime']) > int(temp['latest_time'][position]):
temp['data_matrix'][position] = [obj['X'], obj['Y'], obj['altitude']]
else:
temp['data_matrix'].append([obj['X'], obj['Y'], obj['altitude']])
temp['id_list'].append(obj['objectId'])
temp['latest_time'].append(record['ptTime'])
# Store the extension fields
if obj.get('extension'):
B_id = str(record['deviceId'])+str(obj['objectId'])
self.uavInfo_bucket[B_id] = obj['extension']
# If the object has a speed field, add it to the extension
if obj.get('speed'):
self.uavInfo_bucket[B_id]['speed'] = obj['speed']
# Store the height field too, if present
if obj.get('height'):
self.uavInfo_bucket[B_id]['height'] = obj['height']
# Write into the data dict
temp['data_matrix'] = np.array(temp['data_matrix'])
sensor_data.append(temp)
return sensor_data
def process_extension(self, target):
# Dictionary holding the default key-value pairs
extension = {
"objectType": 30,
"uavSN": "Un-known",
"uavModel": "Un-known",
"pilotLat": 0.0,
"pilotLon": 0.0,
"speedX": 0.0,
"speedY": 0.0,
"speedZ": 0.0,
"time": 0.0,
"born_time": 0.0
}
# Recover historical values from target_bowl
if target['objectId'] in self.target_bowl.keys():
extension = self.target_bowl[target['objectId']]
result_source = target['source']
# Update with fresh values
for source in result_source:
id = str(source[0]) + str(source[1])
if self.uavInfo_bucket.get(id):
for key, value in self.uavInfo_bucket[id].items():
# Update only when the new value is valid
if value not in ["Un-known", 0.0, None, "Unknown", "DJI Mavic"]:
extension[key] = value
extension['born_time'] = int(target['born_time'])
# Persist the state back to target_bowl
self.target_bowl[target['objectId']] = extension
return extension
def data_decoder(self, filtered_results):
"""
Decode the filtered results.
"""
current_time = datetime.datetime.now()
timestamp = int(current_time.timestamp() * 1000)
combined_objects = []
for target in filtered_results:
X = target['X']
Y = target['Y']
Z = target['Z'] # Z here is actually the altitude
lat, lon = convert_to_geodetic(X, Y, self.reference_point)
extension = self.process_extension(target)
extension['time'] = int(timestamp)
extension['born_time'] = int(int(target['born_time']) / 1000) # born_time arrives in milliseconds
new_origin_source = []
for source in target['source']:
device_id, object_id = source
# Device abbreviation from device_info_dict
device_abbreviation = self.device_info_dict.get(device_id, {}).get('device_type', 'other')
# Map to the numeric device type
device_type = self.device_type_mapping.get(device_abbreviation, 999)
new_origin_source.append(f"{device_type}_{device_id}_{object_id}")
# Pick the speed according to the priority order
highest_priority_speed = None
highest_priority = float('inf')
for source in target['source']:
device_id, object_id = source
B_id = str(device_id) + str(object_id)
if self.uavInfo_bucket.get(B_id):
device_type = self.device_info_dict.get(device_id, {}).get('device_type', 'other')
priority = self.device_type_speedrank.get(device_type, float('inf'))
if priority < highest_priority:
highest_priority = priority
# Fetch the speed and convert units
speed = self.uavInfo_bucket[B_id].get('speed', target['speed'])
if device_type == "5ga": # 5ga devices report km/h
speed = speed / 3.6 # convert km/h to m/s
highest_priority_speed = speed
# Make sure highest_priority_speed came from a device
if highest_priority_speed is None:
# No current speed found: look for one in the history
for obj in reversed(combined_objects):
if obj["objectId"] == target['objectId']:
highest_priority_speed = obj.get("speed")
break
if highest_priority_speed is None:
print(f"Warning: No speed found for target {target['objectId']}, using default target speed.")
new_speed = target['speed']
else:
new_speed = highest_priority_speed
else:
new_speed = highest_priority_speed
# Debug output: where the speed came from
print(f"Selected speed for target {target['objectId']}: {new_speed} from device with priority {highest_priority}")
# Fetch the height field
height = None
for source in target['source']:
device_id, object_id = source
B_id = str(device_id) + str(object_id)
if self.uavInfo_bucket.get(B_id):
if self.uavInfo_bucket[B_id].get('height'):
height = self.uavInfo_bucket[B_id]['height']
break
# No current height: look for one in the history
if height is None:
for obj in reversed(combined_objects):
if obj["objectId"] == target['objectId']:
prev_height = obj.get("height")
if prev_height is not None: # a valid historical height was found
height = prev_height
break
# Still no height: keep the most recent historical height
if height is None and combined_objects:
for obj in reversed(combined_objects):
if obj["objectId"] == target['objectId']:
height = obj.get("height")
break
temp = {
# "msg_cnt": result['msg_cnt'], # msg_cnt could be added to detect packet loss
"objectId": target['objectId'],
"X": X,
"Y": Y,
"height": height, # the current height, or a historical one
"altitude": Z,
"speed": new_speed, # the highest-priority speed
'latitude': lat,
'longitude': lon,
'sigma': target['sigma'],
"extension": {
"origin_source": new_origin_source, # the updated origin_source
# other extension fields...
"objectType": extension.get('objectType', 0),
"uavSN": extension.get("uavSN", "Un-known"),
"uavModel": extension.get("uavModel", "Un-known"),
"pilotLat": extension.get("pilotLat", 0.0),
"pilotLon": extension.get("pilotLon", 0.0),
"speedX": 0.0, # velocity components are no longer used
"speedY": 0.0,
"speedZ": 0.0,
"time": int(timestamp),
"born_time": int(int(target['born_time']) / 1000),
},
"time": int(timestamp),
}
# Do not overwrite objectType in the extension once it is non-zero.
if extension.get('objectType', 0) != 0 or target['objectId'] not in [obj['objectId'] for obj in
combined_objects]:
temp["extension"]["objectType"] = extension.get('objectType', 0)
else:
# Look up the objectType of the same objectId in combined_objects, defaulting to 0
existing_object_types = [obj["extension"].get('objectType', 0) for obj in combined_objects if
obj["objectId"] == target['objectId']]
if existing_object_types and existing_object_types[0] != 0:
temp["extension"]["objectType"] = existing_object_types[0]
else:
temp["extension"]["objectType"] = 0
# Validate and update uavSN and uavModel
invalid_values = ["Un-known", 0.0, None, "Unknown", "DJI Mavic"]
# uavSN must be a mix of letters and digits, guarding against odd values from elsewhere
current_sn = extension.get('uavSN', "Un-known")
if isinstance(current_sn, str):
has_letter = any(c.isalpha() for c in current_sn)
has_digit = any(c.isdigit() for c in current_sn)
if not (has_letter and has_digit):
# Look first for a valid historical SN of the same objectId
for obj in reversed(combined_objects):
if obj["objectId"] == target['objectId']:
prev_sn = obj["extension"].get("uavSN", "Un-known")
if isinstance(prev_sn, str):
has_letter = any(c.isalpha() for c in prev_sn)
has_digit = any(c.isdigit() for c in prev_sn)
if has_letter and has_digit:
current_sn = prev_sn
break
temp["extension"]["uavSN"] = current_sn
temp["extension"]["uavModel"] = extension.get('uavModel', "Un-known")
combined_objects.append(temp)
data_processed = {
"deviceType": 1000,
"providerCode": "DPZYLY",
"deviceId": self.task_id,
"objects": combined_objects,
"ptTime": int(timestamp)
}
# Only log meaningful data
if data_processed and data_processed.get("objects") and len(data_processed["objects"]) > 0:
formatted_time = current_time.strftime('%Y-%m-%d %H:%M:%S')
with open('PB_log.txt', 'a') as log_file: # open the log file in append mode
log_file.write('=====================\n') # separator
log_file.write(f"time: {formatted_time}\n") # timestamp
log_file.write(f"data: {data_processed}\n")
return data_processed
def extract_parms(self, parm_data):
"""
Extract the task parameters.
"""
id_list = [] # device IDs
priority_dict = {} # device priorities
device_info_dict = {} # new: device detail dict, used for later lookups
data_dict = json.loads(parm_data)
print(data_dict)
self.task_id = data_dict['task_id']
new_topics = [("fromcheck/DPZYLY/fly_data/rtk/#", 0)]
devices = data_dict['devices']
for device in devices:
device_id = device['device_id']
if device_id:
id_list.append(device_id)
new_topics.append((device["device_topic"], 0))
# Store the device priority (default 1)
if device.get('priority'):
priority_dict[device_id] = device['priority']
else:
priority_dict[device_id] = 1
# Store the device details (topic, type, sampling_rate), one-to-many
device_info_dict[device_id] = {
'device_topic': device['device_topic'],
'device_type': device['device_type'],
'sampling_rate': device['properties'].get('sampling_rate', 1) # defaults to 1 when not provided
}
self.priority_dict = priority_dict
self.device_info_dict = device_info_dict # keep the device info dict on the instance
self.sensor_id_list = id_list
# Handle the reference point
if data_dict.get('reference_point'):
try:
original_reference_point = data_dict['reference_point']
if len(original_reference_point) == 2: # must be a two-element tuple or list
self.reference_point = (
float(original_reference_point[0]) + 0,
float(original_reference_point[1]) + 0
)
else:
raise ValueError("Invalid reference_point structure. Must be a tuple or list with two elements.")
except Exception as e:
print(f"Error processing reference_point: {e}")
self.reference_point = None # or fall back to a default value
return new_topics
def extract_fusion_parms(self,parm_data):
data_dict = json.loads(parm_data)
# 定义 fusion_dict 字典,包含需要从 data_dict 中提取的键
fusion_dict = {
"fusion_type": 1,
"gate": 1,
"interval": 1,
"show_thres": 0.4
}
# 检查 data_dict 中是否存在对应的键,并更新 fusion_dict 中的值
if "fusion_type" in data_dict:
fusion_dict["fusion_type"] = data_dict["fusion_type"]
if "gate" in data_dict:
fusion_dict["gate"] = data_dict["gate"]
if "interval" in data_dict:
fusion_dict["interval"] = data_dict["interval"]
if "show_thres" in data_dict:
fusion_dict["show_thres"] = data_dict["show_thres"]
# Return the updated fusion_dict
return fusion_dict
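
For reference, the merge logic above can be reproduced with a plain dict merge; a minimal sketch (payload values are illustrative, not taken from a real task):

import json

defaults = {"fusion_type": 1, "gate": 1, "interval": 1, "show_thres": 0.4}
payload = json.loads('{"gate": 5, "show_thres": 0.6}')  # illustrative task payload

# Keys present in the payload override the defaults; everything else keeps its default.
merged = {**defaults, **{k: v for k, v in payload.items() if k in defaults}}
print(merged)  # {'fusion_type': 1, 'gate': 5, 'interval': 1, 'show_thres': 0.6}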

View File

@@ -0,0 +1,71 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fe-configmap
namespace: doriscluster
labels:
app.kubernetes.io/component: fe
data:
fe.conf: |
CUR_DATE=`date +%Y%m%d-%H%M%S`
# the output dir of stderr and stdout
LOG_DIR = ${DORIS_HOME}/log
JAVA_OPTS="-Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UseMembar -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xloggc:$DORIS_HOME/log/fe.gc.log.$CUR_DATE"
# For jdk 9+, this JAVA_OPTS will be used as default JVM options
JAVA_OPTS_FOR_JDK_9="-Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xlog:gc*:$DORIS_HOME/log/fe.gc.log.$CUR_DATE:time"
# INFO, WARN, ERROR, FATAL
sys_log_level = INFO
# NORMAL, BRIEF, ASYNC
sys_log_mode = NORMAL
# Default dirs to put jdbc drivers; default value is ${DORIS_HOME}/jdbc_drivers
# jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
http_port = 8030
arrow_flight_sql_port = 9090
rpc_port = 9020
query_port = 9030
edit_log_port = 9010
enable_fqdn_mode = true
---
apiVersion: v1
kind: ConfigMap
metadata:
name: be-configmap
namespace: doriscluster
labels:
app.kubernetes.io/component: be
data:
be.conf: |
CUR_DATE=`date +%Y%m%d-%H%M%S`
PPROF_TMPDIR="$DORIS_HOME/log/"
JAVA_OPTS="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xloggc:$DORIS_HOME/log/be.gc.log.$CUR_DATE -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -DJDBC_MIN_POOL=1 -DJDBC_MAX_POOL=100 -DJDBC_MAX_IDLE_TIME=300000 -DJDBC_MAX_WAIT_TIME=5000"
# For jdk 9+, this JAVA_OPTS will be used as default JVM options
JAVA_OPTS_FOR_JDK_9="-Xmx1024m -DlogPath=$DORIS_HOME/log/jni.log -Xlog:gc:$DORIS_HOME/log/be.gc.log.$CUR_DATE -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.java.command=DorisBE -XX:-CriticalJNINatives -DJDBC_MIN_POOL=1 -DJDBC_MAX_POOL=100 -DJDBC_MAX_IDLE_TIME=300000 -DJDBC_MAX_WAIT_TIME=5000"
# since 1.2, JAVA_HOME needs to be set to run the BE process.
# JAVA_HOME=/path/to/jdk/
# https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
# https://jemalloc.net/jemalloc.3.html
JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,lg_tcache_max:20,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
JEMALLOC_PROF_PRFIX=""
# INFO, WARNING, ERROR, FATAL
sys_log_level = INFO
# ports for admin, web, heartbeat service
be_port = 9060
webserver_port = 8040
heartbeat_service_port = 9050
arrow_flight_sql_port = 39091
brpc_port = 8060
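
The ports declared in the two files above are what the cluster ultimately exposes; a quick reachability sketch once the pods are up (the host is an assumption, e.g. after a port-forward or from inside the cluster):

import socket

# Ports from fe.conf (8030 http, 9030 query) and be.conf (8040 webserver, 9050 heartbeat).
HOST = "127.0.0.1"  # assumption: port-forwarded or in-cluster
for port in (8030, 9030, 8040, 9050):
    with socket.socket() as s:
        s.settimeout(2)
        ok = s.connect_ex((HOST, port)) == 0
        print(port, "open" if ok else "closed")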

View File

@@ -0,0 +1,101 @@
apiVersion: doris.selectdb.com/v1
kind: DorisCluster
metadata:
labels:
app.kubernetes.io/name: doriscluster
name: doriscluster-helm
namespace: doriscluster
spec:
feSpec:
replicas: 1
image: harbor.cdcyy.com.cn/cmii/doris.fe-ubuntu:2.1.6
limits:
cpu: 8
memory: 16Gi
requests:
cpu: 2
memory: 6Gi
configMapInfo:
# use kubectl create configmap fe-configmap --from-file=fe.conf
configMapName: fe-configmap
resolveKey: fe.conf
nodeSelector:
uavcloud.env: demo
persistentVolumes:
- mountPath: /opt/apache-doris/fe/doris-meta
name: doriscluster-storage0
persistentVolumeClaimSpec:
# when using a specific storageclass, set storageClassName accordingly; see the annotation example.
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteOnce
resources:
# notice: if the storage size is less than 5G, FE will not start normally.
requests:
storage: 300Gi
- mountPath: /opt/apache-doris/fe/log
name: doriscluster-storage1
persistentVolumeClaimSpec:
# when using a specific storageclass, set storageClassName accordingly; see the annotation example.
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
- mountPath: /opt/apache-doris/fe/jdbc_drivers
name: doriscluster-storage-fe-jdbc-drivers
persistentVolumeClaimSpec:
# when using a specific storageclass, set storageClassName accordingly; see the annotation example.
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
beSpec:
replicas: 3
image: harbor.cdcyy.com.cn/cmii/doris.be-ubuntu:2.1.6
limits:
cpu: 8
memory: 24Gi
requests:
cpu: 2
memory: 6Gi
configMapInfo:
# use kubectl create configmap be-configmap --from-file=be.conf
configMapName: be-configmap
resolveKey: be.conf
nodeSelector:
uavcloud.env: demo
persistentVolumes:
- mountPath: /opt/apache-doris/be/storage
name: doriscluster-storage2
persistentVolumeClaimSpec:
# when using a specific storageclass, set storageClassName accordingly; see the annotation example.
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 300Gi
- mountPath: /opt/apache-doris/be/log
name: doriscluster-storage3
persistentVolumeClaimSpec:
# when using a specific storageclass, set storageClassName accordingly; see the annotation example.
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
- mountPath: /opt/apache-doris/be/jdbc_drivers
name: doriscluster-storage-be-jdbc-drivers
persistentVolumeClaimSpec:
# when using a specific storageclass, set storageClassName accordingly; see the annotation example.
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
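
Once the operator reconciles this DorisCluster, the FE answers the MySQL protocol on query_port 9030 (see fe.conf above). A minimal smoke test in Python, assuming a kubectl port-forward to localhost and the default root user with an empty password (both assumptions; the FE service name is generated by the operator):

import pymysql  # pip install pymysql

# Assumes: kubectl -n doriscluster port-forward svc/<fe-service> 9030:9030
conn = pymysql.connect(host="127.0.0.1", port=9030, user="root", password="")
try:
    with conn.cursor() as cur:
        cur.execute("SHOW FRONTENDS")  # expect 1 row, matching feSpec.replicas
        print(cur.fetchall())
        cur.execute("SHOW BACKENDS")   # expect 3 rows, matching beSpec.replicas
        print(cur.fetchall())
finally:
    conn.close()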

File diff suppressed because it is too large

View File

@@ -0,0 +1,347 @@
# Source: doris-operator/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: serviceaccount
app.kubernetes.io/instance: controller-doris-operator-sa
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: doris-operator
namespace: doriscluster
---
# Source: doris-operator/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: doris-operator
rules:
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets/status
verbs:
- get
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- doris.selectdb.com
resources:
- dorisclusters
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- doris.selectdb.com
resources:
- dorisclusters/finalizers
verbs:
- update
- apiGroups:
- doris.selectdb.com
resources:
- dorisclusters/status
verbs:
- get
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
---
# Source: doris-operator/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: clusterrolebinding
app.kubernetes.io/instance: doris-operator-rolebinding
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: doris-operator-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: doris-operator
subjects:
- kind: ServiceAccount
name: doris-operator
namespace: doriscluster
---
# Source: doris-operator/templates/leader-election-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/name: role
app.kubernetes.io/instance: leader-election-role
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: leader-election-role
namespace: doriscluster
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: doris-operator/templates/leader-election-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: rolebinding
app.kubernetes.io/instance: leader-election-rolebinding
app.kubernetes.io/component: rbac
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
app.kubernetes.io/managed-by: Helm
name: leader-election-rolebinding
namespace: doriscluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: doris-operator
namespace: doriscluster
---
# Source: doris-operator/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: doris-operator
namespace: doriscluster
labels:
control-plane: doris-operator
app.kubernetes.io/name: deployment
app.kubernetes.io/instance: doris-operator
app.kubernetes.io/component: doris-operator
app.kubernetes.io/created-by: doris-operator
app.kubernetes.io/part-of: doris-operator
spec:
selector:
matchLabels:
control-plane: doris-operator
replicas: 1
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: doris-operator
labels:
control-plane: doris-operator
spec:
# TODO(user): Uncomment the following code to configure the nodeAffinity expression
# according to the platforms which are supported by your solution.
# It is considered best practice to support multiple architectures. You can
# build your manager image using the makefile target docker-buildx.
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/arch
# operator: In
# values:
# - amd64
# - arm64
# - ppc64le
# - s390x
# - key: kubernetes.io/os
# operator: In
# values:
# - linux
securityContext:
runAsNonRoot: true
# TODO(user): For common cases that do not require escalating privileges
# it is recommended to ensure that all your Pods/Containers are restrictive.
# More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
# Please uncomment the following code if your project does NOT have to work on old Kubernetes
# versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
# seccompProfile:
# type: RuntimeDefault
containers:
- command:
- /dorisoperator
args:
- --leader-elect
image: harbor.cdcyy.com.cn/cmii/doris.k8s-operator:1.3.1
name: dorisoperator
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
# TODO(user): Configure the resources accordingly based on the project requirements.
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources:
requests:
cpu: 2
memory: 4Gi
limits:
cpu: 2
memory: 4Gi
serviceAccountName: doris-operator
terminationGracePeriodSeconds: 10
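
If the operator pod misbehaves, the probe endpoints declared above can be hit directly; a short sketch assuming port 8081 is forwarded to localhost:

import requests  # pip install requests

# Assumes: kubectl -n doriscluster port-forward deploy/doris-operator 8081:8081
for path in ("/healthz", "/readyz"):
    resp = requests.get(f"http://127.0.0.1:8081{path}", timeout=3)
    print(path, resp.status_code, resp.text.strip())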

View File

@@ -0,0 +1,138 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: pyfusion-configmap
namespace: uavcloud-devflight
data:
config.yaml: |-
mqtt:
broker: "helm-emqxs"
port: 1883
username: "cmlc"
password: "4YPk*DS%+5"
topics:
mqtt_topic: "bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
sensor_topic: "fromcheck/DP74b4ef9fb4aaf269/device_data/FU_PAM/+"
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: cmii-uavms-pyfusion
namespace: uavcloud-devflight
labels:
app.kubernetes.io/app-version: 6.2.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
replicas: 1
selector:
matchLabels:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
template:
metadata:
creationTimestamp: null
labels:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
spec:
volumes:
- name: nfs-backend-log-volume
persistentVolumeClaim:
claimName: nfs-backend-log-pvc
- name: pyfusion-conf
configMap:
name: pyfusion-configmap
items:
- key: config.yaml
path: config.yaml
containers:
- name: cmii-uavms-pyfusion
image: 'harbor.cdcyy.com.cn/cmii/cmii-uavms-pyfusion:6.2.0'
ports:
- name: pod-port
containerPort: 8080
protocol: TCP
env:
- name: VERSION
value: 6.2.0
- name: NACOS_SYSTEM_CONFIG_NAME
value: cmii-backend-system
- name: NACOS_SERVICE_CONFIG_NAME
value: cmii-uavms-pyfusion
- name: NACOS_SERVER_ADDRESS
value: 'helm-nacos:8848'
- name: K8S_NAMESPACE
value: uavcloud-devflight
- name: APPLICATION_NAME
value: cmii-uavms-pyfusion
- name: NACOS_DISCOVERY_PORT
value: '8080'
- name: BIZ_CONFIG_GROUP
value: 6.2.0
- name: SYS_CONFIG_GROUP
value: 6.2.0
- name: IMAGE_VERSION
value: 6.2.0
resources:
limits:
cpu: '2'
memory: 3Gi
requests:
cpu: 200m
memory: 500Mi
volumeMounts:
- name: nfs-backend-log-volume
mountPath: /cmii/logs
subPath: uavcloud-devflight/cmii-uavms-pyfusion
- name: pyfusion-conf
mountPath: /app/config.yaml
subPath: config.yaml
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: { }
imagePullSecrets:
- name: harborsecret
affinity: { }
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: cmii-uavms-pyfusion
namespace: uavcloud-devflight
labels:
app.kubernetes.io/app-version: 6.2.0
app.kubernetes.io/managed-by: octopus-control
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
octopus/control: backend-app-1.0.0
spec:
ports:
- name: backend-tcp
protocol: TCP
port: 8080
targetPort: 8080
selector:
cmii.app: cmii-uavms-pyfusion
cmii.type: backend
type: ClusterIP
sessionAffinity: None
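
The broker, credentials, and topics pyfusion reads from config.yaml above can be exercised with a tiny publisher; a sketch using paho-mqtt, assuming it runs inside the cluster where helm-emqxs resolves (the device id and payload are illustrative):

import json
import paho.mqtt.client as mqtt  # pip install "paho-mqtt>=2"

client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)
client.username_pw_set("cmlc", "4YPk*DS%+5")  # credentials from pyfusion-configmap
client.connect("helm-emqxs", 1883)            # only resolvable in-cluster

# The trailing '+' in the configured topics is a single-level wildcard, so any device id matches.
client.publish("bridge/DP74b4ef9fb4aaf269/device_data/FU_PAM/device-001", json.dumps({"ping": 1}))
client.disconnect()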

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -29,6 +29,7 @@ metadata:
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/eventsh5)$ $1/ redirect;
rewrite ^(/flight-control)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
@@ -143,6 +144,11 @@ spec:
backend:
serviceName: cmii-uav-platform-eventsh5
servicePort: 9528
- path: /devflight/flight-control/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-flight-control
servicePort: 9528
- path: /devflight/hljtt/?(.*)
pathType: ImplementationSpecific
backend:

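Once the ingress controller reloads, the new flight-control route can be spot-checked from outside; a sketch assuming the devflight host from the tenant ConfigMaps below and that the frontend is already serving:

import requests

# Host taken from the tenant ingress-config ConfigMaps (lab.uavcmlc.com); path per the new rule.
resp = requests.get("https://lab.uavcmlc.com/devflight/flight-control/", timeout=5)
print(resp.status_code)  # expect 200 once cmii-uav-platform-flight-control is up
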
View File

@@ -2,43 +2,71 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
name: tenant-prefix-traffic
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "seniclive",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
name: tenant-prefix-armypeople
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
name: tenant-prefix-multiterminal
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
@@ -57,6 +85,20 @@ data:
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: uavcloud-devflight
@@ -71,272 +113,6 @@ data:
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-secenter
namespace: uavcloud-devflight
@@ -366,28 +142,14 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
name: tenant-prefix-logistics
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "multiterminal",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
@@ -407,90 +169,6 @@ data:
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: uavcloud-devflight
@@ -520,14 +198,14 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
name: tenant-prefix-uas
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "visualization",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
@@ -548,55 +226,391 @@ data:
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
name: tenant-prefix-base
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
name: tenant-prefix-media
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
name: tenant-prefix-open
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "hyper",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-awareness
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "awareness",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uavmsmanager
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uavmsmanager",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-renyike
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "renyike",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dikongzhixingh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "dikongzhixingh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: uavcloud-devflight
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "devflight",
CloudHOST: "lab.uavcmlc.com",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
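
All tenant-prefix ConfigMaps above are one template with three substitutions (name suffix, ApplicationShortName, AppClientId), the same pairs carried by the FrontendShortNameMaps and FrontendClientIdMaps additions below. A sketch that renders one such manifest from those inputs (template inferred from the manifests above):

TEMPLATE = """\
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-{short}
  namespace: {ns}
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {{
      TenantEnvironment: "{env}",
      CloudHOST: "{host}",
      ApplicationShortName: "{short}",
      AppClientId: "{client_id}"
    }}
"""

def render(short, client_id, ns="uavcloud-devflight", env="devflight", host="lab.uavcmlc.com"):
    return TEMPLATE.format(short=short, client_id=client_id, ns=ns, env=env, host=host)

print(render("flight-control", "empty"))  # the app this commit adds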

View File

@@ -19,6 +19,7 @@ metadata:
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/awareness)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/blockchain)$ $1/ redirect;
rewrite ^(/classification)$ $1/ redirect;
@@ -92,6 +93,11 @@ spec:
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /devflight/awareness/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-awareness
servicePort: 9528
- path: /devflight/base/?(.*)
pathType: ImplementationSpecific
backend:

View File

@@ -52,6 +52,7 @@ var CmiiBackendAppMap = map[string]string{
"cmii-sky-converge": "5.7.0",
"cmii-uav-sec-awareness": "5.7.0",
"cmii-uav-security-trace": "5.7.0",
"cmii-uavms-pyfusion": "5.7.0",
}
var CmiiFrontendAppMap = map[string]string{
@@ -88,6 +89,7 @@ var CmiiFrontendAppMap = map[string]string{
"cmii-uav-platform-dispatchh5": "5.2.0",
"cmii-uavms-platform-manager": "5.2.0",
"cmii-uav-platform-awareness": "5.2.0",
"cmii-uav-platform-flight-control": "5.2.0",
}
var IgnoreCmiiBackendAppName = map[string]string{
@@ -175,6 +177,10 @@ var MiddlewareAmd64 = []string{
"docker.107421.xyz/jerrychina2020/rke-tools:v0.175-linux",
"docker.107421.xyz/jerrychina2020/rke-tools:v0.175",
"docker.107421.xyz/library/busybox:latest",
"harbor.cdcyy.com.cn/cmii/doris.be-ubuntu:2.1.6",
"harbor.cdcyy.com.cn/cmii/doris.be-ubuntu:2.1.6",
"harbor.cdcyy.com.cn/cmii/doris.k8s-operator:1.3.1",
"harbor.cdcyy.com.cn/cmii/alpine:latest",
}
var Rancher1204Amd64 = []string{

View File

@@ -45,6 +45,7 @@ var FrontendShortNameMaps = map[string]string{
"cmii-uavms-platform-manager": "uavmsmanager",
"cmii-uav-platform-renyike": "renyike",
"cmii-uav-platform-awareness": "awareness",
"cmii-uav-platform-flight-control": "flight-control",
}
var FrontendClientIdMaps = map[string]string{
@@ -92,4 +93,5 @@ var FrontendClientIdMaps = map[string]string{
"cmii-uavms-platform-manager": "empty",
"cmii-uav-platform-renyike": "empty",
"cmii-uav-platform-awareness": "empty",
"cmii-uav-platform-flight-control": "empty",
}

View File

@@ -10,57 +10,57 @@ import (
*/
func TestFetchDependencyRepos_Middle(t *testing.T) {
errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList := C_DownloadCompressUploadDependency(true, false, false)
errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList := C_DownloadCompressUploadDependency(true, true, false)
utils.BeautifulPrintListWithTitle(errorPullImageList, "errorPullImageList")
utils.BeautifulPrintListWithTitle(errorGzipImageList, "errorGzipImageList")
utils.BeautifulPrintListWithTitle(realCmiiImageName, "realCmiiImageName")
utils.BeautifulPrintListWithTitle(allGzipFileNameList, "allGzipFileNameList")
}
func TestFetchDependencyRepos_RKE(t *testing.T) {
errorPullImageList, errorGzipImageList, realCmiiImageName, allGzipFileNameList := C_DownloadCompressUploadDependency(true, false, true)
utils.BeautifulPrintListWithTitle(errorPullImageList, "errorPullImageList")
utils.BeautifulPrintListWithTitle(errorGzipImageList, "errorGzipImageList")
utils.BeautifulPrintListWithTitle(realCmiiImageName, "realCmiiImageName")
utils.BeautifulPrintListWithTitle(allGzipFileNameList, "allGzipFileNameList")
}
func TestLoadSplitGzipImageToTargetHarbor(t *testing.T) {
errorLoadImageNameList, errorPushImageNameList := LoadSplitCmiiGzipImageToTargetHarbor("xmyd", DirectPushDeployHarborHost)
utils.BeautifulPrintListWithTitle(errorLoadImageNameList, "errorLoadImageNameList")
utils.BeautifulPrintListWithTitle(errorPushImageNameList, "errorPushImageNameList")
}
func TestLoadSplitDepGzipImageToTargetHarbor(t *testing.T) {
errorLoadImageNameList, errorPushImageNameList := LoadSplitDepGzipImageToTargetHarbor(DirectPushDeployHarborHost)
utils.BeautifulPrintListWithTitle(errorLoadImageNameList, "errorLoadImageNameList")
utils.BeautifulPrintListWithTitle(errorPushImageNameList, "errorPushImageNameList")
}
func TestPullFromEntityAndSyncConditionally(t *testing.T) {
// Core routine that downloads the images
sync := ImageSyncEntity{
DownloadCondition: &DownloadEntity{
ShouldDownloadImage: true,
ProjectName: "6.2.0_demo",
ProjectVersion: "6.2.0-demo",
CmiiNameTagList: []string{
CmiiNameTagList: []string{
//"cmii-uav-mqtthandler:5.4.0-bjdyt-052102",
},
FullNameImageList: []string{},
FullNameImageList: []string{},
DownloadAuthUserName: "",
DownloadAuthPassword: "",
},
CompressCondition: &CompressEntity{
ShouldCompressImageToGzip: true,
ShouldGzipSplit: true,
@@ -72,12 +72,12 @@ func TestPullFromEntityAndSyncConditionally(t *testing.T) {
DirectHarborHost: "harbor.wdd.io:8033",
ShouldDirectPushToHarbor: false,
}
// Call the function and grab the result; validate it against your actual expectations.
result := sync.PullFromEntityAndSyncConditionally()
utils.BeautifulPrint(result)
// Add assertions on the output, e.g.:
// ...additional verification logic...
}
@@ -89,7 +89,7 @@ func TestPullFromEntityAndSyncConditionally_ChongQingErJiPingTai(t *testing.T) {
ShouldDownloadImage: true,
ProjectName: "cqejpt",
ProjectVersion: "",
CmiiNameTagList: []string{
CmiiNameTagList: []string{
//"cmii-uav-mqtthandler:5.4.0-bjdyt-052102",
},
FullNameImageList: []string{
@@ -158,7 +158,7 @@ func TestPullFromEntityAndSyncConditionally_ChongQingErJiPingTai(t *testing.T) {
DownloadAuthUserName: "",
DownloadAuthPassword: "",
},
CompressCondition: &CompressEntity{
ShouldCompressImageToGzip: false,
ShouldGzipSplit: true,
@@ -170,43 +170,43 @@ func TestPullFromEntityAndSyncConditionally_ChongQingErJiPingTai(t *testing.T) {
DirectHarborHost: "chongqingshcis-a189ec98.ecis.chongqing-1.cmecloud.cn",
ShouldDirectPushToHarbor: true,
}
// Call the function and grab the result; validate it against your actual expectations.
result := sync.PullFromEntityAndSyncConditionally()
utils.BeautifulPrint(result)
// Add assertions on the output, e.g.:
// ...additional verification logic...
}
func TestDownloadLoadTagUpload(t *testing.T) {
localGzipFileList := []string{
"/root/octopus_image/middle",
"/root/octopus_image/rke",
"/root/octopus_image/xjyd",
}
for _, s := range localGzipFileList {
log.InfoF("start to sync => %s", s)
targetImageFullNameList := A_DownloadLoadTagUpload(false, "", "", s, "172.28.0.251:8033")
utils.BeautifulPrintListWithTitle(targetImageFullNameList, "targetImageFullNameList")
}
}
func TestConcatAndUniformCmiiImage(t *testing.T) {
// Mock fullImageList and cmiiImageList to exercise the function; set the data and expected result to match your case.
fullImageList := []string{"image3", "image4"}
cmiiImageList := []string{"image1", "image2"}
// Call the function and grab the result; validate it against your actual expectations.
result := concatAndUniformCmiiImage(fullImageList, cmiiImageList)
// Add assertions on the output, e.g.:
expectedResult := []string{"image3", "image4", image2.CmiiHarborPrefix + "image1", image2.CmiiHarborPrefix + "image2"}
if len(result) != len(expectedResult) {
@@ -228,9 +228,9 @@ func TestImageSyncEntity_PullFromEntityAndSyncConditionally(t *testing.T) {
},
DirectHarborHost: "36.134.71.138",
}
imageSyncResult := imageSyncEntity.PullFromEntityAndSyncConditionally()
utils.BeautifulPrint(imageSyncResult)
}

View File

@@ -154,16 +154,17 @@ func judgeCanConnectInternet() int {
// GetPublicInfo fetches the server's public-facing network info
func (p PublicInfo) GetPublicInfo() PublicInfo {
if CanConnectInternet() == InternetBaseLine {
// Cannot reach the internet; fall back to fake info
// Cannot reach the internet; fall back to fake info
fakePublicInfo := PublicInfo{
IPv4: "1.1.1.1",
IPv6: "2400::1",
Country: "CN",
City: "Shanghai",
ASN: "Wdd Inc",
Timezone: "Asia/Shanghai",
}
fakePublicInfo := PublicInfo{
IPv4: "1.1.1.1",
IPv6: "2400::1",
Country: "CN",
City: "Shanghai",
ASN: "Wdd Inc",
}
if CanConnectInternet() == InternetBaseLine {
// Persist the result
ConfigCache.Agent.Network.Public = fakePublicInfo
@@ -181,6 +182,8 @@ func (p PublicInfo) GetPublicInfo() PublicInfo {
req, err := http.NewRequest("GET", "https://ipinfo.io", nil)
if err != nil {
log.Error("failed to create request: %v", err)
return fakePublicInfo
}
// 设置请求头
@@ -190,7 +193,8 @@ func (p PublicInfo) GetPublicInfo() PublicInfo {
// 发送请求
resp, err := client.Do(req)
if err != nil {
log.Error("PublicInfo request failed: %v", err)
log.Error("PublicInfo request failed: %s", err.Error())
return fakePublicInfo
}
defer resp.Body.Close()

View File

@@ -3,12 +3,12 @@
rm -f /usr/local/bin/agent-wdd
rm -f /usr/local/bin/test-shell.sh
wget https://pan.107421.xyz/d/oracle-seoul-2/agent-wdd_linux_amd64 -O /usr/local/bin/agent-wdd
wget https://pan.107421.xyz/d/oracle-seoul-2/agent-wdd_linux_amd64 -qO /usr/local/bin/agent-wdd
chmod +x /usr/local/bin/agent-wdd
wget https://pan.107421.xyz/d/oracle-seoul-2/test-shell.sh -O /usr/local/bin/test-shell.sh
wget https://pan.107421.xyz/d/oracle-seoul-2/test-shell.sh -qO /usr/local/bin/test-shell.sh
chmod +x /usr/local/bin/test-shell.sh