Add the 雄安空能院 project
66-202505-浙江二级监管/0-批量脚本.sh (new file, 54 lines)
@@ -0,0 +1,54 @@
#!/bin/bash

host_ip_list=(192.168.10.20 192.168.10.21 192.168.10.22 192.168.10.23 192.168.10.16 192.168.10.17 192.168.10.18 192.168.10.19)

for server in "${host_ip_list[@]}";do
  echo "server is ${server}"

  # ssh -p 2202 root@"$server" "mkdir /root/.ssh && echo \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIgzVwaG6h4al71GhrM2zRmJ8hg7ySelDM0GXUz3SZiF wdd@cmii.com\" >> /root/.ssh/authorized_keys"

  ssh -p 2202 root@"$server" "echo yes !"

  # ssh -p 2202 root@"$server" "systemctl start nfs-client & systemctl start nfs-client & systemctl start nfs-common & systemctl enable nfs-common"
  # ssh -p 2202 root@"$server" "yum install -y chrony"
  # ssh -p 2202 root@"$server" "sed -i \"s/server 10.211.174.206 iburst/server 192.168.10.3 iburst/g\" /etc/chrony.conf"
  # ssh -p 2202 root@"$server" "systemctl restart chronyd && systemctl enable chronyd"
  # ssh -p 2202 root@"$server" "timedatectl && echo "" && chronyc sources"

  # ssh -p 2202 root@"$server" "cp /etc/ssh/sshd_config /etc/ssh/sshd_config_back_wdd"
  # ssh -p 2202 root@"$server" "rm /etc/ssh/sshd_config"
  # scp -P 2202 /etc/ssh/sshd_config root@"$server":/etc/ssh/sshd_config

  # ssh -p 2202 root@"$server" "systemctl restart sshd"

  # scp -P 2202 /root/yanko/files/docker-19.03.15.tgz root@"$server":/data/

  # ssh -p 2202 root@"$server" "sudo tar -xzvf /data/docker-19.03.15.tgz -C /usr/bin --strip-components=1"
  # ssh -p 2202 root@"$server" "systemctl restart docker && sleep 3 && docker info"

  # scp -P 2202 /root/agent-wdd_linux_amd64 root@"$server":/usr/local/bin/agent-wdd
  # ssh -p 2202 root@"$server" "chmod +x /usr/local/bin/agent-wdd"
  # ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base swap"
  # ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base firewall"
  # ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base selinux"
  # ssh -p 2202 root@"$server" "/usr/local/bin/agent-wdd base sysconfig"

  # ssh -p 2202 root@"$server" "docker stop \$(docker ps -aq)"
  # ssh -p 2202 root@"$server" "docker container rm \$(docker ps -aq)"

  ssh -p 2202 root@"$server" "reboot"

done
66-202505-浙江二级监管/ai-config.yaml (new file, 182 lines)
@@ -0,0 +1,182 @@
|
||||
app:
|
||||
env: default
|
||||
port: 2333
|
||||
log:
|
||||
level: DEBUG
|
||||
node:
|
||||
cluster:
|
||||
enable: false
|
||||
capacity:
|
||||
cpu: 8
|
||||
id: "auto"
|
||||
ip: "auto"
|
||||
redis:
|
||||
host: 192.168.10.3
|
||||
port: 36379
|
||||
database: 6
|
||||
password: Mcache@4522
|
||||
rabbitmq:
|
||||
host: 192.168.10.3
|
||||
port: 35672
|
||||
username: admin
|
||||
password: nYcRN91r._hj
|
||||
mqtt:
|
||||
host: 192.168.10.3
|
||||
port: 32883
|
||||
username: cmlc
|
||||
password: odD8#Ve7.B
|
||||
|
||||
ai_models:
|
||||
# When changing the models here, remember to also update the
# ModelStore configuration (core:tasking:store:ModelStore)
|
||||
local:
|
||||
drone:
|
||||
enable: true
|
||||
type: yolov8
|
||||
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/drone-20241223-t4.rt"
|
||||
classes: "drone, bird"
|
||||
sea:
|
||||
enable: true
|
||||
type: yolov8
|
||||
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/sea.engine"
|
||||
classes: "person, boat"
|
||||
people_vehicle:
|
||||
enable: true
|
||||
type: yolov8
|
||||
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/people_vehicle-t4-20240410.rt"
|
||||
classes: "others, people, crowd, motor, car, truck, bus, non-motor vehicle"
|
||||
vehicle:
|
||||
enable: true
|
||||
type: yolov8
|
||||
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/vehicle-20240328-t4.rt"
|
||||
classes: "others, people, crowd, motor, car, truck, bus, non-motor vehicle"
|
||||
inf_person:
|
||||
enable: true
|
||||
type: yolov8
|
||||
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/inf_person-20241129-t4.rt"
|
||||
classes: "person"
|
||||
ship:
|
||||
enable: true
|
||||
type: yolov8
|
||||
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/ship-20240306-t4.rt"
|
||||
classes: "ship"
|
||||
ship_with_flag:
|
||||
enable: true
|
||||
type: yolov8
|
||||
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/ship-20240306-t4.rt"
|
||||
classes: "ship, flag"
|
||||
drowning:
|
||||
enable: true
|
||||
type: yolov8
|
||||
path: "/cmii/cmlc-project-ai-streaming-engine/models/yolov8/drowning-20240222-t4.rt"
|
||||
classes: "drowner"
|
||||
dino:
|
||||
enable: false
|
||||
type: dino
|
||||
path: "/cmii/cmlc-project-ai-streaming-engine/models/dino/ground.engine"
|
||||
tokenizer: "bert-base-uncased"
|
||||
fake:
|
||||
# Do nothing. For tasks that do not need AI processing, such as adding text.
|
||||
enable: true
|
||||
type: fake
|
||||
|
||||
|
||||
task:
|
||||
plain:
|
||||
usage:
|
||||
cpu: 2
|
||||
laad:
|
||||
usage:
|
||||
cpu: 2
|
||||
mq:
|
||||
detail:
|
||||
topic: "event.ai.photoelectricity.warn.detail"
|
||||
exchange: "event.ai.photoelectricity.warn.detail"
|
||||
briefly:
|
||||
topic: "event.ai.photoelectricity.warn.briefly"
|
||||
exchange: "event.ai.photoelectricity.warn.briefly"
|
||||
count:
|
||||
usage:
|
||||
cpu: 2
|
||||
mq:
|
||||
topic: "aiVideo"
|
||||
exchange: "aiVideo"
|
||||
accumulation:
|
||||
usage:
|
||||
cpu: 2
|
||||
mq:
|
||||
topic: "aiVideo"
|
||||
exchange: "aiVideo"
|
||||
text:
|
||||
usage:
|
||||
cpu: 2
|
||||
|
||||
module:
|
||||
shm:
|
||||
ring_size: 20
|
||||
max_w: 2600
|
||||
max_h: 1500
|
||||
max_dets: 256
|
||||
smot:
|
||||
alive: 1
|
||||
tolerance: 256
|
||||
drop: 192
|
||||
hits: 2
|
||||
ffio:
|
||||
gpu:
|
||||
enable: true
|
||||
track:
|
||||
type: bytetrack
|
||||
bytetrack:
|
||||
fps: 30
|
||||
draw:
|
||||
colors:
|
||||
default: [ 0, 255, 0 ]
|
||||
drone: [ 229, 57, 57 ]
|
||||
bird: [ 97, 237, 38 ]
|
||||
motor: [ 92, 184, 255 ]
|
||||
car: [ 67, 144, 219 ]
|
||||
truck: [ 41, 115, 204 ]
|
||||
bus: [ 36, 93, 179 ]
|
||||
person: [ 255, 200, 51 ]
|
||||
people: [ 255, 200, 51 ]
|
||||
drowner: [ 0, 127, 245 ]
|
||||
ship: [ 102, 236, 204 ]
|
||||
region: [60, 110, 156]
|
||||
crossline: [60, 110, 156]
|
||||
text:
|
||||
padding: 4
|
||||
skip_threshold: 20
|
||||
|
||||
cmlc:
|
||||
mapper:
|
||||
"111":
|
||||
task: count
|
||||
model: vehicle
|
||||
"114":
|
||||
task: count
|
||||
model: vehicle
|
||||
"115":
|
||||
task: accumulation
|
||||
model: vehicle
|
||||
"112":
|
||||
task: count
|
||||
model: inf_person
|
||||
"113":
|
||||
task: count
|
||||
model: drowning
|
||||
"121":
|
||||
task: laad
|
||||
model: drone
|
||||
"122":
|
||||
task: count
|
||||
model: drone
|
||||
"131":
|
||||
task: count
|
||||
model: ship
|
||||
"201":
|
||||
task: text
|
||||
model: fake
|
||||
|
||||
debug:
|
||||
enable: true
|
||||
66-202505-浙江二级监管/cluster.yaml (new file, 324 lines)
@@ -0,0 +1,324 @@
|
||||
nodes:
|
||||
- address: 192.168.10.3
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- controlplane
|
||||
- etcd
|
||||
- worker
|
||||
internal_address: 192.168.10.3
|
||||
labels:
|
||||
ingress-deploy: true
|
||||
- address: 192.168.10.4
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.4
|
||||
labels:
|
||||
ingress-deploy: true
|
||||
mysql-deploy: true
|
||||
uavcloud.env: zjyd
|
||||
- address: 192.168.10.5
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.5
|
||||
labels:
|
||||
ingress-deploy: true
|
||||
uavcloud.env: zjyd
|
||||
- address: 192.168.10.6
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.6
|
||||
labels:
|
||||
ingress-deploy: true
|
||||
uavcloud.env: zjyd
|
||||
- address: 192.168.10.2
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.2
|
||||
labels:
|
||||
mongo.node: master
|
||||
- address: 192.168.10.8
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.8
|
||||
labels:
|
||||
uavcloud.env: zjyd
|
||||
- address: 192.168.10.9
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.9
|
||||
labels:
|
||||
redis.node: master
|
||||
- address: 192.168.10.20
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.20
|
||||
labels:
|
||||
uavcloud.env: zjyd
|
||||
- address: 192.168.10.21
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.21
|
||||
labels:
|
||||
uavcloud.env: zjyd
|
||||
- address: 192.168.10.22
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.22
|
||||
labels:
|
||||
uavcloud.env: zjyd
|
||||
- address: 192.168.10.23
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.23
|
||||
labels:
|
||||
uavcloud.env: zjyd
|
||||
- address: 192.168.10.16
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.16
|
||||
labels:
|
||||
doris.cluster: "true"
|
||||
- address: 192.168.10.17
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.17
|
||||
labels:
|
||||
doris.cluster: "true"
|
||||
- address: 192.168.10.18
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.18
|
||||
labels:
|
||||
doris.cluster: "true"
|
||||
- address: 192.168.10.19
|
||||
user: root
|
||||
port: 2202
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.10.19
|
||||
labels:
|
||||
doris.cluster: "true"
|
||||
|
||||
|
||||
|
||||
|
||||
authentication:
|
||||
strategy: x509
|
||||
sans:
|
||||
- "192.168.10.3"
|
||||
|
||||
private_registries:
|
||||
- url: 192.168.10.3:8033 # private image registry address
|
||||
user: admin
|
||||
password: "V2ryStr@ngPss"
|
||||
is_default: true
|
||||
|
||||
##############################################################################
|
||||
|
||||
# Defaults to false; if set to true, RKE will not raise an error when it detects an unsupported Docker version
|
||||
ignore_docker_version: true
|
||||
|
||||
# Set the name of the Kubernetes cluster
|
||||
cluster_name: rke-cluster
|
||||
|
||||
kubernetes_version: v1.20.4-rancher1-1
|
||||
|
||||
ssh_key_path: /root/.ssh/id_ed25519
|
||||
|
||||
# Enable running cri-dockerd
|
||||
# Up to Kubernetes 1.23, kubelet contained code called dockershim
|
||||
# to support Docker runtime. The replacement is called cri-dockerd
|
||||
# and should be enabled if you want to keep using Docker as your
|
||||
# container runtime
|
||||
# Only available to enable in Kubernetes 1.21 and higher
|
||||
enable_cri_dockerd: true
|
||||
|
||||
|
||||
services:
|
||||
etcd:
|
||||
backup_config:
|
||||
enabled: false
|
||||
interval_hours: 72
|
||||
retention: 3
|
||||
safe_timestamp: false
|
||||
timeout: 300
|
||||
creation: 12h
|
||||
extra_args:
|
||||
election-timeout: 5000
|
||||
heartbeat-interval: 500
|
||||
gid: 0
|
||||
retention: 72h
|
||||
snapshot: false
|
||||
uid: 0
|
||||
|
||||
kube-api:
|
||||
# IP range for any services created on Kubernetes
|
||||
# This must match the service_cluster_ip_range in kube-controller
|
||||
service_cluster_ip_range: 172.29.0.0/16
|
||||
# Expose a different port range for NodePort services
|
||||
service_node_port_range: 30000-40000
|
||||
always_pull_images: true
|
||||
pod_security_policy: false
|
||||
# Add additional arguments to the kubernetes API server
|
||||
# This WILL OVERRIDE any existing defaults
|
||||
extra_args:
|
||||
# Enable audit log to stdout
|
||||
audit-log-path: "-"
|
||||
# Increase number of delete workers
|
||||
delete-collection-workers: 3
|
||||
# Set the level of log output to warning-level
|
||||
v: 0
|
||||
# Using the EventRateLimit admission control enforces a limit on the number of events
|
||||
# that the API Server will accept in a given time period
|
||||
# Available as of v1.0.0
|
||||
event_rate_limit:
|
||||
enabled: false
|
||||
configuration:
|
||||
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
|
||||
kind: Configuration
|
||||
limits:
|
||||
- type: Server
|
||||
qps: 6000
|
||||
burst: 30000
|
||||
kube-controller:
|
||||
# CIDR pool used to assign IP addresses to pods in the cluster
|
||||
cluster_cidr: 172.28.0.0/16
|
||||
# IP range for any services created on Kubernetes
|
||||
# This must match the service_cluster_ip_range in kube-api
|
||||
service_cluster_ip_range: 172.29.0.0/16
|
||||
# Add additional arguments to the kubernetes API server
|
||||
# This WILL OVERRIDE any existing defaults
|
||||
extra_args:
|
||||
# Set the level of log output to debug-level
|
||||
v: 1
|
||||
# Enable RotateKubeletServerCertificate feature gate
|
||||
feature-gates: RotateKubeletServerCertificate=true
|
||||
# Enable TLS Certificates management
|
||||
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
|
||||
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
|
||||
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
|
||||
kubelet:
|
||||
# Base domain for the cluster
|
||||
cluster_domain: cluster.local
|
||||
# IP address for the DNS service endpoint
|
||||
cluster_dns_server: 172.29.0.10
|
||||
# Fail if swap is on
|
||||
fail_swap_on: false
|
||||
# Set max pods to 162 instead of the default 110
|
||||
extra_binds:
|
||||
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
|
||||
extra_args:
|
||||
max-pods: 162
|
||||
# Optionally define additional volume binds to a service
|
||||
scheduler:
|
||||
extra_args:
|
||||
# Set the level of log output to warning-level
|
||||
v: 0
|
||||
kubeproxy:
|
||||
extra_args:
|
||||
# Set the level of log output to warning-level
|
||||
v: 0
|
||||
|
||||
authorization:
|
||||
mode: rbac
|
||||
|
||||
addon_job_timeout: 30
|
||||
|
||||
network:
|
||||
options:
|
||||
flannel_backend_type: host-gw
|
||||
flannel_iface: ens192
|
||||
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
|
||||
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
|
||||
plugin: flannel
|
||||
|
||||
# Specify network plugin-in (canal, calico, flannel, weave, or none)
|
||||
# network:
|
||||
# mtu: 1440
|
||||
# options:
|
||||
# flannel_backend_type: vxlan
|
||||
# plugin: calico
|
||||
# tolerations:
|
||||
# - key: "node.kubernetes.io/unreachable"
|
||||
# operator: "Exists"
|
||||
# effect: "NoExecute"
|
||||
# tolerationseconds: 300
|
||||
# - key: "node.kubernetes.io/not-ready"
|
||||
# operator: "Exists"
|
||||
# effect: "NoExecute"
|
||||
# tolerationseconds: 300
|
||||
|
||||
# Specify DNS provider (coredns or kube-dns)
|
||||
dns:
|
||||
provider: coredns
|
||||
nodelocal:
|
||||
# Available as of v1.1.0
|
||||
update_strategy:
|
||||
strategy: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 20%
|
||||
maxSurge: 15%
|
||||
linear_autoscaler_params:
|
||||
cores_per_replica: 0.34
|
||||
nodes_per_replica: 4
|
||||
prevent_single_point_failure: true
|
||||
min: 2
|
||||
max: 3
|
||||
tolerations:
|
||||
- key: "node.kubernetes.io/unreachable"
|
||||
operator: "Exists"
|
||||
effect: "NoExecute"
|
||||
tolerationseconds: 300
|
||||
- key: "node.kubernetes.io/not-ready"
|
||||
operator: "Exists"
|
||||
effect: "NoExecute"
|
||||
tolerationseconds: 300
|
||||
|
||||
# Specify monitoring provider (metrics-server)
|
||||
monitoring:
|
||||
provider: metrics-server
|
||||
# Available as of v1.1.0
|
||||
update_strategy:
|
||||
strategy: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 8
|
||||
|
||||
ingress:
|
||||
provider: nginx
|
||||
default_backend: true
|
||||
http_port: 0
|
||||
https_port: 0
|
||||
extra_envs:
|
||||
- name: TZ
|
||||
value: Asia/Shanghai
|
||||
node_selector:
|
||||
ingress-deploy: true
|
||||
options:
|
||||
use-forwarded-headers: "true"
|
||||
66-202505-浙江二级监管/doris-部署/0-节点lable.sh (new file, 26 lines)
@@ -0,0 +1,26 @@
kubectl label nodes 192.168.10.17 192.168.10.18 192.168.10.19 doris-be-node=true

# 2. Create the storage directory on each node
for node in 192.168.10.17 192.168.10.18 192.168.10.19; do
  ssh -p 2202 root@"$node" "sudo mkdir -p /data/doris-be/storage && sudo chmod 777 /data/doris-be"
  ssh -p 2202 root@"$node" "ls /data/doris-be/"
done

kubectl label nodes 192.168.10.16 doris-fe-node=true
for node in 192.168.10.16; do
  ssh -p 2202 root@"$node" "sudo mkdir -p /data/doris-fe/storage && sudo chmod 777 /data/doris-fe"
  ssh -p 2202 root@"$node" "ls /data/doris-fe/"
done

# The UAS workloads may only run on these nodes, to guard against duplicate deployments
kubectl label nodes 192.168.10.20 192.168.10.21 192.168.10.22 192.168.10.23 uavcloud.env=zjejpt-uas

# RabbitMQ has to be pinned to a specific node
kubectl label nodes 192.168.10.8 rabbitmq.node=master
kubectl label nodes 192.168.10.8 emqx.node=master
kubectl label nodes 192.168.10.9 redis.node=master
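# Optional verification sketch (not part of the original script): confirm the
# labels landed on the intended nodes before deploying the Doris manifests.
kubectl get nodes -l doris-be-node=true
kubectl get nodes -l doris-fe-node=true
kubectl get nodes --show-labels | grep -E 'uavcloud.env|rabbitmq.node|redis.node'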
66-202505-浙江二级监管/doris-部署/doris-all-service.yaml (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
namespace: zjejpt-uas
|
||||
name: doris-cluster-be-internal
|
||||
labels:
|
||||
app.kubernetes.io/component: doris-cluster-be-internal
|
||||
spec:
|
||||
ports:
|
||||
- name: heartbeat-port
|
||||
protocol: TCP
|
||||
port: 9050
|
||||
targetPort: 9050
|
||||
selector:
|
||||
app.kubernetes.io/component: doris-cluster-be
|
||||
clusterIP: None
|
||||
type: ClusterIP
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: doris-cluster-be-service
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
app.kubernetes.io/component: doris-cluster-be
|
||||
spec:
|
||||
ports:
|
||||
- name: be-port
|
||||
protocol: TCP
|
||||
port: 9060
|
||||
targetPort: 9060
|
||||
nodePort: 32189
|
||||
- name: webserver-port
|
||||
protocol: TCP
|
||||
port: 8040
|
||||
targetPort: 8040
|
||||
nodePort: 31624
|
||||
- name: heartbeat-port
|
||||
protocol: TCP
|
||||
port: 9050
|
||||
targetPort: 9050
|
||||
nodePort: 31625
|
||||
- name: brpc-port
|
||||
protocol: TCP
|
||||
port: 8060
|
||||
targetPort: 8060
|
||||
nodePort: 31627
|
||||
selector:
|
||||
app.kubernetes.io/component: doris-cluster-be
|
||||
type: NodePort
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: doris-cluster-fe-internal
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
app.kubernetes.io/component: doris-cluster-fe
|
||||
spec:
|
||||
ports:
|
||||
- name: query-port
|
||||
protocol: TCP
|
||||
port: 9030
|
||||
targetPort: 9030
|
||||
selector:
|
||||
app.kubernetes.io/component: doris-cluster-fe
|
||||
clusterIP: None
|
||||
type: ClusterIP
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: doris-cluster-fe-service
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
app.kubernetes.io/component: doris-cluster-fe
|
||||
spec:
|
||||
ports:
|
||||
- name: http-port
|
||||
protocol: TCP
|
||||
port: 8030
|
||||
targetPort: 8030
|
||||
nodePort: 31620
|
||||
- name: rpc-port
|
||||
protocol: TCP
|
||||
port: 9020
|
||||
targetPort: 9020
|
||||
nodePort: 31621
|
||||
- name: query-port
|
||||
protocol: TCP
|
||||
port: 9030
|
||||
targetPort: 9030
|
||||
nodePort: 31622
|
||||
- name: edit-log-port
|
||||
protocol: TCP
|
||||
port: 9010
|
||||
targetPort: 9010
|
||||
nodePort: 31623
|
||||
selector:
|
||||
app.kubernetes.io/component: doris-cluster-fe
|
||||
type: NodePort
|
||||
---
|
||||
66-202505-浙江二级监管/doris-部署/doris-be-configmap.yaml (new file, 82 lines)
@@ -0,0 +1,82 @@
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: doris-cluster-be-conf
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
app.kubernetes.io/component: be
|
||||
data:
|
||||
be.conf: >
|
||||
CUR_DATE=`date +%Y%m%d-%H%M%S`
|
||||
|
||||
# Log dir
|
||||
LOG_DIR="${DORIS_HOME}/log/"
|
||||
|
||||
# For jdk 8
|
||||
JAVA_OPTS="-Dfile.encoding=UTF-8 -Xmx2048m -DlogPath=$LOG_DIR/jni.log -Xloggc:$LOG_DIR/be.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Djavax.security.auth.useSubjectCredsOnly=false -Dsun.security.krb5.debug=true -Dsun.java.command=DorisBE -XX:-CriticalJNINatives"
|
||||
|
||||
# Set your own JAVA_HOME
|
||||
# JAVA_HOME=/path/to/jdk/
|
||||
|
||||
# https://github.com/apache/doris/blob/master/docs/zh-CN/community/developer-guide/debug-tool.md#jemalloc-heap-profile
|
||||
# https://jemalloc.net/jemalloc.3.html  jemalloc memory allocator settings
|
||||
JEMALLOC_CONF="percpu_arena:percpu,background_thread:true,metadata_thp:auto,muzzy_decay_ms:15000,dirty_decay_ms:15000,oversize_threshold:0,prof:false,lg_prof_interval:32,lg_prof_sample:19,prof_gdump:false,prof_accum:false,prof_leak:false,prof_final:false"
|
||||
JEMALLOC_PROF_PRFIX=""
|
||||
|
||||
# ports for admin, web, heartbeat service
|
||||
be_port = 9060
|
||||
webserver_port = 8040
|
||||
heartbeat_service_port = 9050
|
||||
brpc_port = 8060
|
||||
arrow_flight_sql_port = -1
|
||||
|
||||
# HTTPS configures
|
||||
enable_https = false
|
||||
# path of certificate in PEM format.
|
||||
#ssl_certificate_path = "$DORIS_HOME/conf/cert.pem"
|
||||
# path of private key in PEM format.
|
||||
#ssl_private_key_path = "$DORIS_HOME/conf/key.pem"
|
||||
|
||||
# Choose one if there are more than one ip except loopback address.
|
||||
# Note that there should at most one ip match this list.
|
||||
# If no ip match this rule, will choose one randomly.
|
||||
# use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
|
||||
# Default value is empty.
|
||||
# priority_networks = 10.10.10.0/24;192.168.0.0/16
|
||||
|
||||
# data root path, separate by ';'
|
||||
# You can specify the storage type for each root path, HDD (cold data) or SSD (hot data)
|
||||
# eg:
|
||||
# storage_root_path = /home/disk1/doris;/home/disk2/doris;/home/disk2/doris
|
||||
# storage_root_path = /home/disk1/doris,medium:SSD;/home/disk2/doris,medium:SSD;/home/disk2/doris,medium:HDD
|
||||
# /home/disk2/doris,medium:HDD(default)
|
||||
#
|
||||
# you also can specify the properties by setting '<property>:<value>', separate by ','
|
||||
# property 'medium' has a higher priority than the extension of path
|
||||
#
|
||||
# Default value is ${DORIS_HOME}/storage, you should create it by hand.
|
||||
# storage_root_path = ${DORIS_HOME}/storage
|
||||
|
||||
# Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
|
||||
# jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
|
||||
|
||||
# Advanced configurations
|
||||
# INFO, WARNING, ERROR, FATAL
|
||||
sys_log_level = INFO
|
||||
# sys_log_roll_mode = SIZE-MB-1024
|
||||
# sys_log_roll_num = 10
|
||||
# sys_log_verbose_modules = *
|
||||
# log_buffer_level = -1
|
||||
|
||||
# aws sdk log level
|
||||
# Off = 0,
|
||||
# Fatal = 1,
|
||||
# Error = 2,
|
||||
# Warn = 3,
|
||||
# Info = 4,
|
||||
# Debug = 5,
|
||||
# Trace = 6
|
||||
# Default to turn off aws sdk log, because aws sdk errors that need to be cared will be output through Doris logs
|
||||
#aws_log_level=0
|
||||
## If you are not running in aws cloud, you can disable EC2 metadata
|
||||
#AWS_EC2_METADATA_DISABLED=false
|
||||
66-202505-浙江二级监管/doris-部署/doris-be-statefulset.yaml (new file, 208 lines)
@@ -0,0 +1,208 @@
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: doris-cluster-be
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
app.kubernetes.io/component: doris-cluster-be
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/component: doris-cluster-be
|
||||
template:
|
||||
metadata:
|
||||
name: doris-cluster-be
|
||||
labels:
|
||||
app.kubernetes.io/component: doris-cluster-be
|
||||
spec:
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- "192.168.10.17"
|
||||
- "192.168.10.18"
|
||||
- "192.168.10.19"
|
||||
- key: doris-be-node
|
||||
operator: Exists
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app.kubernetes.io/component
|
||||
operator: In
|
||||
values: [ "doris-cluster-be" ]
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
volumes:
|
||||
- name: be-local-storage
|
||||
hostPath:
|
||||
path: /data/doris-be/storage
|
||||
type: DirectoryOrCreate
|
||||
- name: podinfo
|
||||
downwardAPI:
|
||||
items:
|
||||
- path: labels
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.labels
|
||||
- path: annotations
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.annotations
|
||||
defaultMode: 420
|
||||
initContainers:
|
||||
- name: pod-ordinal-init
|
||||
image: 192.168.10.3:8033/cmii/alpine:1.0.0
|
||||
command: [ 'sh', '-c' ]
|
||||
args:
|
||||
- |
|
||||
# Get the pod ordinal
|
||||
POD_ORDINAL=$(echo ${POD_NAME} | awk -F- '{print $NF}')
|
||||
|
||||
# Map the node name to the expected ordinal
|
||||
case ${NODE_NAME} in
|
||||
"192.168.10.17") ORDINAL=0 ;;
|
||||
"192.168.10.18") ORDINAL=1 ;;
|
||||
"192.168.10.19") ORDINAL=2 ;;
|
||||
esac
|
||||
|
||||
# Verify that the ordinals match
|
||||
if [ "$POD_ORDINAL" != "$ORDINAL" ]; then
|
||||
echo "ERROR: Pod ordinal ${POD_ORDINAL} not match node ${NODE_NAME}"
|
||||
exit 1
|
||||
fi
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: default-init
|
||||
image: '192.168.10.3:8033/cmii/alpine:1.0.0'
|
||||
command:
|
||||
- /bin/sh
|
||||
args:
|
||||
- '-c'
|
||||
- sysctl -w vm.max_map_count=2000000 && swapoff -a
|
||||
resources:
|
||||
limits:
|
||||
cpu: '2'
|
||||
memory: 2Gi
|
||||
requests:
|
||||
cpu: '1'
|
||||
memory: 1Gi
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true
|
||||
containers:
|
||||
- name: be
|
||||
image: '192.168.10.3:8033/cmii/doris.be-amd64:2.1.6'
|
||||
command:
|
||||
- /opt/apache-doris/be_entrypoint.sh
|
||||
args:
|
||||
- $(ENV_FE_ADDR)
|
||||
ports:
|
||||
- name: be-port
|
||||
containerPort: 9060
|
||||
protocol: TCP
|
||||
- name: webserver-port
|
||||
containerPort: 8040
|
||||
protocol: TCP
|
||||
- name: heartbeat-port
|
||||
containerPort: 9050
|
||||
protocol: TCP
|
||||
- name: brpc-port
|
||||
containerPort: 8060
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.name
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: status.podIP
|
||||
- name: HOST_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: status.hostIP
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.namespace
|
||||
- name: CONFIGMAP_MOUNT_PATH
|
||||
value: /etc/doris
|
||||
- name: USER
|
||||
value: root
|
||||
- name: DORIS_ROOT
|
||||
value: /opt/apache-doris
|
||||
- name: ENV_FE_ADDR
|
||||
value: doris-cluster-fe-service
|
||||
- name: FE_QUERY_PORT
|
||||
value: '9030'
|
||||
resources:
|
||||
limits:
|
||||
cpu: '16'
|
||||
memory: 32Gi
|
||||
requests:
|
||||
cpu: '8'
|
||||
memory: 32Gi
|
||||
volumeMounts:
|
||||
- name: be-local-storage
|
||||
mountPath: /opt/apache-doris/be/storage
|
||||
- name: be-local-storage
|
||||
mountPath: /opt/apache-doris/be/log
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 9050
|
||||
initialDelaySeconds: 80
|
||||
timeoutSeconds: 180
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /api/health
|
||||
port: 8040
|
||||
scheme: HTTP
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
startupProbe:
|
||||
tcpSocket:
|
||||
port: 9050
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 60
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /opt/apache-doris/be_prestop.sh
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: IfNotPresent
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
dnsPolicy: ClusterFirst
|
||||
securityContext: {}
|
||||
serviceName: doris-cluster-be-internal
|
||||
podManagementPolicy: Parallel
|
||||
@@ -0,0 +1,188 @@
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: doris-cluster-be
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
app.kubernetes.io/component: doris-cluster-be
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/component: doris-cluster-be
|
||||
template:
|
||||
metadata:
|
||||
name: doris-cluster-be
|
||||
labels:
|
||||
app.kubernetes.io/component: doris-cluster-be
|
||||
spec:
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
volumes:
|
||||
- name: podinfo
|
||||
downwardAPI:
|
||||
items:
|
||||
- path: labels
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.labels
|
||||
- path: annotations
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.annotations
|
||||
defaultMode: 420
|
||||
- name: doris-cluster-be-conf
|
||||
configMap:
|
||||
name: doris-cluster-be-conf
|
||||
defaultMode: 420
|
||||
initContainers:
|
||||
- name: default-init
|
||||
image: '192.168.10.3:8033/cmii/alpine:1.0.0'
|
||||
command:
|
||||
- /bin/sh
|
||||
args:
|
||||
- '-c'
|
||||
- sysctl -w vm.max_map_count=2000000 && swapoff -a
|
||||
resources:
|
||||
limits:
|
||||
cpu: '2'
|
||||
memory: 2Gi
|
||||
requests:
|
||||
cpu: '1'
|
||||
memory: 1Gi
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true
|
||||
containers:
|
||||
- name: be
|
||||
image: '192.168.10.3:8033/cmii/doris.be-amd64:2.1.6'
|
||||
command:
|
||||
- /opt/apache-doris/be_entrypoint.sh
|
||||
args:
|
||||
- $(ENV_FE_ADDR)
|
||||
ports:
|
||||
- name: be-port
|
||||
containerPort: 9060
|
||||
protocol: TCP
|
||||
- name: webserver-port
|
||||
containerPort: 8040
|
||||
protocol: TCP
|
||||
- name: heartbeat-port
|
||||
containerPort: 9050
|
||||
protocol: TCP
|
||||
- name: brpc-port
|
||||
containerPort: 8060
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.name
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: status.podIP
|
||||
- name: HOST_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: status.hostIP
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.namespace
|
||||
- name: CONFIGMAP_MOUNT_PATH
|
||||
value: /etc/doris
|
||||
- name: USER
|
||||
value: root
|
||||
- name: DORIS_ROOT
|
||||
value: /opt/apache-doris
|
||||
- name: ENV_FE_ADDR
|
||||
value: doris-cluster-fe-service
|
||||
- name: FE_QUERY_PORT
|
||||
value: '9030'
|
||||
resources:
|
||||
limits:
|
||||
cpu: '16'
|
||||
memory: 32Gi
|
||||
requests:
|
||||
cpu: '8'
|
||||
memory: 32Gi
|
||||
volumeMounts:
|
||||
- name: podinfo
|
||||
mountPath: /etc/podinfo
|
||||
- name: be-storage
|
||||
mountPath: /opt/apache-doris/be/storage
|
||||
- name: be-storage
|
||||
mountPath: /opt/apache-doris/be/log
|
||||
- name: doris-cluster-be-conf
|
||||
mountPath: /etc/doris
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 9050
|
||||
initialDelaySeconds: 80
|
||||
timeoutSeconds: 180
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /api/health
|
||||
port: 8040
|
||||
scheme: HTTP
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
startupProbe:
|
||||
tcpSocket:
|
||||
port: 9050
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 60
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /opt/apache-doris/be_prestop.sh
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: IfNotPresent
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
dnsPolicy: ClusterFirst
|
||||
securityContext: {}
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app.kubernetes.io/component
|
||||
operator: In
|
||||
values: [ "doris-cluster-be" ]
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: doris-be-node
|
||||
operator: In
|
||||
values: [ "true" ]
|
||||
schedulerName: default-scheduler
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: be-storage
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: "local-storage"
|
||||
resources:
|
||||
requests:
|
||||
storage: 1500Gi
|
||||
serviceName: doris-cluster-be-internal
|
||||
podManagementPolicy: Parallel
|
||||
66-202505-浙江二级监管/doris-部署/doris-fe-configmap.yaml (new file, 67 lines)
@@ -0,0 +1,67 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: doris-cluster-fe-conf
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
app.kubernetes.io/component: fe
|
||||
data:
|
||||
fe.conf: |
|
||||
#####################################################################
|
||||
## The uppercase properties are read and exported by bin/start_fe.sh.
|
||||
## To see all Frontend configurations,
|
||||
## see fe/src/org/apache/doris/common/Config.java
|
||||
#####################################################################
|
||||
|
||||
CUR_DATE=`date +%Y%m%d-%H%M%S`
|
||||
|
||||
# Log dir
|
||||
LOG_DIR = ${DORIS_HOME}/log
|
||||
|
||||
# For jdk 8
|
||||
JAVA_OPTS="-Dfile.encoding=UTF-8 -Djavax.security.auth.useSubjectCredsOnly=false -Xss4m -Xmx8192m -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:$LOG_DIR/log/fe.gc.log.$CUR_DATE -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=50M -Dlog4j2.formatMsgNoLookups=true"
|
||||
|
||||
# Set your own JAVA_HOME
|
||||
# JAVA_HOME=/path/to/jdk/
|
||||
|
||||
##
|
||||
## the lowercase properties are read by main program.
|
||||
##
|
||||
|
||||
# store metadata, must be created before start FE.
|
||||
# Default value is ${DORIS_HOME}/doris-meta
|
||||
# meta_dir = ${DORIS_HOME}/doris-meta
|
||||
|
||||
# Default dirs to put jdbc drivers,default value is ${DORIS_HOME}/jdbc_drivers
|
||||
# jdbc_drivers_dir = ${DORIS_HOME}/jdbc_drivers
|
||||
|
||||
http_port = 8030
|
||||
rpc_port = 9020
|
||||
query_port = 9030
|
||||
edit_log_port = 9010
|
||||
arrow_flight_sql_port = -1
|
||||
|
||||
# Choose one if there are more than one ip except loopback address.
|
||||
# Note that there should at most one ip match this list.
|
||||
# If no ip match this rule, will choose one randomly.
|
||||
# use CIDR format, e.g. 10.10.10.0/24 or IP format, e.g. 10.10.10.1
|
||||
# Default value is empty.
|
||||
# priority_networks = 10.10.10.0/24;192.168.0.0/16
|
||||
|
||||
# Advanced configurations
|
||||
# log_roll_size_mb = 1024
|
||||
# INFO, WARN, ERROR, FATAL
|
||||
sys_log_level = INFO
|
||||
# NORMAL, BRIEF, ASYNC: output modes of the FE log. NORMAL (the default) logs synchronously and includes caller location info; ASYNC logs asynchronously and includes location info; BRIEF logs asynchronously without location info. Performance increases in that order.
|
||||
sys_log_mode = ASYNC
|
||||
# sys_log_roll_num = 10
|
||||
# sys_log_verbose_modules = org.apache.doris
|
||||
# audit_log_dir = $LOG_DIR
|
||||
# audit_log_modules = slow_query, query
|
||||
# audit_log_roll_num = 10
|
||||
# meta_delay_toleration_second = 10
|
||||
# qe_max_connection = 1024
|
||||
# qe_query_timeout_second = 300
|
||||
# qe_slow_log_ms = 5000
|
||||
# Fully Qualified Domain Name: when enabled, the nodes communicate with each other via FQDN
|
||||
enable_fqdn_mode = true
|
||||
66-202505-浙江二级监管/doris-部署/doris-fe-statusfulset.yaml (new file, 160 lines)
@@ -0,0 +1,160 @@
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: doris-cluster-fe
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
app.kubernetes.io/component: doris-cluster-fe
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/component: doris-cluster-fe
|
||||
template:
|
||||
metadata:
|
||||
name: doris-cluster-fe
|
||||
labels:
|
||||
app.kubernetes.io/component: doris-cluster-fe
|
||||
spec:
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
volumes:
|
||||
- name: fe-local-storage
|
||||
hostPath:
|
||||
path: /data/doris-fe/storage
|
||||
type: DirectoryOrCreate
|
||||
- name: podinfo
|
||||
downwardAPI:
|
||||
items:
|
||||
- path: labels
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.labels
|
||||
- path: annotations
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.annotations
|
||||
defaultMode: 420
|
||||
- name: doris-cluster-fe-conf
|
||||
configMap:
|
||||
name: doris-cluster-fe-conf
|
||||
defaultMode: 420
|
||||
containers:
|
||||
- name: doris-cluster-fe
|
||||
image: '192.168.10.3:8033/cmii/doris.fe-amd64:2.1.6'
|
||||
command:
|
||||
- /opt/apache-doris/fe_entrypoint.sh
|
||||
args:
|
||||
- $(ENV_FE_ADDR)
|
||||
ports:
|
||||
- name: http-port
|
||||
containerPort: 8030
|
||||
protocol: TCP
|
||||
- name: rpc-port
|
||||
containerPort: 9020
|
||||
protocol: TCP
|
||||
- name: query-port
|
||||
containerPort: 9030
|
||||
protocol: TCP
|
||||
- name: edit-log-port
|
||||
containerPort: 9010
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.name
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: status.podIP
|
||||
- name: HOST_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: status.hostIP
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.namespace
|
||||
- name: CONFIGMAP_MOUNT_PATH
|
||||
value: /etc/doris
|
||||
- name: USER
|
||||
value: root
|
||||
- name: DORIS_ROOT
|
||||
value: /opt/apache-doris
|
||||
- name: ENV_FE_ADDR
|
||||
value: doris-cluster-fe-service
|
||||
- name: FE_QUERY_PORT
|
||||
value: '9030'
|
||||
- name: ELECT_NUMBER
|
||||
value: '3'
|
||||
resources:
|
||||
limits:
|
||||
cpu: '16'
|
||||
memory: 32Gi
|
||||
requests:
|
||||
cpu: '8'
|
||||
memory: 32Gi
|
||||
volumeMounts:
|
||||
- name: podinfo
|
||||
mountPath: /etc/podinfo
|
||||
- name: fe-local-storage
|
||||
mountPath: /opt/apache-doris/fe/log
|
||||
- name: fe-local-storage
|
||||
mountPath: /opt/apache-doris/fe/doris-meta
|
||||
- name: doris-cluster-fe-conf
|
||||
mountPath: /etc/doris
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 9030
|
||||
initialDelaySeconds: 80
|
||||
timeoutSeconds: 180
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /api/health
|
||||
port: 8030
|
||||
scheme: HTTP
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
startupProbe:
|
||||
tcpSocket:
|
||||
port: 9030
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 60
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /opt/apache-doris/fe_prestop.sh
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: IfNotPresent
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
dnsPolicy: ClusterFirst
|
||||
securityContext: {}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- "192.168.10.16"
|
||||
- key: doris-fe-node
|
||||
operator: Exists
|
||||
schedulerName: default-scheduler
|
||||
serviceName: doris-cluster-fe-internal
|
||||
podManagementPolicy: Parallel
|
||||
66-202505-浙江二级监管/doris-部署/doris-local-pv.yaml (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: local-storage
|
||||
provisioner: kubernetes.io/no-provisioner
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
reclaimPolicy: Retain
|
||||
allowedTopologies:
|
||||
- matchLabelExpressions:
|
||||
- key: doris-be-node
|
||||
values: ["true"]
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: doris-be-pv-node1
|
||||
spec:
|
||||
capacity:
|
||||
storage: 1500Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: local-storage
|
||||
local:
|
||||
path: /data/doris-be/storage
|
||||
nodeAffinity:
|
||||
required:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values: ["192.168.10.17"]
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: doris-be-pv-node2
|
||||
spec:
|
||||
capacity:
|
||||
storage: 1500Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: local-storage
|
||||
local:
|
||||
path: /data/doris-be/storage
|
||||
nodeAffinity:
|
||||
required:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values: ["192.168.10.18"]
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: doris-be-pv-node3
|
||||
spec:
|
||||
capacity:
|
||||
storage: 1500Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: local-storage
|
||||
local:
|
||||
path: /data/doris-be/storage
|
||||
nodeAffinity:
|
||||
required:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values: ["192.168.10.19"]
|
||||
|
||||
66-202505-浙江二级监管/install_docker_offline.sh (new file, 103 lines)
@@ -0,0 +1,103 @@
#!/bin/bash
set -e

# Variables
DOCKER_TAR="docker-25.0.0.tgz"
SYSTEMD_DIR="/lib/systemd/system"
BIN_DIR="/usr/local/bin"

# 0. Stop old services, if any
sudo systemctl stop docker containerd.socket containerd 2>/dev/null || true

# 1. Extract the Docker binary package
echo "Extracting the Docker binary package..."
sudo tar -xzvf ${DOCKER_TAR} -C ${BIN_DIR} --strip-components=1

# 2. Make sure the binaries are executable
sudo chmod +x ${BIN_DIR}/{containerd,ctr,dockerd,docker,runc}

# 3. Configure containerd.service
# (unquoted heredoc so that ${BIN_DIR} is expanded when the unit file is written)
echo "Configuring the containerd service..."
cat > ${SYSTEMD_DIR}/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=${BIN_DIR}/containerd
KillMode=process
Delegate=yes
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity

[Install]
WantedBy=multi-user.target
EOF

# 4. Configure docker.service
# (unquoted heredoc so that ${BIN_DIR} is expanded; \$MAINPID is escaped so it
# reaches systemd literally)
echo "Configuring the Docker service..."
cat > ${SYSTEMD_DIR}/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target docker.socket containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service

[Service]
Type=notify
ExecStart=${BIN_DIR}/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500

[Install]
WantedBy=multi-user.target
EOF

# 5. Configure docker.socket
echo "Configuring the Docker socket..."
cat > ${SYSTEMD_DIR}/docker.socket <<'EOF'
[Unit]
Description=Docker Socket for the API
PartOf=docker.service

[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF

# 6. Create the docker group
echo "Configuring the user group..."
sudo groupadd -f docker
sudo usermod -aG docker $USER 2>/dev/null && echo "Added user $USER to the docker group"

# 7. Enable and start the services
echo "Starting services..."
sudo systemctl daemon-reload
sudo systemctl enable --now containerd docker

# 8. Verify the installation
echo -e "\nService status:"
sudo systemctl status containerd docker | grep "Active:"
echo -e "\nDocker version:"
${BIN_DIR}/docker --version
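# A possible way to roll this installer out to each host, reusing the
# non-standard SSH port (2202) used elsewhere in this commit. The host IP and
# file locations below are placeholders, not values taken from the repository.
scp -P 2202 docker-25.0.0.tgz install_docker_offline.sh root@192.168.10.20:/root/
ssh -p 2202 root@192.168.10.20 "cd /root && bash install_docker_offline.sh"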
66-202505-浙江二级监管/sshd_config (new file, 143 lines)
@@ -0,0 +1,143 @@
|
||||
# $OpenBSD: sshd_config,v 1.104 2021/07/02 05:11:21 dtucker Exp $
|
||||
|
||||
# This is the sshd server system-wide configuration file. See
|
||||
# sshd_config(5) for more information.
|
||||
|
||||
# This sshd was compiled with PATH=/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin
|
||||
|
||||
# The strategy used for options in the default sshd_config shipped with
|
||||
# OpenSSH is to specify options with their default value where
|
||||
# possible, but leave them commented. Uncommented options override the
|
||||
# default value.
|
||||
|
||||
# To modify the system-wide sshd configuration, create a *.conf file under
|
||||
# /etc/ssh/sshd_config.d/ which will be automatically included below
|
||||
#Include /etc/ssh/sshd_config.d/*.conf
|
||||
|
||||
# If you want to change the port on a SELinux system, you have to tell
|
||||
# SELinux about this change.
|
||||
# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
|
||||
#
|
||||
Port 2202
|
||||
#AddressFamily any
|
||||
#ListenAddress 0.0.0.0
|
||||
#ListenAddress ::
|
||||
|
||||
HostKey /etc/ssh/ssh_host_rsa_key
|
||||
HostKey /etc/ssh/ssh_host_ecdsa_key
|
||||
HostKey /etc/ssh/ssh_host_ed25519_key
|
||||
|
||||
# Ciphers and keying
|
||||
#RekeyLimit default none
|
||||
|
||||
# Logging
|
||||
#SyslogFacility AUTH
|
||||
SyslogFacility AUTH
|
||||
#LogLevel INFO
|
||||
|
||||
# Authentication:
|
||||
|
||||
#LoginGraceTime 2m
|
||||
PermitRootLogin yes
|
||||
#StrictModes yes
|
||||
#MaxAuthTries 6
|
||||
#MaxSessions 10
|
||||
|
||||
#PubkeyAuthentication yes
|
||||
|
||||
# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2
|
||||
# but this is overridden so installations will only check .ssh/authorized_keys
|
||||
AuthorizedKeysFile .ssh/authorized_keys
|
||||
|
||||
#AuthorizedPrincipalsFile none
|
||||
|
||||
#AuthorizedKeysCommand none
|
||||
#AuthorizedKeysCommandUser nobody
|
||||
|
||||
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
|
||||
#HostbasedAuthentication no
|
||||
# Change to yes if you don't trust ~/.ssh/known_hosts for
|
||||
# HostbasedAuthentication
|
||||
#IgnoreUserKnownHosts no
|
||||
# Don't read the user's ~/.rhosts and ~/.shosts files
|
||||
#IgnoreRhosts yes
|
||||
|
||||
# To disable tunneled clear text passwords, change to no here!
|
||||
PasswordAuthentication yes
|
||||
#PermitEmptyPasswords no
|
||||
|
||||
# Change to no to disable s/key passwords
|
||||
KbdInteractiveAuthentication no
|
||||
|
||||
# Kerberos options
|
||||
#KerberosAuthentication no
|
||||
#KerberosOrLocalPasswd yes
|
||||
#KerberosTicketCleanup yes
|
||||
#KerberosGetAFSToken no
|
||||
#KerberosUseKuserok yes
|
||||
|
||||
# GSSAPI options
|
||||
GSSAPIAuthentication yes
|
||||
GSSAPICleanupCredentials no
|
||||
#GSSAPIStrictAcceptorCheck yes
|
||||
#GSSAPIKeyExchange no
|
||||
#GSSAPIEnablek5users no
|
||||
|
||||
# Set this to 'yes' to enable PAM authentication, account processing,
|
||||
# and session processing. If this is enabled, PAM authentication will
|
||||
# be allowed through the KbdInteractiveAuthentication and
|
||||
# PasswordAuthentication. Depending on your PAM configuration,
|
||||
# PAM authentication via KbdInteractiveAuthentication may bypass
|
||||
# the setting of "PermitRootLogin without-password".
|
||||
# If you just want the PAM account and session checks to run without
|
||||
# PAM authentication, then enable this but set PasswordAuthentication
|
||||
# and KbdInteractiveAuthentication to 'no'.
|
||||
# WARNING: 'UsePAM no' is not supported in openEuler and may cause several
|
||||
# problems.
|
||||
UsePAM yes
|
||||
|
||||
#AllowAgentForwarding yes
|
||||
#AllowTcpForwarding yes
|
||||
#GatewayPorts no
|
||||
X11Forwarding yes
|
||||
#X11DisplayOffset 10
|
||||
#X11UseLocalhost yes
|
||||
#PermitTTY yes
|
||||
PrintMotd no
|
||||
#PrintLastLog yes
|
||||
#TCPKeepAlive yes
|
||||
#PermitUserEnvironment no
|
||||
#Compression delayed
|
||||
#ClientAliveInterval 0
|
||||
#ClientAliveCountMax 3
|
||||
UseDNS no
|
||||
#PidFile /var/run/sshd.pid
|
||||
#MaxStartups 10:30:100
|
||||
#PermitTunnel no
|
||||
#ChrootDirectory none
|
||||
#VersionAddendum none
|
||||
|
||||
# no default banner path
|
||||
#Banner none
|
||||
|
||||
AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
|
||||
AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
|
||||
AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
|
||||
AcceptEnv XMODIFIERS
|
||||
|
||||
# override default of no subsystems
|
||||
Subsystem sftp /usr/libexec/openssh/sftp-server -l INFO -f AUTH
|
||||
|
||||
# Example of overriding settings on a per-user basis
|
||||
#Match User anoncvs
|
||||
# X11Forwarding no
|
||||
# PermitTTY no
|
||||
# ForceCommand cvs server
|
||||
#CheckUserSplash yes
|
||||
|
||||
PubkeyAuthentication yes
|
||||
RSAAuthentication yes
|
||||
IgnoreRhosts yes
|
||||
PermitEmptyPasswords no
|
||||
Banner /etc/issue.net
|
||||
AllowTcpForwarding yes
|
||||
66-202505-浙江二级监管/分块压缩合.md (new file, 55 lines)
@@ -0,0 +1,55 @@
Below are the commands for compressing a Docker image into split archives and merging them back:

---

### **1. Compress and split the image**
#### **Recommended: compress and split directly through a pipe**
Using `gzip` (fast, moderate compression ratio):
```bash
docker save <IMAGE_NAME:TAG> | gzip | split -b 5G - image_part_.gz
```
Or using `xz` (higher compression ratio, slower):
```bash
docker save <IMAGE_NAME:TAG> | xz -T0 | split -b 5G - image_part_.xz
```
**Parameters**:
- `<IMAGE_NAME:TAG>`: replace with the actual image name and tag.
- `split -b 5G`: splits the input stream into chunks of at most 5 GB each.
- `image_part_.gz` / `image_part_.xz`: the chunk-file prefix; the generated files are named like `image_part_.gzaa`, `image_part_.gzab`, and so on.

---

### **2. Merge the chunks and restore the image**
#### **Merging and loading gzip chunks**
```bash
cat image_part_.gz* | gunzip | docker load
```
#### **Merging and loading xz chunks**
```bash
cat image_part_.xz* | xz -d | docker load
```

---

### **How it works**
1. **Compress and split**:
   - `docker save` writes the image's TAR archive to standard output.
   - The TAR data is compressed on the fly through the pipe (`gzip` or `xz`).
   - `split` cuts the compressed stream into files of at most `5G` each.

2. **Merge and restore**:
   - `cat` concatenates all chunk files in order.
   - `gunzip` or `xz -d` decompresses the merged stream.
   - `docker load` loads the image from the decompressed TAR stream.
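As a concrete illustration, a minimal end-to-end round trip could look like the following; the image name `cmii/example:1.0` is a placeholder, not an image from this repository:

```bash
# Export, compress and split into 5 GB chunks
docker save cmii/example:1.0 | gzip | split -b 5G - image_part_.gz

# On the target host: concatenate the chunks, decompress and load
cat image_part_.gz* | gunzip | docker load

# Check that the image is available again
docker images | grep example
```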
---

### **Notes**
- **Chunk naming**: by default `split` generates suffixes `aa`, `ab`, and so on. If there would be more than a few hundred chunks, set the suffix length with `-a <length>` and add `-d` for numeric suffixes such as `000`, `001`; see the example after this list.
- **Disk space**: merging needs enough temporary space for the full decompressed TAR data (e.g. a 24 GB image needs at least 24 GB free).
- **Choice of compression**:
  - `gzip`: faster, good for quick turnaround.
  - `xz`: higher compression ratio (especially for binary data), but needs more time and CPU.
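A short sketch of the suffix options mentioned above (image name and size are placeholders): with `-d -a 3` the chunks are numbered instead of lettered, which keeps them sortable even for hundreds of parts.

```bash
# Produces image_part_.gz000, image_part_.gz001, ...
docker save cmii/example:1.0 | gzip | split -b 5G -d -a 3 - image_part_.gz
```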
66-202505-浙江二级监管/压缩文件包.txt (new file, 20 lines)
@@ -0,0 +1,20 @@
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uas-gateway=2.1-demo-20250527-licence.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uas-lifecycle=2.1-demo-20250527-licence.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-notice=pro-6.0.8.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-material-warehouse=6.2.0-050701.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-platform-uasms=2.1-demo-20250527.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-platform-uas=2.1-demo-20250527.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uavms-pyfusion=6.3.6.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-iot-dispatcher=6.2.0-focus.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-sense-adapter=6.2.0-250415.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-uav-watchdog=1.0.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-live-operator=5.2.0.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=srs=v5.0.195-arm.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=cmii-srs-oss-adaptor=2023-SA-skip-CHL.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/docker=cmii=doris.fe-ubuntu=2.1.6.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/cmlc=cmii=doris.be-amd64=2.1.6.tar.gz
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/image_part_.gzaa
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/image_part_.gzab
https://oss.demo.uavcmlc.com/cmlc-installation/uavms21-demo/image_part_.gzac
https://oss.demo.uavcmlc.com/cmlc-installation/gb28181/v5.7.0-x86/gb28181_x86_2.7.3_20250414.img.tar
https://oss.demo.uavcmlc.com/cmlc-installation/gb28181/docker-gb28181.tar
66-202505-浙江二级监管/已有部署备份/all-statefull_sets-zjyd.yaml (new file, 1115 lines; diff not shown because it is too large)
66-202505-浙江二级监管/已有部署备份/install_auth.sh (new file, 19 lines)
@@ -0,0 +1,19 @@
#!/bin/bash

scp -P 2202 /root/wdd/install/auth_file.json root@192.168.10.2:/data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uav-watchdog/

ssh -p 2202 root@192.168.10.2 "ls /data/nfs_data/zjejpt-uas-nfs-backend-log-pvc-pvc-6712a14b-2a03-48d6-9349-42b4642be91e/zjejpt-uas/cmii-uav-watchdog"

# Generate the authorization file
curl http://localhost:8080/api/authorization/generate

# Process the authorization code
curl -X POST \
  http://localhost:8080/api/authorization/auth \
  -H 'Content-Type: application/json' \
  --data-binary @auth_file.json

#
curl http://localhost:8080/api/authorization/hosts
66-202505-浙江二级监管/已有部署备份/nginx-web.conf (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
|
||||
###### Forwarding for the supervision platform
|
||||
location ^~ /uas {
|
||||
####### Fix for cookies with missing, inconsistent, or conflicting HttpOnly attributes
|
||||
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
|
||||
#开启HTTP严格传输安全HSTS
|
||||
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
|
||||
proxy_pass http://localhost:30500;
|
||||
client_max_body_size 5120m;
|
||||
client_body_buffer_size 5120m;
|
||||
client_body_timeout 6000s;
|
||||
proxy_send_timeout 10000s;
|
||||
proxy_read_timeout 10000s;
|
||||
proxy_connect_timeout 600s;
|
||||
proxy_max_temp_file_size 5120m;
|
||||
proxy_request_buffering on;
|
||||
proxy_buffering off;
|
||||
proxy_buffer_size 4k;
|
||||
proxy_buffers 4 12k;
|
||||
proxy_set_header Host fake-domain.zjejpt-uas.io;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
}
|
||||
|
||||
location / {
|
||||
#######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
|
||||
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
|
||||
#开启HTTP严格传输安全HSTS
|
||||
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
|
||||
proxy_pass http://localhost:30500;
|
||||
client_max_body_size 5120m;
|
||||
client_body_buffer_size 5120m;
|
||||
client_body_timeout 6000s;
|
||||
proxy_send_timeout 10000s;
|
||||
proxy_read_timeout 10000s;
|
||||
proxy_connect_timeout 600s;
|
||||
proxy_max_temp_file_size 5120m;
|
||||
proxy_request_buffering on;
|
||||
proxy_buffering off;
|
||||
proxy_buffer_size 4k;
|
||||
proxy_buffers 4 12k;
|
||||
proxy_set_header Host fake-domain.zjyd.io;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
}
|
||||
|
||||
location /_AMapService/v4/map/styles {
|
||||
#######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
|
||||
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
|
||||
#开启HTTP严格传输安全HSTS
|
||||
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
|
||||
set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
|
||||
proxy_pass https://webapi.amap.com/v4/ap/styles;
|
||||
}
|
||||
|
||||
location /_AMapService/ {
|
||||
#######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
|
||||
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
|
||||
#开启HTTP严格传输安全HSTS
|
||||
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
|
||||
set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
|
||||
proxy_pass https://restapi.amap.com/;
|
||||
}
|
||||
|
||||
location /rtc/v1/ {
|
||||
#######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
|
||||
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
|
||||
#开启HTTP严格传输安全HSTS
|
||||
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
|
||||
add_header Access-Control-Allow-Headers X-Requested-With;
|
||||
add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
|
||||
proxy_pass http://192.168.10.3:30985/rtc/v1/;
|
||||
}
|
||||
|
||||
|
||||
|
||||
### 视频国标GB28181 ###
|
||||
|
||||
# location /zlm/flv/ {
|
||||
# #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
|
||||
# add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
|
||||
# #开启HTTP严格传输安全HSTS
|
||||
# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
|
||||
# add_header Access-Control-Allow-Headers X-Requested-With;
|
||||
# add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
|
||||
# proxy_pass http://192.168.10.25:7088/;
|
||||
# }
|
||||
# location /zlm/hls/ {
|
||||
# #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
|
||||
# add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
|
||||
# #开启HTTP严格传输安全HSTS
|
||||
# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
|
||||
# add_header Access-Control-Allow-Headers X-Requested-With;
|
||||
# add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
|
||||
# proxy_pass http://192.168.10.25:7088/zlm/hls/;
|
||||
# }
|
||||
# location /index/api/ {
|
||||
# #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
|
||||
# add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
|
||||
# #开启HTTP严格传输安全HSTS
|
||||
# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
|
||||
# add_header Access-Control-Allow-Headers X-Requested-With;
|
||||
# add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
|
||||
# proxy_pass http://192.168.10.25:7088/index/api/;
|
||||
# }
|
||||
|
||||
|
||||
#location /video_feed {
|
||||
# proxy_pass http://192.168.10.12:5000;
|
||||
# proxy_http_version 1.1;
|
||||
# proxy_set_header Upgrade $http_upgrade;
|
||||
# proxy_set_header Connection "upgrade";
|
||||
# proxy_set_header Host $host;
|
||||
# proxy_cache_bypass $http_upgrade;
|
||||
#}
|
||||
#location /video_person {
|
||||
# proxy_pass http://192.168.10.12:5001;
|
||||
# proxy_http_version 1.1;
|
||||
# proxy_set_header Upgrade $http_upgrade;
|
||||
# proxy_set_header Connection "upgrade";
|
||||
# proxy_set_header Host $host;
|
||||
# proxy_cache_bypass $http_upgrade;
|
||||
#}
|
||||
#location /video {
|
||||
# #######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
|
||||
# add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
|
||||
# #开启HTTP严格传输安全HSTS
|
||||
# add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload" always;
|
||||
# alias /data/test/;
|
||||
# index 10.mp4;
|
||||
#}
|
||||
|
||||
#location ~ ^/\w*/actuator/ {
|
||||
# return 403;
|
||||
#}
|
||||
|
||||
location ~ ^/.*/(actuator|swagger-resources|api-docs|health).* {
|
||||
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
|
||||
return 404;
|
||||
}
|
||||
274
66-202505-浙江二级监管/已有部署备份/nginx-端口转发.conf
Normal file
274
66-202505-浙江二级监管/已有部署备份/nginx-端口转发.conf
Normal file
@@ -0,0 +1,274 @@
|
||||
user www www;
|
||||
worker_processes auto;
|
||||
|
||||
error_log logs/error.log warn;
|
||||
pid /var/run/nginx/nginx.pid;
|
||||
|
||||
events {
|
||||
worker_connections 65535;
|
||||
}
|
||||
|
||||
stream{
|
||||
include /data/nginx/conf/blacklist.conf;
|
||||
include /data/nginx/conf/blacklist_zhejiang.conf;
|
||||
deny all;
|
||||
#飞行数据-mqtt
|
||||
upstream tcp31883{
|
||||
server 127.0.0.1:32883; #中移凌云飞行数据
|
||||
}
|
||||
server{
|
||||
listen 31883;
|
||||
proxy_pass tcp31883;
|
||||
}
|
||||
|
||||
#飞行数据-mqtt-websocket
|
||||
upstream tcp38083{
|
||||
server 127.0.0.1:39083;
|
||||
}
|
||||
server{
|
||||
listen 38083;
|
||||
proxy_pass tcp38083;
|
||||
}
|
||||
|
||||
#视频流媒体-RTMP
|
||||
upstream tcp31935{
|
||||
server 127.0.0.1:32935;
|
||||
}
|
||||
server{
|
||||
listen 31935;
|
||||
proxy_pass tcp31935;
|
||||
}
|
||||
|
||||
#视频流媒体-WEBRTC
|
||||
upstream udp30090{
|
||||
server 127.0.0.1:31090;
|
||||
}
|
||||
server{
|
||||
listen 30090 udp;
|
||||
proxy_pass udp30090;
|
||||
}
|
||||
|
||||
#视频流播放TCP端口
|
||||
#upstream tcp30080{
|
||||
# server 127.0.0.1:31080;
|
||||
#}
|
||||
#server{
|
||||
# listen 30080;
|
||||
# proxy_pass tcp30080;
|
||||
#}
|
||||
|
||||
#rtsp-控制TCP端口
|
||||
#upstream tcp30554{
|
||||
# server 127.0.0.1:32554;
|
||||
#}
|
||||
#server{
|
||||
# listen 30554;
|
||||
# proxy_pass tcp30554;
|
||||
#}
|
||||
|
||||
#rtsp-数据TCP端口
|
||||
#upstream tcp30556{
|
||||
# server 127.0.0.1:32556;
|
||||
#}
|
||||
#server{
|
||||
# listen 30556;
|
||||
# proxy_pass tcp30556;
|
||||
#}
|
||||
|
||||
#rtsp-数据UDP端口
|
||||
#upstream udp30556{
|
||||
# server 127.0.0.1:32556;
|
||||
#}
|
||||
#server{
|
||||
# listen 30556 udp;
|
||||
# proxy_pass udp30556;
|
||||
#}
|
||||
|
||||
#模拟数据测试UDP端口
|
||||
#upstream udp30556{
|
||||
# server 127.0.0.1:31556;
|
||||
#}
|
||||
#server{
|
||||
# listen 30556 udp;
|
||||
# proxy_pass udp30556;
|
||||
#}
|
||||
|
||||
#RabbitMQ控制台端口
|
||||
# server{
|
||||
# listen 32002;
|
||||
# proxy_pass 192.168.10.11:15672;
|
||||
# }
|
||||
|
||||
}
|
||||
|
||||
|
||||
http {
|
||||
include /data/nginx/conf/blacklist.conf;
|
||||
include /data/nginx/conf/blacklist_zhejiang.conf;
|
||||
deny all;
|
||||
include mime.types;
|
||||
default_type application/octet-stream;
|
||||
## 去除版本信息 ##
|
||||
server_tokens off;
|
||||
#error日志更换
|
||||
#fastcgi_intercept_errors on;
|
||||
error_log logs/error.log warn;
|
||||
#####
|
||||
|
||||
sendfile on;
|
||||
keepalive_timeout 60;
|
||||
client_body_timeout 30s;
|
||||
client_header_timeout 30s;
|
||||
send_timeout 30s;
|
||||
gzip on;
|
||||
#more_clear_headers 'Server';
|
||||
add_header X-Frame-Options SAMEORIGIN always;
|
||||
add_header X-Content-Type-Options nosniff;
|
||||
#开启HTTP严格传输安全HSTS
|
||||
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
|
||||
|
||||
|
||||
|
||||
underscores_in_headers on;
|
||||
log_format main '$remote_addr - $remote_user [$time_local]'
|
||||
'#"$request_method $scheme://$host$request_uri $server_protocol" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for" - "$request_time"';
|
||||
access_log /data/nginx/logs/access.log main;
|
||||
|
||||
server {
|
||||
listen 8088 ssl;
|
||||
server_name lingyun.zyjctech.com
|
||||
index index.jsp index.htm index.html;
|
||||
|
||||
### 跨域设置(临时) ###
|
||||
add_header 'Access-Control-Allow-Origin' '*';
|
||||
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
|
||||
add_header 'Access-Control-Allow-Headers' 'Origin, Content-Type, Accept, Authorization';
|
||||
if ($request_method = 'OPTIONS') {
|
||||
return 204;
|
||||
}
|
||||
# 禁止 iframe 嵌套
|
||||
add_header X-Frame-Options SAMEORIGIN always;
|
||||
add_header X-Content-Type-Options nosniff;
|
||||
add_header X-XSS-Protection "1; mode=block";
|
||||
#######Cookie 标记为 HttpOnly 缺少、不一致或相互矛盾属性的 Cookie 漏洞修复
|
||||
add_header Set-Cookie "sessionid=aFM9PPvmF7kpynnx; HttpOnly; Secure; Path=/; SameSite=Lax;";
|
||||
#开启HTTP严格传输安全HSTS
|
||||
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
|
||||
|
||||
|
||||
### ssl配置 ###
|
||||
|
||||
ssl_certificate /data/nginx/conf/zyjctech.com_cert_chain.pem;
|
||||
ssl_certificate_key /data/nginx/conf/zyjctech.com_key.key;
|
||||
ssl_session_timeout 10m;
|
||||
## 新增 ##
|
||||
#ssl_stapling_verify on;
|
||||
#ssl_session_cache shared:SSL:50m;
|
||||
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
|
||||
#ssl_prefer_server_ciphers off;
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
|
||||
|
||||
##############################
|
||||
|
||||
include /data/nginx/conf/vhost8088/*.conf;
|
||||
client_max_body_size 1024m;
|
||||
client_body_buffer_size 512k;
|
||||
client_header_timeout 3m;
|
||||
send_timeout 3m;
|
||||
proxy_connect_timeout 600;
|
||||
proxy_read_timeout 600;
|
||||
proxy_send_timeout 600;
|
||||
###自定义403返回拦截的ip#########
|
||||
error_page 403 /error.html;
|
||||
|
||||
location = /error.html {
|
||||
default_type text/plain;
|
||||
return 403 "Access failed. Please contact the administrator to add the IP whitelist IP:$remote_addr";
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
#K8S DashBoard
|
||||
# server {
|
||||
# listen 30554 ssl;
|
||||
# ssl_certificate /data/nginx/conf/zyjctech.com_cert_chain.pem;
|
||||
# ssl_certificate_key /data/nginx/conf/zyjctech.com_key.key;
|
||||
# ssl_session_timeout 5m;
|
||||
|
||||
# ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
|
||||
# ssl_prefer_server_ciphers off;
|
||||
# ssl_protocols TLSv1.2 TLSv1.3;
|
||||
|
||||
# client_max_body_size 1024m;
|
||||
# client_body_buffer_size 256k;
|
||||
# client_header_timeout 3m;
|
||||
# client_body_timeout 3m;
|
||||
# send_timeout 3m;
|
||||
|
||||
# proxy_connect_timeout 600;
|
||||
# proxy_read_timeout 600;
|
||||
# proxy_send_timeout 600;
|
||||
# proxy_buffer_size 256k;
|
||||
# proxy_buffers 4 256k;
|
||||
# proxy_busy_buffers_size 256k;
|
||||
|
||||
# location / {
|
||||
# proxy_pass https://127.0.0.1:32000;
|
||||
# proxy_set_header Host $host;
|
||||
# proxy_set_header X-Real-IP $remote_addr;
|
||||
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
# }
|
||||
# }
|
||||
|
||||
|
||||
## minio控制台 ##
|
||||
|
||||
# server {
|
||||
# listen 32002; #或者用80端口也可以
|
||||
# server_name 188.106.25.136; #可以用域名
|
||||
# add_header X-Frame-Options SAMEORIGIN always;
|
||||
# add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
|
||||
# location / {
|
||||
# proxy_next_upstream http_500 http_502 http_503 http_504 error timeout invalid_header;
|
||||
# proxy_set_header Host $http_host;
|
||||
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
# proxy_pass http://188.106.25.132:9001;
|
||||
# expires 0;
|
||||
# }
|
||||
# }
|
||||
|
||||
## AiMapServer ##
|
||||
# server {
|
||||
# listen 32007; #或者用80端口也可以
|
||||
# server_name 188.106.25.136; #可以用域名
|
||||
# add_header X-Frame-Options SAMEORIGIN always;
|
||||
# add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
|
||||
|
||||
# location / {
|
||||
# proxy_pass http://188.106.25.222:5090/aimap-server/manager/login;
|
||||
# proxy_set_header Host $host;
|
||||
# proxy_set_header X-Real-IP $remote_addr;
|
||||
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
# }
|
||||
# location /aimap-server {
|
||||
# proxy_pass http://188.106.25.222:5090/aimap-server;
|
||||
# proxy_set_header Host $host;
|
||||
# proxy_set_header X-Real-IP $remote_addr;
|
||||
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
# }
|
||||
# location /gisapi {
|
||||
# proxy_pass http://188.106.25.222:5090/gisapi;
|
||||
# proxy_set_header Host $host;
|
||||
# proxy_set_header X-Real-IP $remote_addr;
|
||||
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
# }
|
||||
# }
|
||||
|
||||
|
||||
|
||||
}
|
||||
1350
66-202505-浙江二级监管/已有部署备份/主机授权文件.json
Normal file
1350
66-202505-浙江二级监管/已有部署备份/主机授权文件.json
Normal file
File diff suppressed because it is too large
Load Diff
1348
66-202505-浙江二级监管/已有部署备份/授权码.json
Normal file
1348
66-202505-浙江二级监管/已有部署备份/授权码.json
Normal file
File diff suppressed because it is too large
Load Diff
1400
66-202505-浙江二级监管/部署文件/k8s-backend.yaml
Normal file
1400
66-202505-浙江二级监管/部署文件/k8s-backend.yaml
Normal file
File diff suppressed because it is too large
Load Diff
644
66-202505-浙江二级监管/部署文件/k8s-configmap.yaml
Normal file
644
66-202505-浙江二级监管/部署文件/k8s-configmap.yaml
Normal file
@@ -0,0 +1,644 @@
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-splice
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "splice",
|
||||
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-traffic
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "traffic",
|
||||
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-smauth
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "smauth",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-pangu
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-open
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "open",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-jiangsuwenlv
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "jiangsuwenlv",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-qingdao
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "qingdao",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-uasms
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "uasms",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-dispatchh5
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "dispatchh5",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-base
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "base",
|
||||
AppClientId: "APP_9LY41OaKSqk2btY0"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-detection
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "detection",
|
||||
AppClientId: "APP_FDHW2VLVDWPnnOCy"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-securityh5
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "securityh5",
|
||||
AppClientId: "APP_N3ImO0Ubfu9peRHD"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-seniclive
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "seniclive",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-qinghaitourism
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "qinghaitourism",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-scanner
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "scanner",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-ai-brain
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "ai-brain",
|
||||
AppClientId: "APP_rafnuCAmBESIVYMH"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-cmsportal
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "cmsportal",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-threedsimulation
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "threedsimulation",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-uas
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "uas",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-dikongzhixingh5
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "dikongzhixingh5",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-iot
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "iot",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-security
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "security",
|
||||
AppClientId: "APP_JUSEMc7afyWXxvE7"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-hljtt
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "hljtt",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-visualization
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "visualization",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-pilot2cloud
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "pilot2cloud",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-blockchain
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "blockchain",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-smsecret
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "smsecret",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-secenter
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "secenter",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-uavmsmanager
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "uavmsmanager",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-supervisionh5
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "supervisionh5",
|
||||
AppClientId: "APP_qqSu82THfexI8PLM"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-armypeople
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "armypeople",
|
||||
AppClientId: "APP_UIegse6Lfou9pO1U"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-logistics
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "logistics",
|
||||
AppClientId: "APP_PvdfRRRBPL8xbIwl"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-mws
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "mws",
|
||||
AppClientId: "APP_uKniXPELlRERBBwK"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-share
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "share",
|
||||
AppClientId: "APP_4lVSVI0ZGxTssir8"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-hyper
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "hyper",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-classification
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "classification",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-renyike
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "renyike",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-emergency
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "emergency",
|
||||
AppClientId: "APP_aGsTAY1uMZrpKdfk"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-multiterminal
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "multiterminal",
|
||||
AppClientId: "APP_PvdfRRRBPL8xbIwl"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-oms
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "oms",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-supervision
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "supervision",
|
||||
AppClientId: "APP_qqSu82THfexI8PLM"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-media
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "media",
|
||||
AppClientId: "APP_4AU8lbifESQO4FD6"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-eventsh5
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "eventsh5",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-mianyangbackend
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "mianyangbackend",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-awareness
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "awareness",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tenant-prefix-flight-control
|
||||
namespace: zjejpt-uas
|
||||
data:
|
||||
ingress-config.js: |-
|
||||
var __GlobalIngressConfig = {
|
||||
TenantEnvironment: "",
|
||||
CloudHOST: "lingyun.zyjctech.com:8088",
|
||||
ApplicationShortName: "flight-control",
|
||||
AppClientId: "empty"
|
||||
}
|
||||
203
66-202505-浙江二级监管/部署文件/k8s-frontend.yaml
Normal file
203
66-202505-浙江二级监管/部署文件/k8s-frontend.yaml
Normal file
@@ -0,0 +1,203 @@
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: nginx-cm
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
cmii.type: frontend
|
||||
data:
|
||||
nginx.conf: |
|
||||
server {
|
||||
listen 9528;
|
||||
server_name localhost;
|
||||
gzip on;
|
||||
|
||||
location / {
|
||||
root /home/cmii-platform/dist;
|
||||
index index.html index.htm;
|
||||
}
|
||||
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
root html;
|
||||
}
|
||||
}
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: cmii-uav-platform-uasms
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
cmii.type: frontend
|
||||
cmii.app: cmii-uav-platform-uasms
|
||||
octopus.control: frontend-app-wdd
|
||||
app.kubernetes.io/app-version: uas-2.1
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
cmii.type: frontend
|
||||
cmii.app: cmii-uav-platform-uasms
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
cmii.type: frontend
|
||||
cmii.app: cmii-uav-platform-uasms
|
||||
spec:
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
containers:
|
||||
- name: cmii-uav-platform-uasms
|
||||
image: 192.168.10.3:8033/cmii/cmii-uav-platform-uasms:2.1-demo-20250527
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: K8S_NAMESPACE
|
||||
value: zjejpt-uas
|
||||
- name: APPLICATION_NAME
|
||||
value: cmii-uav-platform-uasms
|
||||
ports:
|
||||
- name: platform-9528
|
||||
containerPort: 9528
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: "1"
|
||||
memory: 1Gi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
volumeMounts:
|
||||
- name: nginx-conf
|
||||
mountPath: /etc/nginx/conf.d/nginx.conf
|
||||
subPath: nginx.conf
|
||||
- name: tenant-prefix
|
||||
subPath: ingress-config.js
|
||||
mountPath: /home/cmii-platform/dist/ingress-config.js
|
||||
volumes:
|
||||
- name: nginx-conf
|
||||
configMap:
|
||||
name: nginx-cm
|
||||
items:
|
||||
- key: nginx.conf
|
||||
path: nginx.conf
|
||||
- name: tenant-prefix
|
||||
configMap:
|
||||
name: tenant-prefix-uasms
|
||||
items:
|
||||
- key: ingress-config.js
|
||||
path: ingress-config.js
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: cmii-uav-platform-uasms
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
cmii.type: frontend
|
||||
cmii.app: cmii-uav-platform-uasms
|
||||
octopus.control: frontend-app-wdd
|
||||
app.kubernetes.io/version: uas-2.1
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
cmii.type: frontend
|
||||
cmii.app: cmii-uav-platform-uasms
|
||||
ports:
|
||||
- name: web-svc-port
|
||||
port: 9528
|
||||
protocol: TCP
|
||||
targetPort: 9528
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: cmii-uav-platform-uas
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
cmii.type: frontend
|
||||
cmii.app: cmii-uav-platform-uas
|
||||
octopus.control: frontend-app-wdd
|
||||
app.kubernetes.io/app-version: uas-2.1
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
cmii.type: frontend
|
||||
cmii.app: cmii-uav-platform-uas
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
cmii.type: frontend
|
||||
cmii.app: cmii-uav-platform-uas
|
||||
spec:
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
containers:
|
||||
- name: cmii-uav-platform-uas
|
||||
image: 192.168.10.3:8033/cmii/cmii-uav-platform-uas:2.1-demo-20250527
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: K8S_NAMESPACE
|
||||
value: zjejpt-uas
|
||||
- name: APPLICATION_NAME
|
||||
value: cmii-uav-platform-uas
|
||||
ports:
|
||||
- name: platform-9528
|
||||
containerPort: 9528
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: "1"
|
||||
memory: 1Gi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
volumeMounts:
|
||||
- name: nginx-conf
|
||||
mountPath: /etc/nginx/conf.d/nginx.conf
|
||||
subPath: nginx.conf
|
||||
- name: tenant-prefix
|
||||
subPath: ingress-config.js
|
||||
mountPath: /home/cmii-platform/dist/ingress-config.js
|
||||
volumes:
|
||||
- name: nginx-conf
|
||||
configMap:
|
||||
name: nginx-cm
|
||||
items:
|
||||
- key: nginx.conf
|
||||
path: nginx.conf
|
||||
- name: tenant-prefix
|
||||
configMap:
|
||||
name: tenant-prefix-uas
|
||||
items:
|
||||
- key: ingress-config.js
|
||||
path: ingress-config.js
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: cmii-uav-platform-uas
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
cmii.type: frontend
|
||||
cmii.app: cmii-uav-platform-uas
|
||||
octopus.control: frontend-app-wdd
|
||||
app.kubernetes.io/version: uas-2.1
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
cmii.type: frontend
|
||||
cmii.app: cmii-uav-platform-uas
|
||||
ports:
|
||||
- name: web-svc-port
|
||||
port: 9528
|
||||
protocol: TCP
|
||||
targetPort: 9528
|
||||
66
66-202505-浙江二级监管/部署文件/k8s-ingress.yaml
Normal file
66
66-202505-浙江二级监管/部署文件/k8s-ingress.yaml
Normal file
@@ -0,0 +1,66 @@
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: frontend-applications-ingress
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
type: frontend
|
||||
octopus.control: all-ingress-config-wdd
|
||||
app.kubernetes.io/managed-by: octopus-control
|
||||
app.kubernetes.io/version: uas-2.1
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "nginx"
|
||||
nginx.ingress.kubernetes.io/enable-cors: "true"
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /$1
|
||||
nginx.ingress.kubernetes.io/configuration-snippet: |
|
||||
rewrite ^(/uas)$ $1/ redirect;
|
||||
rewrite ^(/uasms)$ $1/ redirect;
|
||||
spec:
|
||||
rules:
|
||||
- host: fake-domain.zjejpt-uas.io
|
||||
http:
|
||||
paths:
|
||||
- path: /uas/?(.*)
|
||||
pathType: ImplementationSpecific
|
||||
backend:
|
||||
serviceName: cmii-uav-platform-uas
|
||||
servicePort: 9528
|
||||
- path: /uasms/?(.*)
|
||||
pathType: ImplementationSpecific
|
||||
backend:
|
||||
serviceName: cmii-uav-platform-uasms
|
||||
servicePort: 9528
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: all-gateways-ingress
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
type: api-gateway
|
||||
octopus.control: all-ingress-config-1.1.0
|
||||
app.kubernetes.io/managed-by: octopus-control
|
||||
app.kubernetes.io/version: uas-2.1
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "nginx"
|
||||
nginx.ingress.kubernetes.io/enable-cors: "true"
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /$1
|
||||
nginx.ingress.kubernetes.io/configuration-snippet: |
|
||||
proxy_set_header upgradePrefix $http_upgrade;
|
||||
proxy_set_header Connection "upgradePrefix";
|
||||
spec:
|
||||
rules:
|
||||
- host: fake-domain.zjejpt-uas.io
|
||||
http:
|
||||
paths:
|
||||
- path: /uas/api/?(.*)
|
||||
pathType: ImplementationSpecific
|
||||
backend:
|
||||
serviceName: cmii-uas-gateway
|
||||
servicePort: 8080
|
||||
- path: /converge/?(.*)
|
||||
pathType: ImplementationSpecific
|
||||
backend:
|
||||
serviceName: cmii-sky-converge
|
||||
servicePort: 8080
|
||||
38
66-202505-浙江二级监管/部署文件/k8s-nfs-test.yaml
Normal file
38
66-202505-浙江二级监管/部署文件/k8s-nfs-test.yaml
Normal file
@@ -0,0 +1,38 @@
|
||||
---
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: test-claim-uas
|
||||
annotations:
|
||||
volume.beta.kubernetes.io/storage-class: "nfs-uas-storage-class" #与nfs-StorageClass.yaml metadata.name保持一致
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
storageClassName: nfs-uas-storage-class
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Mi
|
||||
---
|
||||
kind: Pod
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: test-pod-uas
|
||||
spec:
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
containers:
|
||||
- name: test-pod
|
||||
image: 192.168.10.3:8033/cmii/alpine:1.0.0
|
||||
command:
|
||||
- "/bin/sh"
|
||||
args:
|
||||
- "-c"
|
||||
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
|
||||
volumeMounts:
|
||||
- name: nfs-pvc
|
||||
mountPath: "/mnt"
|
||||
restartPolicy: "Never"
|
||||
volumes:
|
||||
- name: nfs-pvc
|
||||
persistentVolumeClaim:
|
||||
claimName: test-claim #与PVC名称保持一致
|
||||
114
66-202505-浙江二级监管/部署文件/k8s-nfs.yaml
Normal file
114
66-202505-浙江二级监管/部署文件/k8s-nfs.yaml
Normal file
@@ -0,0 +1,114 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: nfs-client-provisioner-uas
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system #根据实际环境设定namespace,下面类同
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-client-provisioner-uas-runner
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "update", "patch"]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: run-nfs-client-provisioner-uas
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner-uas
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
# name: nfs-client-provisioner-uas-runner
|
||||
name: cluster-admin
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner-uas
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner-uas
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner-uas
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: leader-locking-nfs-client-provisioner-uas
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: nfs-uas-storage-class
|
||||
provisioner: cmlc-nfs-uas-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致parameters: archiveOnDelete: "false"
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nfs-client-provisioner-uas
|
||||
labels:
|
||||
app: nfs-client-provisioner-uas
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system #与RBAC文件中的namespace保持一致
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nfs-client-provisioner-uas
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nfs-client-provisioner-uas
|
||||
spec:
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
serviceAccountName: nfs-client-provisioner-uas
|
||||
containers:
|
||||
- name: nfs-client-provisioner-uas
|
||||
image: 192.168.10.3:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
|
||||
volumeMounts:
|
||||
- name: nfs-client-root
|
||||
mountPath: /persistentvolumes
|
||||
env:
|
||||
- name: PROVISIONER_NAME
|
||||
value: cmlc-nfs-uas-storage
|
||||
- name: NFS_SERVER
|
||||
value: 192.168.10.2
|
||||
- name: NFS_PATH
|
||||
value: /data/nfs_data
|
||||
volumes:
|
||||
- name: nfs-client-root
|
||||
nfs:
|
||||
server: 192.168.10.2
|
||||
path: /data/nfs_data
|
||||
20
66-202505-浙江二级监管/部署文件/k8s-pvc.yaml
Normal file
20
66-202505-浙江二级监管/部署文件/k8s-pvc.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: nfs-backend-log-pvc
|
||||
namespace: zjejpt-uas
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: nfs-backend-log-pvc
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: uas-2.1
|
||||
spec:
|
||||
storageClassName: nfs-uas-storage-class
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 1000Gi
|
||||
|
||||
Reference in New Issue
Block a user