add
37
21-资阳移动/0.0-dependencies.sh
Normal file
@@ -0,0 +1,37 @@
#!/usr/bin/env bash

# Run this on every node

hostnamectl set-hostname master-node

sed -i "/search/ a nameserver 223.5.5.5" /etc/resolv.conf

echo "AllowTcpForwarding yes" >> /etc/ssh/sshd_config
systemctl restart sshd

cat >> /etc/hosts << EOF
192.168.6.6 master-node
192.168.6.5 worker-1
192.168.6.7 worker-2
192.168.6.3 worker-3
192.168.6.4 storage-1
EOF

yum clean all && yum makecache

root
10.18.70.43
Zytxjjh@0305

journalctl -u octopus-agent.service -n 200 -f

bash <(curl -sL http://42.192.52.227:9000/octopus/init-script-wdd.sh) --url http://42.192.52.227:9000/octopus --agent-install --offline

vim /usr/local/etc/octpus-agent/octopus-agent.conf

systemctl restart octopus-agent

1765654823726669826
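The hostname above is hard-coded to master-node; each worker needs the matching name from the /etc/hosts block. A minimal sketch of driving that from the master over SSH (default port and key assumed, not taken from this commit):

# Hypothetical helper: set every other node's hostname to its /etc/hosts entry
declare -A node_names=( [192.168.6.5]=worker-1 [192.168.6.7]=worker-2 [192.168.6.3]=worker-3 [192.168.6.4]=storage-1 )
for ip in "${!node_names[@]}"; do
  ssh root@"$ip" "hostnamectl set-hostname ${node_names[$ip]} && hostnamectl status"
done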
42
21-资阳移动/1-minio-init.sh
Normal file
@@ -0,0 +1,42 @@
#!/bin/bash
# Replace the namespace
# Replace the actual MinIO address and port
# Update the actual RabbitMQ address and port; it must be exposed

curl https://dl.min.io/client/mc/release/linux-amd64/mc -o mc

export tenant_name=zyyd

mc alias set ${tenant_name} http://192.168.6.4:9000 cmii B#923fC7mk

mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls

# mc alias set demo https://oss.demo.uavcmlc.com:18000 cmii B#923fC7mk
#
# mc cp -r demo/jadenq/scenariomock/xg/ ${tenant_name}/jadenq/scenariomock/xg/
# mc cp -r demo/jadenq/application/file/中移凌云使用手册.pdf ${tenant_name}/jadenq/application/file/中移凌云使用手册.pdf
# mc cp -r demo/jadenq/defimage/def.jpg ${tenant_name}/jadenq/defimage/def.jpg
# mc cp -r demo/pub-cms/application/img/ ${tenant_name}/pub-cms/application/img/

mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@192.168.6.5:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"

mc admin service restart ${tenant_name}

mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put

mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete

mc ilm add --expiry-days "1" ${tenant_name}/tus
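Note that the freshly downloaded mc binary still needs chmod +x (and ./ or a PATH entry) if the client is not already installed. After the service restart, the notification and lifecycle setup can be spot-checked; a quick verification sketch using the same alias:

chmod +x ./mc
# list the AMQP notification targets registered on a bucket
./mc event list ${tenant_name}/mission
# confirm the 1-day expiry rule on the tus bucket
./mc ilm ls ${tenant_name}/tus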
331
21-资阳移动/10-2-ingress-nginx-conf.sh
Normal file
@@ -0,0 +1,331 @@
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srs-cm
|
||||
namespace: SUPREME
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
cmii.app: live-srs
|
||||
cmii.type: midware
|
||||
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
|
||||
data:
|
||||
srs.rtc.conf: |-
|
||||
listen 30935;
|
||||
max_connections 4096;
|
||||
srs_log_tank console;
|
||||
srs_log_level info;
|
||||
srs_log_file /home/srs.log;
|
||||
daemon off;
|
||||
http_api {
|
||||
enabled on;
|
||||
listen 1985;
|
||||
crossdomain on;
|
||||
}
|
||||
stats {
|
||||
network 0;
|
||||
}
|
||||
http_server {
|
||||
enabled on;
|
||||
listen 8080;
|
||||
dir /home/hls;
|
||||
}
|
||||
srt_server {
|
||||
enabled on;
|
||||
listen 30556;
|
||||
maxbw 1000000000;
|
||||
connect_timeout 4000;
|
||||
peerlatency 600;
|
||||
recvlatency 600;
|
||||
}
|
||||
rtc_server {
|
||||
enabled on;
|
||||
listen 30090;
|
||||
candidate $CANDIDATE;
|
||||
}
|
||||
vhost __defaultVhost__ {
|
||||
http_hooks {
|
||||
enabled on;
|
||||
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
|
||||
}
|
||||
http_remux {
|
||||
enabled on;
|
||||
}
|
||||
rtc {
|
||||
enabled on;
|
||||
rtmp_to_rtc on;
|
||||
rtc_to_rtmp on;
|
||||
keep_bframe off;
|
||||
}
|
||||
tcp_nodelay on;
|
||||
min_latency on;
|
||||
play {
|
||||
gop_cache off;
|
||||
mw_latency 100;
|
||||
mw_msgs 10;
|
||||
}
|
||||
publish {
|
||||
firstpkt_timeout 8000;
|
||||
normal_timeout 4000;
|
||||
mr on;
|
||||
}
|
||||
dvr {
|
||||
enabled off;
|
||||
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
|
||||
dvr_plan session;
|
||||
}
|
||||
hls {
|
||||
enabled on;
|
||||
hls_path /home/hls;
|
||||
hls_fragment 10;
|
||||
hls_window 60;
|
||||
hls_m3u8_file [app]/[stream].m3u8;
|
||||
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
|
||||
hls_cleanup on;
|
||||
hls_entry_prefix http://A1C1IP:A1C1JS;
|
||||
}
|
||||
}
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srs-svc-exporter
|
||||
namespace: SUPREME
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
ports:
|
||||
- name: rtmp
|
||||
protocol: TCP
|
||||
port: 30935
|
||||
targetPort: 30935
|
||||
nodePort: 30935
|
||||
- name: rtc
|
||||
protocol: UDP
|
||||
port: 30090
|
||||
targetPort: 30090
|
||||
nodePort: 30090
|
||||
- name: srt
|
||||
protocol: UDP
|
||||
port: 30556
|
||||
targetPort: 30556
|
||||
nodePort: 30556
|
||||
- name: api
|
||||
protocol: TCP
|
||||
port: 1985
|
||||
targetPort: 1985
|
||||
nodePort: 30557
|
||||
selector:
|
||||
srs-role: rtc
|
||||
type: NodePort
|
||||
sessionAffinity: None
|
||||
externalTrafficPolicy: Cluster
|
||||
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srs-svc
|
||||
namespace: SUPREME
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
port: 8080
|
||||
targetPort: 8080
|
||||
- name: api
|
||||
protocol: TCP
|
||||
port: 1985
|
||||
targetPort: 1985
|
||||
selector:
|
||||
srs-role: rtc
|
||||
type: ClusterIP
|
||||
sessionAffinity: None
|
||||
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srsrtc-svc
|
||||
namespace: SUPREME
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
ports:
|
||||
- name: rtmp
|
||||
protocol: TCP
|
||||
port: 30935
|
||||
targetPort: 30935
|
||||
selector:
|
||||
srs-role: rtc
|
||||
type: ClusterIP
|
||||
sessionAffinity: None
|
||||
|
||||
---
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: helm-live-srs-rtc
|
||||
namespace: SUPREME
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
cmii.app: live-srs
|
||||
cmii.type: midware
|
||||
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
|
||||
srs-role: rtc
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
srs-role: rtc
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
srs-role: rtc
|
||||
spec:
|
||||
volumes:
|
||||
- name: srs-conf-file
|
||||
configMap:
|
||||
name: helm-live-srs-cm
|
||||
items:
|
||||
- key: srs.rtc.conf
|
||||
path: docker.conf
|
||||
defaultMode: 420
|
||||
- name: srs-vol
|
||||
emptyDir:
|
||||
sizeLimit: 8Gi
|
||||
containers:
|
||||
- name: srs-rtc
|
||||
image: 'A1C2IP:8033/cmii/srs:v5.0.195'
|
||||
ports:
|
||||
- name: srs-rtmp
|
||||
containerPort: 30935
|
||||
protocol: TCP
|
||||
- name: srs-api
|
||||
containerPort: 1985
|
||||
protocol: TCP
|
||||
- name: srs-flv
|
||||
containerPort: 8080
|
||||
protocol: TCP
|
||||
- name: srs-webrtc
|
||||
containerPort: 30090
|
||||
protocol: UDP
|
||||
- name: srs-srt
|
||||
containerPort: 30556
|
||||
protocol: UDP
|
||||
env:
|
||||
- name: CANDIDATE
|
||||
value: A1C1IP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1200m
|
||||
memory: 6Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
volumeMounts:
|
||||
- name: srs-conf-file
|
||||
mountPath: /usr/local/srs/conf/docker.conf
|
||||
subPath: docker.conf
|
||||
- name: srs-vol
|
||||
mountPath: /home/dvr
|
||||
subPath: SUPREME/helm-live/dvr
|
||||
- name: srs-vol
|
||||
mountPath: /home/hls
|
||||
subPath: SUPREME/helm-live/hls
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: Always
|
||||
- name: oss-adaptor
|
||||
image: 'A1C2IP:8033/cmii/cmii-srs-oss-adaptor:2023-SA'
|
||||
env:
|
||||
- name: OSS_ENDPOINT
|
||||
value: 'http://M2D2IP:9000'
|
||||
- name: OSS_AK
|
||||
value: cmii
|
||||
- name: OSS_SK
|
||||
value: 'B#923fC7mk'
|
||||
- name: OSS_BUCKET
|
||||
value: live-cluster-hls
|
||||
- name: SRS_OP
|
||||
value: 'http://helm-live-op-svc-v2:8080'
|
||||
- name: MYSQL_ENDPOINT
|
||||
value: 'helm-mysql:3306'
|
||||
- name: MYSQL_USERNAME
|
||||
value: k8s_admin
|
||||
- name: MYSQL_PASSWORD
|
||||
value: fP#UaH6qQ3)8
|
||||
- name: MYSQL_DATABASE
|
||||
value: cmii_live_srs_op
|
||||
- name: MYSQL_TABLE
|
||||
value: live_segment
|
||||
- name: LOG_LEVEL
|
||||
value: info
|
||||
- name: OSS_META
|
||||
value: 'yes'
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1200m
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
volumeMounts:
|
||||
- name: srs-vol
|
||||
mountPath: /cmii/share/hls
|
||||
subPath: SUPREME/helm-live/hls
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: Always
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
dnsPolicy: ClusterFirst
|
||||
securityContext: {}
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
affinity: {}
|
||||
schedulerName: default-scheduler
|
||||
serviceName: helm-live-srsrtc-svc
|
||||
podManagementPolicy: OrderedReady
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
partition: 0
|
||||
revisionHistoryLimit: 10
|
||||
---
|
||||
# live-srs section
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: helm-live-op-v2
|
||||
namespace: SUPREME
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
cmii.app: live-engine
|
||||
cmii.type: midware
|
||||
helm.sh/chart: cmlc-live-live-op-2.0.0
|
||||
live-role: op-v2
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
live-role: op-v2
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
live-role: op-v2
|
||||
spe
|
||||
13
21-资阳移动/10-minio-dockercompose.yml
Normal file
@@ -0,0 +1,13 @@
version: '2'

services:
  minio1:
    ports:
      - "9000:9000"
      - "9001:9001"
    image: '192.168.6.6:8033/cmii/minio:2023.5.4'
    environment:
      - MINIO_ROOT_USER=cmii
      - MINIO_ROOT_PASSWORD=B#923fC7mk
    volumes:
      - /var/lib/docker/minio-pv/:/mnt/data
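A typical way to bring this MinIO instance up on the storage node, a sketch assuming the compose file name as committed and that the host path exists:

mkdir -p /var/lib/docker/minio-pv
docker-compose -f 10-minio-dockercompose.yml up -d
docker-compose -f 10-minio-dockercompose.yml ps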
31
21-资阳移动/11-nginx-offline-map.conf
Normal file
@@ -0,0 +1,31 @@
server {
    listen 8889;
    server_name localhost;
    # Origins allowed for cross-origin requests; * means all
    add_header 'Access-Control-Allow-Origin' *;
    # Allow requests to carry cookies
    add_header 'Access-Control-Allow-Credentials' 'true';
    # Allowed request methods, e.g. GET/POST/PUT/DELETE
    add_header 'Access-Control-Allow-Methods' *;
    # Allowed request headers
    add_header 'Access-Control-Allow-Headers' *;

    location /electronic {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }

    location /satellite {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }

    # http://192.168.6.6:8889/electronic/{z}/{x}/{y}.png
    # http://192.168.6.6:8889/satellite/{z}/{x}/{y}.png
}
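Once this file is dropped into /etc/nginx/conf.d/ and nginx is reloaded, a single tile request is enough to confirm the CORS headers; the z/x/y values below are placeholders:

nginx -t && nginx -s reload
curl -sI "http://192.168.6.6:8889/electronic/10/800/400.png" | grep -i -e '^HTTP/' -e 'access-control'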
43
21-资阳移动/11-nginx-proxy.conf
Normal file
@@ -0,0 +1,43 @@
server {
    listen 8888;
    server_name localhost;
    location / {
        proxy_pass http://localhost:30500;
        client_max_body_size 5120m;
        client_body_buffer_size 5120m;
        client_body_timeout 6000s;
        proxy_send_timeout 10000s;
        proxy_read_timeout 10000s;
        proxy_connect_timeout 600s;
        proxy_max_temp_file_size 5120m;
        proxy_request_buffering on;
        proxy_buffering off;
        proxy_buffer_size 4k;
        proxy_buffers 4 12k;
        proxy_set_header Host fake-domain.zyga.io;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location /_AMapService/v4/map/styles {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://webapi.amap.com/v4/map/styles;
    }

    location /_AMapService/ {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://restapi.amap.com/;
    }

    location /rtc/v1/ {
        add_header Access-Control-Allow-Headers X-Requested-With;
        add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
        proxy_pass http://127.0.0.1:30985/rtc/v1/;
    }

    location ~ ^/\w*/actuator/ {
        return 403;
    }
}
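A quick smoke test of the reverse proxy after reload; the request paths are illustrative only:

nginx -t && nginx -s reload
# front-end entry through the proxy
curl -sI http://127.0.0.1:8888/ | head -n 5
# actuator endpoints must be blocked by the last location
curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:8888/uav/actuator/health   # expect 403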
43
21-资阳移动/11-nginx.conf
Normal file
@@ -0,0 +1,43 @@
user root;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    use epoll;
    worker_connections 65535;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    server_tokens off;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    send_timeout 1200;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 600;
    types_hash_max_size 2048;
    client_max_body_size 2048m;
    client_body_buffer_size 2048m;

    underscores_in_headers on;

    proxy_send_timeout 600;
    proxy_read_timeout 600;
    proxy_connect_timeout 600;
    proxy_buffer_size 128k;
    proxy_buffers 8 256k;

    include /etc/nginx/conf.d/*.conf;
}
stream {
    include /etc/nginx/conf.d/stream/*.conf;
}
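The http and stream blocks pull their drop-ins from /etc/nginx/conf.d/ and /etc/nginx/conf.d/stream/; a sketch of putting the three files from this commit in place (destination paths assumed):

mkdir -p /etc/nginx/conf.d/stream
cp 11-nginx.conf /etc/nginx/nginx.conf
cp 11-nginx-proxy.conf 11-nginx-offline-map.conf /etc/nginx/conf.d/
nginx -t && systemctl reload nginx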
147
21-资阳移动/2-imageDownSync.sh
Normal file
@@ -0,0 +1,147 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
#### Edit the values below ####
#### Edit the values below ####
#### Edit the values below ####
|
||||
|
||||
cmlc_app_image_list="cmlc-app-images-4.1.6.txt" # update to the release being shipped
|
||||
rancher_image_list="kubernetes-images-2.5.7-1.20.4.txt" # usually no change needed
middleware_image_list="middleware-images.txt" # usually no change needed
|
||||
#DockerRegisterDomain="20.47.129.116:8033" # adjust to the actual environment
DockerRegisterDomain="192.168.6.6:8033" # adjust to the actual environment
HarborAdminPass=V2ryStr@ngPss # must match the password used in the first script
|
||||
|
||||
#### Edit the values above ####
#### Edit the values above ####
#### Edit the values above ####
|
||||
|
||||
downloadAllNeededImages() {
|
||||
while [[ $# -gt 0 ]]; do
|
||||
pulled=""
|
||||
while IFS= read -r i; do
|
||||
[ -z "${i}" ] && continue
|
||||
echo "开始下载:${i}"
|
||||
if docker pull "${i}" >/dev/null 2>&1; then
|
||||
echo "Image pull success: ${i}"
|
||||
pulled="${pulled} ${i}"
|
||||
else
|
||||
if docker inspect "${i}" >/dev/null 2>&1; then
|
||||
pulled="${pulled} ${i}"
|
||||
else
|
||||
echo "Image pull failed: ${i}"
|
||||
fi
|
||||
fi
|
||||
echo "-------------------------------------------------"
|
||||
done <"${1}"
|
||||
shift
|
||||
done
|
||||
}
|
||||
|
||||
downloadAllNeededImagesAndCompress() {
|
||||
while [[ $# -gt 0 ]]; do
|
||||
pulled=""
|
||||
while IFS= read -r i; do
|
||||
[ -z "${i}" ] && continue
|
||||
echo "开始下载:${i}"
|
||||
if docker pull "${i}" >/dev/null 2>&1; then
|
||||
echo "Image pull success: ${i}"
|
||||
pulled="${pulled} ${i}"
|
||||
else
|
||||
if docker inspect "${i}" >/dev/null 2>&1; then
|
||||
pulled="${pulled} ${i}"
|
||||
else
|
||||
echo "Image pull failed: ${i}"
|
||||
fi
|
||||
fi
|
||||
echo "-------------------------------------------------"
|
||||
done <"${1}"
|
||||
compressPacName="$(echo ${1} | cut -d"." -f1).tar.gz"
|
||||
|
||||
echo "Creating ${compressPacName} with $(echo ${pulled} | wc -w | tr -d '[:space:]') images"
|
||||
docker save $(echo ${pulled}) | gzip --stdout > ${compressPacName}
|
||||
|
||||
shift
|
||||
done
|
||||
|
||||
|
||||
echo "已经完成打包工作!"
|
||||
}
|
||||
|
||||
pushRKEImageToHarbor(){
|
||||
linux_images=()
|
||||
while IFS= read -r i; do
|
||||
[ -z "${i}" ] && continue
|
||||
linux_images+=("${i}");
|
||||
done < "${rancher_image_list}"
|
||||
|
||||
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
|
||||
|
||||
for i in "${linux_images[@]}"; do
|
||||
[ -z "${i}" ] && continue
|
||||
case $i in
|
||||
*/*)
|
||||
image_name="${DockerRegisterDomain}/${i}"
|
||||
;;
|
||||
*)
|
||||
image_name="${DockerRegisterDomain}/rancher/${i}"
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "开始镜像至私有仓库推送:${image_name}"
|
||||
docker tag "${i}" "${image_name}"
|
||||
docker push "${image_name}"
|
||||
echo "-------------------------------------------------"
|
||||
done
|
||||
}
|
||||
|
||||
pushCMLCAPPImageToHarbor(){
|
||||
app_images=()
|
||||
while IFS= read -r i; do
|
||||
[ -z "${i}" ] && continue
|
||||
app_images+=("${i}");
|
||||
done < "${cmlc_app_image_list}"
|
||||
|
||||
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
|
||||
for app in "${app_images[@]}"; do
|
||||
[ -z "${app}" ] && continue
|
||||
image_name="${DockerRegisterDomain}/$(echo ${app} | cut -d"/" -f2-8)"
|
||||
echo "开始镜像至私有仓库推送:${image_name}"
|
||||
docker tag "${app}" "${image_name}"
|
||||
docker push "${image_name}"
|
||||
echo "-------------------------------------------------"
|
||||
done
|
||||
}
|
||||
|
||||
pushMiddlewareImageToHarbor(){
|
||||
middleware_image=()
|
||||
while IFS= read -r i; do
|
||||
[ -z "${i}" ] && continue
|
||||
middleware_image+=("${i}");
|
||||
done < "${middleware_image_list}"
|
||||
|
||||
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
|
||||
for app in "${middleware_image[@]}"; do
|
||||
[ -z "${app}" ] && continue
|
||||
case ${app} in
|
||||
*/*/*)
|
||||
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f3-8)"
|
||||
;;
|
||||
*/*)
|
||||
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f2-8)"
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "开始镜像至私有仓库推送:${image_name}"
|
||||
docker tag "${app}" "${image_name}"
|
||||
docker push "${image_name}"
|
||||
echo "-------------------------------------------------"
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
#downloadAllNeededImagesAndCompress "${middleware_image_list}"
|
||||
#downloadAllNeededImages "${rancher_image_list}"
|
||||
|
||||
#pushRKEImageToHarbor
|
||||
#pushCMLCAPPImageToHarbor
|
||||
pushMiddlewareImageToHarbor
|
||||
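Only pushMiddlewareImageToHarbor is enabled at the bottom; the other stages run by uncommenting their calls. A rough offline round-trip sketch, assuming the list files named at the top sit next to the script:

# 1. on a machine with internet access: pull and pack
bash 2-imageDownSync.sh      # with downloadAllNeededImagesAndCompress "${middleware_image_list}" uncommented
# 2. copy the resulting *.tar.gz to the offline host, then load and push
docker load < middleware-images.tar.gz
bash 2-imageDownSync.sh      # with pushMiddlewareImageToHarbor left enabled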
192
21-资阳移动/cluster.yanl
Normal file
@@ -0,0 +1,192 @@
|
||||
nodes:
|
||||
- address: 192.168.6.6
|
||||
user: root
|
||||
port: 2022
|
||||
role:
|
||||
- controlplane
|
||||
- etcd
|
||||
- worker
|
||||
internal_address: 192.168.6.6
|
||||
labels:
|
||||
ingress-deploy: "true"
|
||||
- address: 192.168.6.5
|
||||
user: root
|
||||
port: 2022
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.6.5
|
||||
- address: 192.168.6.7
|
||||
user: root
|
||||
port: 2022
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.6.7
|
||||
- address: 192.168.6.3
|
||||
user: root
|
||||
port: 2022
|
||||
role:
|
||||
- worker
|
||||
internal_address: 192.168.6.3
|
||||
labels:
|
||||
mysql-deploy: "true"
|
||||
|
||||
authentication:
|
||||
strategy: x509
|
||||
sans:
|
||||
- "192.168.6.6"
|
||||
- "117.175.46.61"
|
||||
- "127.0.0.1"
|
||||
|
||||
private_registries:
|
||||
- url: 192.168.6.6:8033 # private registry address
|
||||
user: admin
|
||||
password: "V2ryStr@ngPss"
|
||||
is_default: true
|
||||
|
||||
##############################################################################
|
||||
|
||||
# Defaults to false; if set to true, RKE will not raise an error when it detects an unsupported Docker version
|
||||
ignore_docker_version: true
|
||||
|
||||
# Set the name of the Kubernetes cluster
|
||||
cluster_name: rke-cluster
|
||||
|
||||
kubernetes_version: v1.20.4-rancher1-1
|
||||
|
||||
port: 2022
|
||||
ssh_key_path: /root/.ssh/id_ed25519
|
||||
#ssh_key_path: /root/.ssh/id_rsa
|
||||
|
||||
# Enable running cri-dockerd
|
||||
# Up to Kubernetes 1.23, kubelet contained code called dockershim
|
||||
# to support Docker runtime. The replacement is called cri-dockerd
|
||||
# and should be enabled if you want to keep using Docker as your
|
||||
# container runtime
|
||||
# Only available to enable in Kubernetes 1.21 and higher
|
||||
enable_cri_dockerd: true
|
||||
|
||||
services:
|
||||
etcd:
|
||||
backup_config:
|
||||
enabled: false
|
||||
interval_hours: 72
|
||||
retention: 3
|
||||
safe_timestamp: false
|
||||
timeout: 300
|
||||
creation: 12h
|
||||
extra_args:
|
||||
election-timeout: 5000
|
||||
heartbeat-interval: 500
|
||||
gid: 0
|
||||
retention: 72h
|
||||
snapshot: false
|
||||
uid: 0
|
||||
|
||||
kube-api:
|
||||
# IP range for any services created on Kubernetes
|
||||
# This must match the service_cluster_ip_range in kube-controller
|
||||
service_cluster_ip_range: 172.24.0.0/16
|
||||
# Expose a different port range for NodePort services
|
||||
service_node_port_range: 30000-40000
|
||||
always_pull_images: true
|
||||
pod_security_policy: false
|
||||
# Add additional arguments to the kubernetes API server
|
||||
# This WILL OVERRIDE any existing defaults
|
||||
extra_args:
|
||||
# Enable audit log to stdout
|
||||
audit-log-path: "-"
|
||||
# Increase number of delete workers
|
||||
delete-collection-workers: 3
|
||||
# Set the level of log output to warning-level
|
||||
v: 1
|
||||
kube-controller:
|
||||
# CIDR pool used to assign IP addresses to pods in the cluster
|
||||
cluster_cidr: 172.28.0.0/16
|
||||
# IP range for any services created on Kubernetes
|
||||
# This must match the service_cluster_ip_range in kube-api
|
||||
service_cluster_ip_range: 172.24.0.0/16
|
||||
# Add additional arguments to the kubernetes API server
|
||||
# This WILL OVERRIDE any existing defaults
|
||||
extra_args:
|
||||
# Set the level of log output to debug-level
|
||||
v: 1
|
||||
# Enable RotateKubeletServerCertificate feature gate
|
||||
feature-gates: RotateKubeletServerCertificate=true
|
||||
# Enable TLS Certificates management
|
||||
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
|
||||
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
|
||||
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
|
||||
kubelet:
|
||||
# Base domain for the cluster
|
||||
cluster_domain: cluster.local
|
||||
# IP address for the DNS service endpoint
|
||||
cluster_dns_server: 172.24.0.10
|
||||
# Fail if swap is on
|
||||
fail_swap_on: false
|
||||
# Override the default max pods (110); the value is set via extra_args below
|
||||
extra_binds:
|
||||
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
|
||||
extra_args:
|
||||
max-pods: 122
|
||||
# Optionally define additional volume binds to a service
|
||||
scheduler:
|
||||
extra_args:
|
||||
# Set the level of log output to warning-level
|
||||
v: 0
|
||||
kubeproxy:
|
||||
extra_args:
|
||||
# Set the level of log output to warning-level
|
||||
v: 1
|
||||
|
||||
authorization:
|
||||
mode: rbac
|
||||
|
||||
addon_job_timeout: 30
|
||||
|
||||
# Specify network plugin-in (canal, calico, flannel, weave, or none)
|
||||
network:
|
||||
options:
|
||||
flannel_backend_type: vxlan
|
||||
flannel_iface: ens6
|
||||
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
|
||||
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
|
||||
plugin: flannel
|
||||
|
||||
# Specify DNS provider (coredns or kube-dns)
|
||||
dns:
|
||||
provider: coredns
|
||||
nodelocal: {}
|
||||
# Available as of v1.1.0
|
||||
update_strategy:
|
||||
strategy: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 20%
|
||||
maxSurge: 15%
|
||||
linear_autoscaler_params:
|
||||
cores_per_replica: 0.34
|
||||
nodes_per_replica: 4
|
||||
prevent_single_point_failure: true
|
||||
min: 2
|
||||
max: 3
|
||||
|
||||
# Specify monitoring provider (metrics-server)
|
||||
monitoring:
|
||||
provider: metrics-server
|
||||
# Available as of v1.1.0
|
||||
update_strategy:
|
||||
strategy: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 8
|
||||
|
||||
ingress:
|
||||
provider: nginx
|
||||
default_backend: true
|
||||
http_port: 0
|
||||
https_port: 0
|
||||
extra_envs:
|
||||
- name: TZ
|
||||
value: Asia/Shanghai
|
||||
node_selector:
|
||||
ingress-deploy: "true"
|
||||
options:
|
||||
use-forwarded-headers: "true"
|
||||
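With the SSH port, key and registry credentials above in place, the cluster is brought up by pointing RKE at this file; the kubeconfig name below follows RKE's kube_config_<configfile> convention (an RKE release matching v1.20.4-rancher1-1 is assumed):

rke up --config cluster.yanl
export KUBECONFIG=$PWD/kube_config_cluster.yanl
kubectl get nodes -o wide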
65
21-资阳移动/image-cmii-5.4.0.txt
Normal file
@@ -0,0 +1,65 @@
|
||||
[
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-app-release:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-hljtt:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qingdao:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:5.4.0",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.4.0",
|
||||
"ossrs/srs:v5.0.195",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA",
|
||||
"harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0"
|
||||
]
|
||||
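This list is a JSON array, while 2-imageDownSync.sh reads one image reference per line; a small conversion sketch, assuming jq is available and that cmlc_app_image_list is then pointed at the generated file:

jq -r '.[]' image-cmii-5.4.0.txt > cmlc-app-images-5.4.0.txt
wc -l cmlc-app-images-5.4.0.txt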
229
21-资阳移动/image-download.sh
Normal file
@@ -0,0 +1,229 @@
|
||||
#!/bin/bash
|
||||
|
||||
cmii_image_list=(
|
||||
cmlc=cmii=cmii-admin-data=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-admin-gateway=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-admin-user=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-live-operator=5.2.0.tar.gz
|
||||
cmlc=cmii=cmii-open-gateway=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-srs-oss-adaptor=2023-SA.tar.gz
|
||||
cmlc=cmii=cmii-suav-platform-supervision=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-suav-platform-supervisionh5=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-suav-supervision=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-airspace=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-alarm=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-brain=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-cloud-live=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-data-post-process=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-developer=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-device=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-gateway=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-industrial-portfolio=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-integration=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-kpi-monitor=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-logger=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-material-warehouse=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-mission=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-mqtthandler=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-notice=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-oauth=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-ai-brain=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-armypeople=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-base=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-cms-portal=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-detection=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-logistics=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-media=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-multiterminal=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-mws=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-oms=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-open=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-securityh5=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-share=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-platform-splice=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-process=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-surveillance=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-tower=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-user=5.4.0.tar.gz
|
||||
cmlc=cmii=cmii-uav-waypoint=5.4.0.tar.gz
|
||||
cmlc=cmii=ossrs=v5.0.195.tar.gz
|
||||
)
|
||||
middle_image_list=(
|
||||
cmlc=cmii=nfs-subdir-external-provisioner=v4.0.2.tar.gz
|
||||
docker=busybox=latest.tar.gz
|
||||
cmlc=cmii=srs=v4.0-r3.tar.gz
|
||||
docker=emqx=emqx=4.2.12.tar.gz
|
||||
docker=bitnami=bitnami-shell=10-debian-10-r140.tar.gz
|
||||
docker=kubernetesui=dashboard=v2.0.1.tar.gz
|
||||
docker=bitnami=bitnami-shell=11-debian-11-r136.tar.gz
|
||||
docker=kubernetesui=metrics-scraper=v1.0.4.tar.gz
|
||||
docker=bitnami=minio=2022.5.4.tar.gz
|
||||
docker=mongo=5.0.tar.gz
|
||||
docker=bitnami=minio=2023.5.4.tar.gz
|
||||
docker=nacos=nacos-server=v2.1.2-slim.tar.gz
|
||||
docker=bitnami=mysql=8.0.35-debian-11-r1.tar.gz
|
||||
docker=nginx=1.21.3.tar.gz
|
||||
docker=bitnami=mysql=8.1.0-debian-11-r42.tar.gz
|
||||
docker=ossrs=srs=v4.0.136.tar.gz
|
||||
docker=bitnami=rabbitmq=3.11.26-debian-11-r2.tar.gz
|
||||
docker=ossrs=srs=v5.0.195.tar.gz
|
||||
docker=bitnami=rabbitmq=3.9.12-debian-10-r3.tar.gz
|
||||
docker=rabbitmq=3.9-management.tar.gz
|
||||
docker=bitnami=redis=6.2.14-debian-11-r1.tar.gz
|
||||
docker=redis=6.0.20-alpine.tar.gz
|
||||
docker=bitnami=redis=6.2.6-debian-10-r0.tar.gz
|
||||
docker=simonrupf=chronyd=0.4.3.tar.gz
|
||||
)
|
||||
rke_image_list=(
|
||||
docker=rancher=backup-restore-operator=v1.0.3.tar.gz
|
||||
docker=rancher=calico-cni=v3.17.2.tar.gz
|
||||
docker=rancher=calico-ctl=v3.17.2.tar.gz
|
||||
docker=rancher=calico-kube-controllers=v3.17.2.tar.gz
|
||||
docker=rancher=calico-node=v3.17.2.tar.gz
|
||||
docker=rancher=calico-pod2daemon-flexvol=v3.17.2.tar.gz
|
||||
docker=rancher=cis-operator=v1.0.3.tar.gz
|
||||
docker=rancher=cluster-proportional-autoscaler=1.7.1.tar.gz
|
||||
docker=rancher=cluster-proportional-autoscaler=1.8.1.tar.gz
|
||||
docker=rancher=configmap-reload=v0.3.0-rancher4.tar.gz
|
||||
docker=rancher=coredns-coredns=1.8.0.tar.gz
|
||||
docker=rancher=coreos-etcd=v3.4.14-rancher1.tar.gz
|
||||
docker=rancher=coreos-flannel=v0.13.0-rancher1.tar.gz
|
||||
docker=rancher=coreos-kube-state-metrics=v1.9.7.tar.gz
|
||||
docker=rancher=coreos-prometheus-config-reloader=v0.39.0.tar.gz
|
||||
docker=rancher=coreos-prometheus-operator=v0.39.0.tar.gz
|
||||
docker=rancher=externalip-webhook=v0.1.6.tar.gz
|
||||
docker=rancher=flannel-cni=v0.3.0-rancher6.tar.gz
|
||||
docker=rancher=fleet-agent=v0.3.4.tar.gz
|
||||
docker=rancher=fleet=v0.3.4.tar.gz
|
||||
docker=rancher=fluentd=v0.1.24.tar.gz
|
||||
docker=rancher=grafana-grafana=7.1.5.tar.gz
|
||||
docker=rancher=hyperkube=v1.20.4-rancher1.tar.gz
|
||||
docker=rancher=istio-kubectl=1.5.10.tar.gz
|
||||
docker=rancher=jimmidyson-configmap-reload=v0.3.0.tar.gz
|
||||
docker=rancher=k8s-dns-dnsmasq-nanny=1.15.2.tar.gz
|
||||
docker=rancher=k8s-dns-kube-dns=1.15.2.tar.gz
|
||||
docker=rancher=k8s-dns-node-cache=1.15.13.tar.gz
|
||||
docker=rancher=k8s-dns-sidecar=1.15.2.tar.gz
|
||||
docker=rancher=klipper-lb=v0.1.2.tar.gz
|
||||
docker=rancher=kube-api-auth=v0.1.4.tar.gz
|
||||
docker=rancher=kubernetes-external-dns=v0.7.3.tar.gz
|
||||
docker=rancher=library-busybox=1.31.1.tar.gz
|
||||
docker=rancher=library-busybox=1.32.1.tar.gz
|
||||
docker=rancher=library-nginx=1.19.2-alpine.tar.gz
|
||||
docker=rancher=library-traefik=1.7.19.tar.gz
|
||||
docker=rancher=local-path-provisioner=v0.0.11.tar.gz
|
||||
docker=rancher=local-path-provisioner=v0.0.14.tar.gz
|
||||
docker=rancher=local-path-provisioner=v0.0.19.tar.gz
|
||||
docker=rancher=log-aggregator=v0.1.7.tar.gz
|
||||
docker=rancher=metrics-server=v0.4.1.tar.gz
|
||||
docker=rancher=nginx-ingress-controller-defaultbackend=1.5-rancher1.tar.gz
|
||||
docker=rancher=nginx-ingress-controller=nginx-0.43.0-rancher1.tar.gz
|
||||
docker=rancher=opa-gatekeeper=v3.1.0-beta.7.tar.gz
|
||||
docker=rancher=openzipkin-zipkin=2.14.2.tar.gz
|
||||
docker=rancher=pause=3.2.tar.gz
|
||||
docker=rancher=plugins-docker=18.09.tar.gz
|
||||
docker=rancher=prom-alertmanager=v0.21.0.tar.gz
|
||||
docker=rancher=prometheus-auth=v0.2.1.tar.gz
|
||||
docker=rancher=prom-node-exporter=v1.0.1.tar.gz
|
||||
docker=rancher=prom-prometheus=v2.18.2.tar.gz
|
||||
docker=rancher=rancher-agent=v2.5.7.tar.gz
|
||||
docker=rancher=rancher=v2.5.7.tar.gz
|
||||
docker=rancher=rancher-webhook=v0.1.0-beta9.tar.gz
|
||||
docker=rancher=rke-tools=v0.1.72.tar.gz
|
||||
docker=rancher=security-scan=v0.1.14.tar.gz
|
||||
docker=rancher=security-scan=v0.2.2.tar.gz
|
||||
docker=rancher=shell=v0.1.6.tar.gz
|
||||
docker=rancher=sonobuoy-sonobuoy=v0.16.3.tar.gz
|
||||
docker=rancher=system-upgrade-controller=v0.6.2.tar.gz
|
||||
)
|
||||
|
||||
oss_prefix=https://oss.demo.uavcmlc.com/cmlc-installation/5.4.0
|
||||
oss_middle_prefix=https://oss.demo.uavcmlc.com/cmlc-installation/mid-image-amd64
|
||||
oss_rke_prefix=https://oss.demo.uavcmlc.com/cmlc-installation/rke-image-amd64
|
||||
target_harbor_host=192.168.6.6:8033
|
||||
rke_image_local_prefix=/root/wdd/image/rke/
|
||||
mid_image_local_prefix=/root/wdd/image/middle/
|
||||
|
||||
cmii_image_download_from_oss() {
|
||||
for image in "${cmii_image_list[@]}"; do
|
||||
echo "start to download => $image"
|
||||
# curl -x socks5h://172.18.10.251:9997 $oss_prefix/$image -o $image
|
||||
curl $oss_prefix/$image -o $image
|
||||
echo ""
|
||||
done
|
||||
}
|
||||
|
||||
middle_image_download_from_oss() {
|
||||
mkdir -p ${mid_image_local_prefix}
|
||||
for image in "${middle_image_list[@]}"; do
|
||||
echo "start to download => $image"
|
||||
# curl -x socks5h://172.18.10.251:9997 $oss_middle_prefix/$image -o ${mid_image_local_prefix}$image
|
||||
curl $oss_middle_prefix/$image -o ${mid_image_local_prefix}$image
|
||||
echo ""
|
||||
done
|
||||
}
|
||||
|
||||
rke_image_download_from_oss() {
|
||||
mkdir -p ${rke_image_local_prefix}
|
||||
for image in "${rke_image_list[@]}"; do
|
||||
echo "start to download => $image"
|
||||
# curl -x socks5h://172.18.10.251:9997 $oss_rke_prefix/$image -o $rke_image_local_prefix$image
|
||||
curl $oss_rke_prefix/$image -o $rke_image_local_prefix$image
|
||||
echo ""
|
||||
done
|
||||
}
|
||||
|
||||
image_load_to_harbor() {
|
||||
local cmii_harbor_prefix="harbor.cdcyy.com.cn/cmii/"
|
||||
for image in "${cmii_image_list[@]}"; do
|
||||
echo "start to load => $image"
|
||||
docker load <"$image"
|
||||
echo ""
|
||||
if [[ $image == cmlc* ]]; then
|
||||
local app_name=$(echo $image | cut -d "=" -f3)
|
||||
local ccc=$(echo $image | cut -d "=" -f4)
|
||||
local app_tag="${ccc%.tar.gz}"
|
||||
echo "from $cmii_harbor_prefix$app_name:$app_tag ==> $target_harbor_host/cmii/$app_name:$app_tag"
|
||||
docker tag "$cmii_harbor_prefix$app_name:$app_tag" "$target_harbor_host/cmii/$app_name:$app_tag"
|
||||
|
||||
echo "start to push => $target_harbor_host/cmii/$app_name:$app_tag"
|
||||
docker login -u admin -p V2ryStr@ngPss $target_harbor_host
|
||||
docker push "$target_harbor_host/cmii/$app_name:$app_tag"
|
||||
|
||||
fi
|
||||
|
||||
echo ""
|
||||
done
|
||||
|
||||
# for image in "${rke_image_list[@]}"; do
|
||||
# echo "start to load => $image"
|
||||
# docker load <"${rke_image_local_prefix}/$image"
|
||||
# echo ""
|
||||
# local app_name_prefix=$(echo $image | cut -d "=" -f2)
|
||||
# local app_name=$(echo $image | cut -d "=" -f3)
|
||||
# local ccc=$(echo $image | cut -d "=" -f4)
|
||||
# local app_tag="${ccc%.tar.gz}"
|
||||
# echo "from $app_name_prefix/$app_name:$app_tag ==> $target_harbor_host/rancher/$app_name:$app_tag"
|
||||
# docker tag "$app_name_prefix/$app_name:$app_tag" "$target_harbor_host/rancher/$app_name:$app_tag"
|
||||
# echo "start to push => $target_harbor_host/rancher/$app_name:$app_tag"
|
||||
# docker login -u admin -p V2ryStr@ngPss $target_harbor_host
|
||||
# docker push "$target_harbor_host/rancher/$app_name:$app_tag"
|
||||
# echo ""
|
||||
# done
|
||||
|
||||
}
|
||||
|
||||
create_harbor_project() {
|
||||
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$target_harbor_host/api/v2.0/projects
|
||||
|
||||
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"rancher","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$target_harbor_host/api/v2.0/projects
|
||||
}
|
||||
|
||||
cmii_image_download_from_oss
|
||||
|
||||
#rke_image_download_from_oss
|
||||
|
||||
#create_harbor_project
|
||||
|
||||
image_load_to_harbor
|
||||
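image_load_to_harbor logs into Harbor once per image; a lighter pattern is to log in once up front and confirm the target project exists before pushing. A sketch against the Harbor v2 API, using the credentials hard-coded above:

target_harbor_host=192.168.6.6:8033
docker login -u admin -p 'V2ryStr@ngPss' "$target_harbor_host"
curl -s -u 'admin:V2ryStr@ngPss' "http://$target_harbor_host/api/v2.0/projects?name=cmii"; echo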
72
21-资阳移动/images-uavcloud-demo-2024-03-05-16-16-31.txt
Normal file
@@ -0,0 +1,72 @@
|
||||
---
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-media:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-security:4.1.6
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-securityh5:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervision:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-armypeople:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-open:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-share:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-base:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-emergency-rescue:5.2.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-logistics:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-qinghaitourism:4.1.0-21377-0508
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-seniclive:5.2.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.4.0-hotfix
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform:5.3.0-hjltt
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-jiangsuwenlv:4.1.3-jiangsu-0427
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-multiterminal:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-visualization:5.2.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-suav-platform-supervisionh5:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-ai-brain:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-cms-portal:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-detection:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-mws:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-oms:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform:4.1.6-24238-qingdao
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-splice:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-platform-threedsimulation:5.2.0-21392
|
||||
---
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-oauth:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-brain:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-cms:5.3.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-developer:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-kpi-monitor:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-integration:5.4.0-0228
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-material-warehouse:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-mission:5.4.0-26462-0228
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-surveillance:5.4.0-24227
|
||||
harbor.cdcyy.com.cn/cmii/cmii-app-release:4.2.0-validation
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-cloud-live:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-depotautoreturn:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-emergency:5.3.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-admin-user:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-open-gateway:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-data-post-process:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-user:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-mqtthandler:5.4.0-26768
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-notice:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-process:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-suav-supervision:5.2.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-airspace:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-autowaypoint:4.1.6-cm
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-gateway:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-alarm:5.3.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-clusters:5.2.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-device:5.4.0-26905
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-industrial-portfolio:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-admin-data:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-logger:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-waypoint:5.4.0-26768
|
||||
harbor.cdcyy.com.cn/cmii/cmii-admin-gateway:5.4.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-threedsimulation:5.1.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-tower:5.4.0
|
||||
---
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-datasource:5.2.0-24810
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-engine:5.1.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-grid-manage:5.1.0
|
||||
harbor.cdcyy.com.cn/cmii/cmii-uav-gis-server:5.4.0
|
||||
---
|
||||
ossrs/srs:v5.0.195
|
||||
harbor.cdcyy.com.cn/cmii/cmii-srs-oss-adaptor:2023-SA
|
||||
harbor.cdcyy.com.cn/cmii/cmii-live-operator:5.2.0
|
||||
---
|
||||
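The tags in this export differ from image-cmii-5.4.0.txt for several services (hotfix and date suffixes); a quick way to see the delta, with hypothetical scratch file names:

grep -v '^---' images-uavcloud-demo-2024-03-05-16-16-31.txt | sort > demo-images.txt
jq -r '.[]' image-cmii-5.4.0.txt | sort > release-images.txt
diff demo-images.txt release-images.txt | head -n 20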
4399
21-资阳移动/k8s-backend.yaml
Normal file
File diff suppressed because it is too large
488
21-资阳移动/k8s-srs.yaml
Normal file
@@ -0,0 +1,488 @@
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srs-cm
|
||||
namespace: zyga
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
cmii.app: live-srs
|
||||
cmii.type: midware
|
||||
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
|
||||
data:
|
||||
srs.rtc.conf: |-
|
||||
listen 30935;
|
||||
max_connections 4096;
|
||||
srs_log_tank console;
|
||||
srs_log_level info;
|
||||
srs_log_file /home/srs.log;
|
||||
daemon off;
|
||||
http_api {
|
||||
enabled on;
|
||||
listen 1985;
|
||||
crossdomain on;
|
||||
}
|
||||
stats {
|
||||
network 0;
|
||||
}
|
||||
http_server {
|
||||
enabled on;
|
||||
listen 8080;
|
||||
dir /home/hls;
|
||||
}
|
||||
srt_server {
|
||||
enabled on;
|
||||
listen 30556;
|
||||
maxbw 1000000000;
|
||||
connect_timeout 4000;
|
||||
peerlatency 600;
|
||||
recvlatency 600;
|
||||
}
|
||||
rtc_server {
|
||||
enabled on;
|
||||
listen 30090;
|
||||
candidate $CANDIDATE;
|
||||
}
|
||||
vhost __defaultVhost__ {
|
||||
http_hooks {
|
||||
enabled on;
|
||||
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
|
||||
}
|
||||
http_remux {
|
||||
enabled on;
|
||||
}
|
||||
rtc {
|
||||
enabled on;
|
||||
rtmp_to_rtc on;
|
||||
rtc_to_rtmp on;
|
||||
keep_bframe off;
|
||||
}
|
||||
tcp_nodelay on;
|
||||
min_latency on;
|
||||
play {
|
||||
gop_cache off;
|
||||
mw_latency 100;
|
||||
mw_msgs 10;
|
||||
}
|
||||
publish {
|
||||
firstpkt_timeout 8000;
|
||||
normal_timeout 4000;
|
||||
mr on;
|
||||
}
|
||||
dvr {
|
||||
enabled off;
|
||||
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
|
||||
dvr_plan session;
|
||||
}
|
||||
hls {
|
||||
enabled on;
|
||||
hls_path /home/hls;
|
||||
hls_fragment 10;
|
||||
hls_window 60;
|
||||
hls_m3u8_file [app]/[stream].m3u8;
|
||||
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
|
||||
hls_cleanup on;
|
||||
hls_entry_prefix http://10.18.70.43:8888;
|
||||
}
|
||||
}
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srs-svc-exporter
|
||||
namespace: zyga
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
ports:
|
||||
- name: rtmp
|
||||
protocol: TCP
|
||||
port: 30935
|
||||
targetPort: 30935
|
||||
nodePort: 30935
|
||||
- name: rtc
|
||||
protocol: UDP
|
||||
port: 30090
|
||||
targetPort: 30090
|
||||
nodePort: 30090
|
||||
- name: srt
|
||||
protocol: UDP
|
||||
port: 30556
|
||||
targetPort: 30556
|
||||
nodePort: 30556
|
||||
- name: api
|
||||
protocol: TCP
|
||||
port: 1985
|
||||
targetPort: 1985
|
||||
nodePort: 30557
|
||||
selector:
|
||||
srs-role: rtc
|
||||
type: NodePort
|
||||
sessionAffinity: None
|
||||
externalTrafficPolicy: Cluster
|
||||
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srs-svc
|
||||
namespace: zyga
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
port: 8080
|
||||
targetPort: 8080
|
||||
- name: api
|
||||
protocol: TCP
|
||||
port: 1985
|
||||
targetPort: 1985
|
||||
selector:
|
||||
srs-role: rtc
|
||||
type: ClusterIP
|
||||
sessionAffinity: None
|
||||
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srsrtc-svc
|
||||
namespace: zyga
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
ports:
|
||||
- name: rtmp
|
||||
protocol: TCP
|
||||
port: 30935
|
||||
targetPort: 30935
|
||||
selector:
|
||||
srs-role: rtc
|
||||
type: ClusterIP
|
||||
sessionAffinity: None
|
||||
|
||||
---
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: helm-live-srs-rtc
|
||||
namespace: zyga
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
cmii.app: live-srs
|
||||
cmii.type: midware
|
||||
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
|
||||
srs-role: rtc
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
srs-role: rtc
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
srs-role: rtc
|
||||
spec:
|
||||
volumes:
|
||||
- name: srs-conf-file
|
||||
configMap:
|
||||
name: helm-live-srs-cm
|
||||
items:
|
||||
- key: srs.rtc.conf
|
||||
path: docker.conf
|
||||
defaultMode: 420
|
||||
- name: srs-vol
|
||||
emptyDir:
|
||||
sizeLimit: 8Gi
|
||||
containers:
|
||||
- name: srs-rtc
|
||||
image: '192.168.6.6:8033/cmii/srs:v5.0.195'
|
||||
ports:
|
||||
- name: srs-rtmp
|
||||
containerPort: 30935
|
||||
protocol: TCP
|
||||
- name: srs-api
|
||||
containerPort: 1985
|
||||
protocol: TCP
|
||||
- name: srs-flv
|
||||
containerPort: 8080
|
||||
protocol: TCP
|
||||
- name: srs-webrtc
|
||||
containerPort: 30090
|
||||
protocol: UDP
|
||||
- name: srs-srt
|
||||
containerPort: 30556
|
||||
protocol: UDP
|
||||
env:
|
||||
- name: CANDIDATE
|
||||
value: 10.18.70.43
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1200m
|
||||
memory: 6Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
volumeMounts:
|
||||
- name: srs-conf-file
|
||||
mountPath: /usr/local/srs/conf/docker.conf
|
||||
subPath: docker.conf
|
||||
- name: srs-vol
|
||||
mountPath: /home/dvr
|
||||
subPath: zyga/helm-live/dvr
|
||||
- name: srs-vol
|
||||
mountPath: /home/hls
|
||||
subPath: zyga/helm-live/hls
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: Always
|
||||
- name: oss-adaptor
|
||||
image: '192.168.6.6:8033/cmii/cmii-srs-oss-adaptor:2023-SA'
|
||||
env:
|
||||
- name: OSS_ENDPOINT
|
||||
value: 'http://192.168.6.4:9000'
|
||||
- name: OSS_AK
|
||||
value: cmii
|
||||
- name: OSS_SK
|
||||
value: 'B#923fC7mk'
|
||||
- name: OSS_BUCKET
|
||||
value: live-cluster-hls
|
||||
- name: SRS_OP
|
||||
value: 'http://helm-live-op-svc-v2:8080'
|
||||
- name: MYSQL_ENDPOINT
|
||||
value: 'helm-mysql:3306'
|
||||
- name: MYSQL_USERNAME
|
||||
value: k8s_admin
|
||||
- name: MYSQL_PASSWORD
|
||||
value: fP#UaH6qQ3)8
|
||||
- name: MYSQL_DATABASE
|
||||
value: cmii_live_srs_op
|
||||
- name: MYSQL_TABLE
|
||||
value: live_segment
|
||||
- name: LOG_LEVEL
|
||||
value: info
|
||||
- name: OSS_META
|
||||
value: 'yes'
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1200m
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
volumeMounts:
|
||||
- name: srs-vol
|
||||
mountPath: /cmii/share/hls
|
||||
subPath: zyga/helm-live/hls
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: Always
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
dnsPolicy: ClusterFirst
|
||||
securityContext: {}
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
affinity: {}
|
||||
schedulerName: default-scheduler
|
||||
serviceName: helm-live-srsrtc-svc
|
||||
podManagementPolicy: OrderedReady
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
partition: 0
|
||||
revisionHistoryLimit: 10
|
||||
---
|
||||
# live-srs section
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: helm-live-op-v2
|
||||
namespace: zyga
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
cmii.app: live-engine
|
||||
cmii.type: midware
|
||||
helm.sh/chart: cmlc-live-live-op-2.0.0
|
||||
live-role: op-v2
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
live-role: op-v2
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
live-role: op-v2
|
||||
spec:
|
||||
volumes:
|
||||
- name: srs-conf-file
|
||||
configMap:
|
||||
name: helm-live-op-cm-v2
|
||||
items:
|
||||
- key: live.op.conf
|
||||
path: bootstrap.yaml
|
||||
defaultMode: 420
|
||||
containers:
|
||||
- name: operator
|
||||
image: '192.168.6.6:8033/cmii/cmii-live-operator:5.2.0'
|
||||
ports:
|
||||
- name: operator
|
||||
containerPort: 8080
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 4800m
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
volumeMounts:
|
||||
- name: srs-conf-file
|
||||
mountPath: /cmii/bootstrap.yaml
|
||||
subPath: bootstrap.yaml
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /cmii/ping
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 20
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /cmii/ping
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 20
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: Always
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
dnsPolicy: ClusterFirst
|
||||
securityContext: {}
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
affinity: {}
|
||||
schedulerName: default-scheduler
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 25%
|
||||
maxSurge: 25%
|
||||
revisionHistoryLimit: 10
|
||||
progressDeadlineSeconds: 600
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-op-svc-v2
|
||||
namespace: zyga
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
targetPort: 8080
|
||||
nodePort: 30333
|
||||
selector:
|
||||
live-role: op-v2
|
||||
type: NodePort
|
||||
sessionAffinity: None
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-op-svc
|
||||
namespace: zyga
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
targetPort: 8080
|
||||
selector:
|
||||
live-role: op
|
||||
type: ClusterIP
|
||||
sessionAffinity: None
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-op-cm-v2
|
||||
namespace: zyga
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
cmii.app: live-engine
|
||||
cmii.type: midware
|
||||
data:
|
||||
live.op.conf: |-
|
||||
server:
|
||||
port: 8080
|
||||
|
||||
spring:
|
||||
main:
|
||||
allow-bean-definition-overriding: true
|
||||
allow-circular-references: true
|
||||
application:
|
||||
name: cmii-live-operator
|
||||
platform:
|
||||
info:
|
||||
name: cmii-live-operator
|
||||
description: cmii-live-operator
|
||||
version: 4.0.6
|
||||
scanPackage: com.cmii.live.op
|
||||
cloud:
|
||||
nacos:
|
||||
config:
|
||||
username: developer
|
||||
password: N@cos14Good
|
||||
server-addr: helm-nacos:8848
|
||||
extension-configs:
|
||||
- data-id: cmii-live-operator.yml
|
||||
group: 5.4.0
|
||||
refresh: true
|
||||
shared-configs:
|
||||
- data-id: cmii-backend-system.yml
|
||||
group: 5.4.0
|
||||
refresh: true
|
||||
discovery:
|
||||
enabled: false
|
||||
|
||||
live:
|
||||
engine:
|
||||
type: srs
|
||||
endpoint: 'http://helm-live-srs-svc:1985'
|
||||
|
||||
proto:
|
||||
rtmp: 'rtmp://10.18.70.43:30935'
|
||||
rtsp: 'rtsp://10.18.70.43:30554'
|
||||
srt: 'srt://10.18.70.43:30556'
|
||||
flv: 'http://10.18.70.43:30500'
|
||||
hls: 'http://10.18.70.43:30500'
|
||||
rtc: 'webrtc://10.18.70.43:30557'
|
||||
replay: 'https://10.18.70.43:30333'
|
||||
minio:
|
||||
endpoint: http://192.168.6.4:9000
|
||||
access-key: cmii
|
||||
secret-key: B#923fC7mk
|
||||
bucket: live-cluster-hls
|
||||
---
|
||||
|
||||
|
||||
|
||||
|
||||
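Applying the rendered manifest and watching the SRS pod come up (namespace zyga as set in the file):

kubectl apply -f k8s-srs.yaml
kubectl -n zyga rollout status statefulset/helm-live-srs-rtc
kubectl -n zyga get svc helm-live-srs-svc-exporter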
13
21-资阳移动/批量执行.sh
Normal file
@@ -0,0 +1,13 @@
host_list=(192.168.6.3 192.168.6.7 192.168.6.5)
for host in ${host_list[@]}
do
  echo "current host is $host"
  ssh -p 2022 root@"$host" "cp /etc/ssh/sshd_config /etc/ssh/sshd_config_zyly"
  scp -P 2022 /etc/ssh/sshd_config root@"$host":/etc/ssh/sshd_config
  ssh -p 2022 root@"$host" "systemctl restart sshd"
  echo ""
done
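The same loop pattern can confirm that the pushed sshd_config took effect on every host; a small check, assuming the same port and key:

for host in 192.168.6.3 192.168.6.7 192.168.6.5; do
  echo "== $host =="
  ssh -p 2022 root@"$host" "sshd -T | grep -i allowtcpforwarding"
done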