commit 437acbeb63 by zeaslity, 2024-10-30 16:30:51 +08:00
3363 changed files with 653948 additions and 0 deletions


@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# This must be run on every node
hostnamectl set-hostname master-node
sed -i "/search/ a nameserver 223.5.5.5" /etc/resolv.conf
echo "AllowTcpForwarding yes" >> /etc/ssh/sshd_config
systemctl restart sshd
cat >> /etc/hosts << EOF
20.4.13.81 master-node
20.4.13.140 worker-1
20.4.13.92 worker-2
20.4.13.80 storage-1
EOF
yum clean all && yum makecache
# root
# 10.190.217.227
# 3Ycg4ZPsG#Z!
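Since this preparation step has to run on every node, a small loop like the following can push and execute it over SSH. This is only a sketch: the script name node_prep.sh is an assumption, the node list comes from the /etc/hosts block above, and the hostnamectl line still has to be adjusted per node.

#!/usr/bin/env bash
# Sketch: distribute the node-prep script (assumed name: node_prep.sh) and run it on every host.
nodes=(20.4.13.81 20.4.13.140 20.4.13.92 20.4.13.80)
for node in "${nodes[@]}"; do
    scp node_prep.sh root@"${node}":/tmp/node_prep.sh
    ssh root@"${node}" "bash /tmp/node_prep.sh"
done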


@@ -0,0 +1,95 @@
#!/bin/bash
# Disable swap
# swapoff -a
# cp -f /etc/fstab /etc/fstab_bak
# cat /etc/fstab_bak | grep -v swap >/etc/fstab
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "扩容之前的root目录的容量为${RootVolumeSizeBefore}"
# echo "y
# " | lvremove /dev/mapper/centos-swap
# freepesize=$(vgdisplay centos | grep 'Free PE' | awk '{print $5}')
# lvextend -l+${freepesize} /dev/mapper/centos-root
# Grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/centos-root
# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "扩容之后的root目录的容量为${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)
# echo "恭喜您的root目录容量增加了+++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"
# yum install lvm2 -y
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/sdb
partprobe
# If a volume group already exists, extend it instead:
# vgextend /dev/mapper/centos /dev/vda3
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to the actual requirement
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
# Grow the root filesystem; use df -Th to find the filesystem (${VG_NAME}-root) that needs extending
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
# Site-specific: install lvm2 first if needed
echo "n
p
t
8e
w
" | fdisk /dev/vda
partprobe
vgextend klas_host-10-190-202-141 /dev/vda4
lvextend -l +100%FREE /dev/mapper/klas_host--10--190--202--141-root
partprobe
xfs_growfs /dev/mapper/klas_host--10--190--202--141-root
df -TH
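A quick verification pass after the script finishes (standard LVM and filesystem queries, nothing here is part of the original script) confirms the data volume and the extended root filesystem:

# Inspect physical volumes, volume groups and logical volumes.
pvs && vgs && lvs
# Confirm the data LV is formatted and mounted where Docker expects it.
lsblk -f
df -Th /var/lib/docker /
grep lvdata /etc/fstab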


@@ -0,0 +1,13 @@
version: '2'
services:
  minio1:
    ports:
      - "9000:9000"
      - "9001:9001"
    image: '20.4.13.81:8033/cmii/minio:2023.5.4'
    environment:
      - MINIO_ROOT_USER=cmii
      - MINIO_ROOT_PASSWORD=B#923fC7mk
    volumes:
      - /var/lib/docker/minio-pv/:/mnt/data
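A minimal way to bring the container up and check that MinIO is serving. This is a sketch: the compose file name docker-compose-minio.yml and the storage host 20.4.13.80 are assumptions; the liveness URL is MinIO's standard unauthenticated health endpoint.

docker-compose -f docker-compose-minio.yml up -d
# MinIO exposes an unauthenticated liveness probe endpoint.
curl -i http://20.4.13.80:9000/minio/health/live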


@@ -0,0 +1,31 @@
server {
    listen      8889;
    server_name localhost;

    # Origins allowed for cross-domain requests; * means all
    add_header 'Access-Control-Allow-Origin' *;
    # Allow requests to carry cookies
    add_header 'Access-Control-Allow-Credentials' 'true';
    # Allowed request methods, e.g. GET/POST/PUT/DELETE
    add_header 'Access-Control-Allow-Methods' *;
    # Allowed request headers
    add_header 'Access-Control-Allow-Headers' *;

    location /electronic {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }

    location /satellite {
        root /root/offline_map/;
        autoindex on;
        add_header Access-Control-Allow-Origin *;
        add_header Access-Control-Allow-Methods 'GET,POST';
        add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
    }

    # Tile URL templates:
    # http://20.4.13.81:8889/electronic/{z}/{x}/{y}.png
    # http://20.4.13.81:8889/satellite/{z}/{x}/{y}.png
}
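To check that tiles come back with the CORS headers, request any existing tile through the two locations; the z/x/y values below are only an example, not tiles known to exist in this deployment.

curl -I "http://20.4.13.81:8889/electronic/3/6/3.png"
curl -I "http://20.4.13.81:8889/satellite/3/6/3.png"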


@@ -0,0 +1,43 @@
server {
    listen      8888;
    server_name localhost;

    location / {
        proxy_pass http://localhost:30500;
        client_max_body_size     5120m;
        client_body_buffer_size  5120m;
        client_body_timeout      6000s;
        proxy_send_timeout       10000s;
        proxy_read_timeout       10000s;
        proxy_connect_timeout    600s;
        proxy_max_temp_file_size 5120m;
        proxy_request_buffering  on;
        proxy_buffering          off;
        proxy_buffer_size        4k;
        proxy_buffers            4 12k;
        proxy_set_header Host fake-domain.cqga.io;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    # AMap JS API proxy; the jscode is appended to every forwarded request
    location /_AMapService/v4/map/styles {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://webapi.amap.com/v4/map/styles;
    }
    location /_AMapService/ {
        set $args "$args&jscode=cf66cea95bdcdfcf8048456b36f357a1";
        proxy_pass https://restapi.amap.com/;
    }

    location /rtc/v1/ {
        add_header Access-Control-Allow-Headers X-Requested-With;
        add_header Access-Control-Allow-Methods GET,POST,OPTIONS;
        proxy_pass http://127.0.0.1:30985/rtc/v1/;
    }

    # Block Spring Boot actuator endpoints from the outside
    location ~ ^/\w*/actuator/ {
        return 403;
    }
}
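A quick sanity check of this server block: the actuator guard should answer 403 while normal requests are proxied to port 30500. The /uav/ path segment below is only an illustrative example.

# Expect 403 from the actuator guard.
curl -s -o /dev/null -w '%{http_code}\n' http://20.4.13.81:8888/uav/actuator/health
# Expect a response from the application behind the proxy.
curl -I http://20.4.13.81:8888/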


@@ -0,0 +1,43 @@
user  root;
worker_processes  auto;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;

events {
    use epoll;
    worker_connections  65535;
}

http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;
    server_tokens off;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
    access_log  /var/log/nginx/access.log  main;

    sendfile            on;
    send_timeout        1200;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   600;
    types_hash_max_size 2048;

    client_max_body_size    2048m;
    client_body_buffer_size 2048m;
    underscores_in_headers  on;

    proxy_send_timeout    600;
    proxy_read_timeout    600;
    proxy_connect_timeout 600;
    proxy_buffer_size     128k;
    proxy_buffers         8 256k;

    include /etc/nginx/conf.d/*.conf;
}

stream {
    include /etc/nginx/conf.d/stream/*.conf;
}
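After the per-site server blocks are dropped into /etc/nginx/conf.d/, the configuration can be validated and reloaded in place with the standard nginx CLI:

nginx -t && nginx -s reload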

File diff suppressed because it is too large
16-重庆公安/7-front.yaml (new file, 2298 lines): diff suppressed because it is too large


@@ -0,0 +1,42 @@
#!/bin/bash
# Replace the namespace
# Replace with the actual MinIO address and port
# Update the RabbitMQ address and port; they must be exposed externally
# curl https://dl.min.io/client/mc/release/linux-amd64/mc
export tenant_name=zqga
mc alias set ${tenant_name} http://20.4.13.80:9000 cmii B#923fC7mk
mc mb ${tenant_name}/jadenq ${tenant_name}/tus ${tenant_name}/thumbnail ${tenant_name}/pub-cms ${tenant_name}/live-srs-hls/ ${tenant_name}/mission/ ${tenant_name}/surveillance ${tenant_name}/playback ${tenant_name}/tower ${tenant_name}/modelprocess ${tenant_name}/srs-hls ${tenant_name}/live-cluster-hls
# mc alias set demo https://oss.demo.uavcmlc.com:18000 cmii B#923fC7mk
#
# mc cp -r demo/jadenq/scenariomock/xg/ ${tenant_name}/jadenq/scenariomock/xg/
# mc cp -r demo/jadenq/application/file/中移凌云使用手册.pdf ${tenant_name}/jadenq/application/file/中移凌云使用手册.pdf
# mc cp -r demo/jadenq/defimage/def.jpg ${tenant_name}/jadenq/defimage/def.jpg
# mc cp -r demo/pub-cms/application/img/ ${tenant_name}/pub-cms/application/img/
mc admin config set ${tenant_name} notify_amqp:1 delivery_mode="2" exchange_type="direct" no_wait="off" queue_dir="" queue_limit="0" url="amqp://admin:nYcRN91r._hj@20.4.13.81:35672" auto_deleted="off" durable="on" exchange="cmii.chinamobile.minio.event" internal="off" mandatory="off" routing_key="cmii.chinamobile.material.warehouse"
mc admin service restart ${tenant_name}
mc event add ${tenant_name}/mission arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/modelprocess arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-srs-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/playback arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/live-cluster-hls arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/surveillance arn:minio:sqs::1:amqp --event put
mc event add ${tenant_name}/tus arn:minio:sqs::1:amqp --event delete
mc ilm add --expiry-days "1" ${tenant_name}/tus
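The notification targets and the lifecycle rule can be verified afterwards with the matching mc list commands (same alias and ARN as above):

mc event list ${tenant_name}/mission arn:minio:sqs::1:amqp
mc ilm ls ${tenant_name}/tus
mc admin config get ${tenant_name} notify_amqp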


@@ -0,0 +1,486 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: cqga
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-srs
cmii.type: midware
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 30935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://20.4.13.81:8888;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: cqga
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
nodePort: 30935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30557
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: cqga
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: cqga
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- name: rtmp
protocol: TCP
port: 30935
targetPort: 30935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: cqga
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-srs
cmii.type: midware
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
creationTimestamp: null
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: '20.4.13.81:8033/cmii/srs:v5.0.195'
ports:
- name: srs-rtmp
containerPort: 30935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 20.4.13.81
resources:
limits:
cpu: 1200m
memory: 6Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: cqga/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: cqga/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: '20.4.13.81:8033/cmii/cmii-srs-oss-adaptor:2023-SA'
env:
- name: OSS_ENDPOINT
value: 'http://20.4.13.80:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 1200m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: cqga/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: cqga
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-engine
cmii.type: midware
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
creationTimestamp: null
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: operator
image: '20.4.13.81:8033/cmii/cmii-live-operator:5.2.0'
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/ping
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: cqga
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: cqga
labels:
app.kubernetes.io/managed-by: Helm
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: cqga
labels:
app.kubernetes.io/managed-by: Helm
cmii.app: live-engine
cmii.type: midware
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 5.2.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: developer
password: N@cos14Good
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 5.2.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 5.2.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://20.4.13.81:30935'
rtsp: 'rtsp://20.4.13.81:30554'
srt: 'srt://20.4.13.81:30556'
flv: 'http://20.4.13.81:30500'
hls: 'http://20.4.13.81:30500'
rtc: 'webrtc://20.4.13.81:30557'
replay: 'https://20.4.13.81:30333'
minio:
endpoint: http://20.4.13.80:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
---
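Once the manifests above are applied, the live-streaming stack can be checked with standard kubectl queries. The file name 8-live.yaml is an assumption based on the neighbouring 7-front.yaml and 9-back.yaml; the pod name follows from the single-replica StatefulSet.

kubectl apply -f 8-live.yaml
kubectl -n cqga get configmap,svc,statefulset,deployment
kubectl -n cqga get pods -o wide
# Tail the SRS container once the StatefulSet pod is up.
kubectl -n cqga logs helm-live-srs-rtc-0 -c srs-rtc --tail=50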

16-重庆公安/9-back.yaml (new file, 4398 lines): diff suppressed because it is too large


@@ -0,0 +1,19 @@
#!/usr/bin/env bash
all_server_list=(20.4.13.81 20.4.13.140 20.4.13.92 20.4.13.80)
all_base_command(){
    for server in "${all_server_list[@]}"
    do
        echo "server is $server"
        ssh root@"$server" "echo yes"
        ssh root@"$server" "date -R"
    done
}
all_base_command
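The same node list and loop pattern extends naturally to other fleet-wide actions; for example, a sketch (not part of the original script) that copies a file to every node:

all_copy_file(){
    # Copy a local file to the same path on every node.
    local src="$1"
    for server in "${all_server_list[@]}"
    do
        echo "copying ${src} to ${server}"
        scp "${src}" root@"${server}":"${src}"
    done
}
# Example: all_copy_file /etc/hosts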


@@ -0,0 +1,19 @@
apiVersion: v1
kind: Config
clusters:
- cluster:
api-version: v1
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN3akNDQWFxZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFTTVJBd0RnWURWUVFERXdkcmRXSmwKTFdOaE1CNFhEVEkwTURFeU9ERTBNVFV5TjFvWERUTTBNREV5TlRFME1UVXlOMW93RWpFUU1BNEdBMVVFQXhNSAphM1ZpWlMxallUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUtnRDE0M0pGL2JzCldvb1gvZUZ5MEppU0tzbkpxaGl5Wm81Q2VBV0xJV3FScEJRUmw2ekpYUWl3OUVYMFN3UkhuSFJVMVVnT25mOWoKUm5hb3k1YWJ1UEZkQ3YzNmdDZ0NGQWRIYUZXUENOdnp4eGsybng5dmdZS2hZdjBCU2lLWitzdktmMkJ2akpvZQpaZElTTjlUZEI5WlRNVlVEWlordDlGZWdrRlF5eENJd3pWbXlXL0J4UWJlWkpSekxaK2VDS1dhbURmOGZDY2huCmdDdSticyswRkhPTkhDNUl3OTZldyt0ZlV3ZlpjREVwd2w5bS8xR2FJZGNBRk03aHdodkxONEF0anRUZEVybUMKVXJWamplWCsrMTQrNW05RXhuSHI1OXgwNVIvZ0JHNUtGMjFlTDAvSlZuQ2lUOFlyeVRib0tlVVFHQzFyOFpVSQpPOFkyMkRSM1cyMENBd0VBQWFNak1DRXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CCkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRTRlcUhZeWI0bHNreCtCKzJxT0RoaUU3b2lTd1dMRlUxMUEKOVBTTWFlWWJkMDFvSHgwVFZ4bGRkRXpFL2RxWkxrWi8vSW54bGVYRGNUQlFzeTRmUVVkWUxlWUZ0U3lpS3FEUQpKZkw1WVFmMjBJZnlmM05wT284M1hCTWhKRytNTUNocUJ1Q2R0dHRSN0VFeG1pc2twQ2xValJNNlRzSG5oRlZJClVPZ3hZNUtSU3lzTUFvTElXczhNSzVoQ29Hc0NKVlBuUGRrVXcvbkc3d0ROY3ZPNzJocTV3U1RmSHJuTnl5UkkKSDdheVU5TS9nVzRHcytUZHphZlp4NG1Vd1F4YUMzN0tIVnhRMUlzNW1EVjhYUmxvYXpuU0VGT2pZanVuWEdocQpZRFg5QnJpT2dvWTdObkFlYmZpVWhQRW5ZV3hwRVZCUVVBNzdUeVZ6ejRvMHlhL3JtcUU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
server: "https://20.4.13.81:6443"
name: "rke-cluster"
contexts:
- context:
cluster: "rke-cluster"
user: "kube-admin-rke-cluster"
name: "rke-cluster"
current-context: "rke-cluster"
users:
- name: "kube-admin-rke-cluster"
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2VENDQWRHZ0F3SUJBZ0lJV0h2UGh1bk40TTR3RFFZSktvWklodmNOQVFFTEJRQXdFakVRTUE0R0ExVUUKQXhNSGEzVmlaUzFqWVRBZUZ3MHlOREF4TWpneE5ERTFNamRhRncwek5EQXhNalV4TkRFMk1UaGFNQzR4RnpBVgpCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVJNd0VRWURWUVFERXdwcmRXSmxMV0ZrYldsdU1JSUJJakFOCkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXF6bml5elRGd2p4SEhhV2lPQ1cxeGVPem1jZzAKSElZdkM0VDV3ZjEyTzNVK1pUajRvclNXNUlpK0p4TnV4VmVsc3pjaExYVjg1NTF3MVUvTXJuazhZZWJWNzBITQpPTFlveDNJUVVhazdvVlpYV2NEdTAvQzMrRFo3YmdJR294MjNyWHVqUEhKby9rZndORldlZ1hiTmxWN1I4bmdYClNVdDVpVG5mcWNtOHFrbExVaFF4UXI4dU83NkUvSDZ3czQxcmlHd2JsT1FxNGdONUZ4SDZCMStJUmw4eCsya2IKaHhSZDRJT1AyTDhFSHVYdEtaNGFDMWFzVEI1N0pNaDNuQTZZbkEzYmNwTVJTUlM0bTRBenFqa2c3ZlZRYTMvdQo1Vmh0bnh5dDh0MXJBSHl2ajY0STVuZjB2S1ltZ1ZpMUk3aklIUXlsaExSQzVyOFVlSXA3L2IzbXR3SURBUUFCCm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0l3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dFQkFGSDhON1JDNkVaWEJWbXNEYzJvUUFyTjMvS29rUFF1MWJjelJOZFdxVFhPZmxpZQpiUG5Ka3V1TkhpTldVR0JhRkkzTWNuSThKSmFqZ01hSmtOa3MyMDZHcmVPQWxnbUNhbEFTME9EbmthbjdrQnZTCktWaGtRd3c0TXBxY3BEL0phQVo0bC9MVXowMHZjYVR2WUFsUHV4dEZzUlpwdVh2TElzdXM4R2ovMnpvTmtDU3IKVEpFQU1wWVdWTDlkVlJpOXA4RHpQZU9zU242Q096aXBZcDFkTWxMMlpadk1PWEhnbnJHTXNOUnVURGtvbUJQMwo2czhWQjNRZXc4YndYODNIVE5XNG9MZUFQa0xhU1lFZ29ldGFjdFkyMTZYL1NIVENQd3RLUFRlNE53bUdybFlFCkRMTnhGK0FnMitSMWZUU1diM2RIK1BKSmhuVXZ4WVNsV0pqMVVjdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBcXpuaXl6VEZ3anhISGFXaU9DVzF4ZU96bWNnMEhJWXZDNFQ1d2YxMk8zVStaVGo0Cm9yU1c1SWkrSnhOdXhWZWxzemNoTFhWODU1MXcxVS9Ncm5rOFllYlY3MEhNT0xZb3gzSVFVYWs3b1ZaWFdjRHUKMC9DMytEWjdiZ0lHb3gyM3JYdWpQSEpvL2tmd05GV2VnWGJObFY3UjhuZ1hTVXQ1aVRuZnFjbThxa2xMVWhReApRcjh1Tzc2RS9INndzNDFyaUd3YmxPUXE0Z041RnhINkIxK0lSbDh4KzJrYmh4UmQ0SU9QMkw4RUh1WHRLWjRhCkMxYXNUQjU3Sk1oM25BNlluQTNiY3BNUlNSUzRtNEF6cWprZzdmVlFhMy91NVZodG54eXQ4dDFyQUh5dmo2NEkKNW5mMHZLWW1nVmkxSTdqSUhReWxoTFJDNXI4VWVJcDcvYjNtdHdJREFRQUJBb0lCQVFDVllRZmFEaTBsT2w4MApueWI1NHZSRk0xU2hxZ2JFaDlBTk5iS0xLaXYyelY5bEFhSHdtS28zc0pCc0dNai93MnBkQXBDZ1R5cHNDbS9BCnVVQ3hTaldDbDljMlIybjZZL2tKbnhwYms4L2cvaWx3dFIvbXFOQm03R2tjWFFvYlRqYkxVOXppUkZJUzNRSFQKTm9yMUdZZGJkQUNuajF1Slp4NGR1TzdHK3Fmc3lsMFJUWkt0ZkhFcFdmZG95THlQd0srS09INFJrZk1HN3FmWQo2c25kZ0FNSGltcVZZc1dTV0JGVmNKbHdXdUVyODVDejFQdEtpditNNU9JMjRpUjgxRmt2NmE1b0xMK21NMkU2ClpINi9kYkRxNzlXdUl2MzlyMEFiM3RxdmZLNlFkS2JvVnJTOEQ5L1QyZ0Y2KzM3eE03MVBwWmE1blJyeFZwMngKenQzL1k4bHBBb0dCQU1oZllLR01YcWIyaE1GNFpZQzk4RjkzTU5US3RVTWVNRVVkZlZzbG1wMWc3eHZkbzRPWgpENDJic2RUektKdDhNUUJUOFVFYm1pNklDREROYi9pZWhMdXBwN05UMTAweTloL2RobTZIanc2dnZqWlAydStrCkhCSExDQjA3SDNaQ2VCL3JoQjJmTFF5bS9SR01BcHFnME00UkdTcnlUWWRJazB0OHVYb2h1ZENOQW9HQkFOckQKREpkNTRadFlDUE9Jbkd6ZFNtbjE1Rk8zU3VzVEIxSm40b3NRTTRTWG55Y3k0dFRsQm5DNHVVQ1JXTVlhajZiUwpyNFkrZG1udVkyeHBVUk5BMjdLcldMTlJEMFdUVFFJZ0IzNHZlUEoycWZrMlNYanA5cytYMnBqclFQaXNRVEt3ClZLN2pJemY3WlhUOHFEbXl4K09KWWpwR095eG5WQTR2aXRHeTBLMVRBb0dCQUkzVjdmb00rZ2RvSTJzc1JYV1AKZmpLbGxYeGJqVm5ZYWNUUzdpUnlCdk4yWld1dFJZem04UHFGaHFrK0FyV2xnSzVCdlZvZ0p0RGZYQ1RubDJZLwpGNmdibk9rMDVRZnhsZ1hTY3RHVXhiQll1T1dMTzJnOUc5MkFnZHJuT2dzbFhCRUVUdjRFNmdlMXdSZ0ZxdVk0CkJmYUlaZTlmbTMreTRRa2pYY3ZWU2NvVkFvR0FaQnp2SGtOQTQvNmQyaFdpd0k5bEJxanBIUWE4Mm1lUzZqdHcKaVlPWWtBMXpZRlVCU2J5bC9lQUxwMmpjMzR3NFFYZ1pxNjNhTXhCTm44ZHl1d1RyaGhDMFB4dlJpVjJuMkZrZwpGTGRVK21FajJGRG52dFlUaFYxUlJMb2t1YWpiRFg5MHdiUmc0YlhDOEk3UHpkZnJjazNsRGhZbDdtUTdLMkxHCmZNRUFqdEVDZ1lCdHA5SHowZE42ZkFOV2l4VkpiYVJkR25yQWptOEhjUEdlUjhxbVdFWFhlN2FSbXhFWTdKQ2MKTUwzSUovTTZFbDRNRGxzSVh6UmpKVjdaOEp5bGlPQURGMitLeERmcUlQOVNzb2xCQ0o4SVR4MTl6Qnk3c09QbgpDNkFMbE9CSU51UUV0a0JmWCtXVTYzN1grQ3NDQUFmWE1CZTduN3BEV0wwZkdFUWVJWnY1cUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
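This kubeconfig can be used as-is to reach the RKE cluster; the file name kube_config.yaml below is an assumption.

export KUBECONFIG=$PWD/kube_config.yaml
kubectl get nodes -o wide
kubectl -n cqga get pods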


@@ -0,0 +1,68 @@
#!/bin/bash
harbor_host=20.4.13.81:8033
namespace=cqga
app_name=""
new_tag=""

# Load the image archive, retag it for the local Harbor registry and push it.
upload_image_to_harbor(){
    if [ "$app_name" == "" ]; then
        echo "app name is empty, exiting!"
        exit 233
    fi
    if ! docker load < "$1"; then
        echo "docker load error!"
        exit 233
    fi
    docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
    echo ""
    echo ""
    echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
    docker login -u admin -p V2ryStr@ngPss $harbor_host
    docker push "$harbor_host/cmii/$app_name:$new_tag"
    echo ""
    echo ""
}

# Derive the application name and image tag from the archive file name.
# Expected archive name: <app>=<tag>=... e.g. cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
parse_args(){
    if [ "$1" == "" ]; then
        echo "no image archive given, exiting!"
        exit 233
    fi
    local image_name="$1"
    app_name=$(echo "$image_name" | cut -d "=" -f1)
    new_tag=$(echo "$image_name" | cut -d "=" -f2)
}

# Patch the deployment so it pulls the freshly pushed image tag.
update_image_tag(){
    if [ "$new_tag" == "" ]; then
        echo "new tag error!"
        exit 233
    fi
    local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)
    echo "current image prefix is => ${image_prefix}"
    echo "start to update ${namespace} ${app_name} to ${new_tag} !"
    echo ""
    kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
    echo ""
    echo "start to wait for 3 seconds!"
    sleep 3
    local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
    echo ""
    echo "new image is => $image_new"
    echo ""
}

main(){
    parse_args "$1"
    upload_image_to_harbor "$1"
    update_image_tag
}

main "$@"