# Source: CmiiDeploy/48-202412-浙江移动扩容/全量备份/all-configmaps-zjyd.yaml
# Exported: 2024-12-18 17:42:35 +08:00
---
apiVersion: v1
data:
  acl.conf: |-
    {allow, {user, "admin"}, pubsub, ["admin/#"]}.
    {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
    {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
    {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
    {allow, all}.
  emqx_auth_mnesia.conf: |-
    auth.mnesia.password_hash = sha256
    # clientid auth entries
    # auth.client.1.clientid = admin
    # auth.client.1.password = 4YPk*DS%+5
    ## username auth entries
    auth.user.1.username = admin
    auth.user.1.password = odD8#Ve7.B
    auth.user.2.username = cmlc
    auth.user.2.password = odD8#Ve7.B
  loaded_plugins: |-
    {emqx_auth_mnesia,true}.
    {emqx_management, true}.
    {emqx_recon, true}.
    {emqx_retainer, false}.
    {emqx_dashboard, true}.
    {emqx_telemetry, true}.
    {emqx_rule_engine, true}.
    {emqx_bridge_mqtt, false}.
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 5.7.0
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    cmii.type: middleware
    helm.sh/chart: emqx-1.1.0
  name: helm-emqxs-cm
  namespace: zjyd
---
apiVersion: v1
data:
  EMQX_ACL_NOMATCH: deny
  EMQX_ALLOW_ANONYMOUS: "false"
  EMQX_CLUSTER__DISCOVERY: k8s
  EMQX_CLUSTER__K8S__ADDRESS_TYPE: dns
  EMQX_CLUSTER__K8S__APISERVER: https://kubernetes.default.svc.cluster.local:443
  EMQX_CLUSTER__K8S__APP_NAME: helm-emqxs
  EMQX_CLUSTER__K8S__SERVICE_NAME: helm-emqxs-headless
  EMQX_CLUSTER__K8S__SUFFIX: svc.cluster.local
  EMQX_CLUSTER__K8S__namespace: zjyd
  EMQX_NAME: helm-emqxs
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 5.7.0
    cmii.app: helm-emqxs
    cmii.emqx.architecture: cluster
    cmii.type: middleware
    helm.sh/chart: emqx-1.1.0
  name: helm-emqxs-env
  namespace: zjyd
---
apiVersion: v1
data:
  live.op.conf: |-
    server:
      port: 8080
    spring:
      main:
        allow-bean-definition-overriding: true
        allow-circular-references: true
      application:
        name: cmii-live-operator
      platform:
        info:
          name: cmii-live-operator
          description: cmii-live-operator
          version: 5.7.0
        scanPackage: com.cmii.live.op
      cloud:
        nacos:
          config:
            username: nacos
            password: Dron@2468
            server-addr: helm-nacos:8848
            extension-configs:
              - data-id: cmii-live-operator.yml
                group: 5.7.0
                refresh: true
            shared-configs:
              - data-id: cmii-backend-system.yml
                group: 5.7.0
                refresh: true
          discovery:
            enabled: false
    live:
      engine:
        type: srs
        endpoint: 'http://helm-live-srs-svc:1985'
      proto:
        rtmp: 'rtmp://111.2.224.59:31935'
        rtsp: 'rtsp://188.106.25.136:30554'
        srt: 'srt://188.106.25.136:30556'
        flv: 'http://111.2.224.59:8088'
        hls: 'http://111.2.224.59:8088'
        rtc: 'webrtc://111.2.224.59:8088'
        replay: 'https://helm-live-op-v2.ig-xlbg.uavcmlc.com:31500'
      minio:
        endpoint: http://192.168.10.2:9000
        access-key: cmii
        secret-key: B#923fC7mk
        bucket: live-srs-hls
kind: ConfigMap
metadata:
  labels:
    cmii.app: live-engine
    cmii.type: midware
  name: helm-live-op-cm-v2
  namespace: zjyd
---
apiVersion: v1
data:
  srs.rtc.conf: |-
    listen 32006;
    max_connections 4096;
    srs_log_tank file;
    srs_log_level info;
    srs_log_file /home/srs.log;
    daemon off;
    http_api {
        enabled on;
        listen 1985;
        crossdomain on;
    }
    stats {
        network 0;
    }
    http_server {
        enabled on;
        listen 8080;
        dir /home/hls;
    }
    srt_server {
        enabled on;
        listen 30556;
        maxbw 1000000000;
        connect_timeout 4000;
        peerlatency 600;
        recvlatency 600;
    }
    rtc_server {
        enabled on;
        listen 30090;
        candidate $CANDIDATE;
    }
    vhost __defaultVhost__ {
        http_hooks {
            enabled on;
            on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
        }
        http_remux {
            enabled on;
        }
        rtc {
            enabled on;
            rtmp_to_rtc on;
            rtc_to_rtmp on;
            keep_bframe off;
        }
        tcp_nodelay on;
        min_latency on;
        play {
            gop_cache off;
            mw_latency 100;
            mw_msgs 10;
        }
        publish {
            firstpkt_timeout 8000;
            normal_timeout 4000;
            mr on;
        }
        dvr {
            enabled off;
            dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
            dvr_plan session;
        }
        hls {
            enabled on;
            hls_path /home/hls;
            hls_fragment 10;
            hls_window 60;
            hls_m3u8_file [app]/[stream].m3u8;
            hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
            hls_cleanup on;
            hls_entry_prefix http://188.106.25.136:8888;
        }
    }
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/managed-by: Helm
    cmii.app: live-srs
    cmii.type: midware
    helm.sh/chart: cmlc-live-srs-rtc-2.0.0
  name: helm-live-srs-cm
  namespace: zjyd
---
apiVersion: v1
data:
  my.cnf: |2-
    [mysqld]
    port=3306
    basedir=/opt/bitnami/mysql
    datadir=/bitnami/mysql/data
    pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
    socket=/opt/bitnami/mysql/tmp/mysql.sock
    log-error=/bitnami/mysql/data/error.log
    general_log_file = /bitnami/mysql/data/general.log
    slow_query_log_file = /bitnami/mysql/data/slow.log
    innodb_data_file_path = ibdata1:512M:autoextend
    innodb_buffer_pool_size = 512M
    innodb_buffer_pool_instances = 2
    innodb_log_file_size = 512M
    innodb_log_files_in_group = 4
    log-bin = /bitnami/mysql/data/mysql-bin
    max_binlog_size=1G
    transaction_isolation = REPEATABLE-READ
    default_storage_engine = innodb
    character-set-server = utf8mb4
    collation-server=utf8mb4_bin
    binlog_format = ROW
    binlog_rows_query_log_events=on
    binlog_cache_size=4M
    binlog_expire_logs_seconds = 1296000
    max_binlog_cache_size=2G
    gtid_mode = on
    enforce_gtid_consistency = 1
    sync_binlog = 1
    innodb_flush_log_at_trx_commit = 1
    innodb_flush_method = O_DIRECT
    log_slave_updates=1
    relay_log_recovery = 1
    relay-log-purge = 1
    default_time_zone = '+08:00'
    lower_case_table_names=1
    log_bin_trust_function_creators=1
    group_concat_max_len=67108864
    innodb_io_capacity = 4000
    innodb_io_capacity_max = 8000
    innodb_flush_sync = 0
    innodb_flush_neighbors = 0
    innodb_write_io_threads = 8
    innodb_read_io_threads = 8
    innodb_purge_threads = 4
    innodb_page_cleaners = 4
    innodb_open_files = 65535
    innodb_max_dirty_pages_pct = 50
    innodb_lru_scan_depth = 4000
    innodb_checksum_algorithm = crc32
    innodb_lock_wait_timeout = 10
    innodb_rollback_on_timeout = 1
    innodb_print_all_deadlocks = 1
    innodb_file_per_table = 1
    innodb_online_alter_log_max_size = 4G
    innodb_stats_on_metadata = 0
    innodb_thread_concurrency = 0
    innodb_sync_spin_loops = 100
    innodb_spin_wait_delay = 30
    lock_wait_timeout = 3600
    slow_query_log = 1
    long_query_time = 10
    log_queries_not_using_indexes = 1
    log_throttle_queries_not_using_indexes = 60
    min_examined_row_limit = 100
    log_slow_admin_statements = 1
    log_slow_slave_statements = 1
    default_authentication_plugin=mysql_native_password
    skip-name-resolve=1
    explicit_defaults_for_timestamp=1
    plugin_dir=/opt/bitnami/mysql/plugin
    max_allowed_packet=128M
    max_connections = 2000
    max_connect_errors = 1000000
    table_definition_cache=2000
    table_open_cache_instances=64
    tablespace_definition_cache=1024
    thread_cache_size=256
    interactive_timeout = 600
    wait_timeout = 600
    tmpdir=/opt/bitnami/mysql/tmp
    max_allowed_packet=32M
    bind-address=0.0.0.0
    performance_schema = 1
    performance_schema_instrument = '%memory%=on'
    performance_schema_instrument = '%lock%=on'
    innodb_monitor_enable=ALL
    [mysql]
    no-auto-rehash
    [mysqldump]
    quick
    max_allowed_packet = 32M
    [client]
    port=3306
    socket=/opt/bitnami/mysql/tmp/mysql.sock
    default-character-set=UTF8
    plugin_dir=/opt/bitnami/mysql/plugin
    [manager]
    port=3306
    socket=/opt/bitnami/mysql/tmp/mysql.sock
    pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/component: primary
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/name: mysql-db
    app.kubernetes.io/release: zjyd
    octopus.control: mysql-db-wdd
  name: helm-mysql
  namespace: zjyd
---
apiVersion: v1
data:
  create_users_grants_core.sql: |-
    create user zyly@'%' identified by 'Cmii@451315';
    grant select on *.* to zyly@'%';
    create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
    grant all on *.* to zyly_qc@'%';
    create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
    grant all on *.* to k8s_admin@'%';
    create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
    grant all on *.* to audit_dba@'%';
    create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
    GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
    create user monitor@'%' identified by 'PL3#nGtrWbf-';
    grant REPLICATION CLIENT on *.* to monitor@'%';
    flush privileges;
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/component: primary
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/name: mysql-db
    app.kubernetes.io/release: zjyd
    octopus.control: mysql-db-wdd
  name: helm-mysql-init-scripts
  namespace: zjyd
---
apiVersion: v1
data:
  mysql.db.host: helm-mysql
  mysql.db.name: cmii_nacos_config
  mysql.password: fP#UaH6qQ3)8
  mysql.port: "3306"
  mysql.user: k8s_admin
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: 5.7.0
    cmii.app: helm-nacos
    cmii.type: middleware
    octopus.control: nacos-wdd
  name: helm-nacos-cm
  namespace: zjyd
---
apiVersion: v1
data:
  rabbitmq.conf: |-
    ## Username and password
    ##
    default_user = admin
    default_pass = nYcRN91r._hj
    ## Clustering
    ##
    cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
    cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
    cluster_formation.node_cleanup.interval = 10
    cluster_formation.node_cleanup.only_log_warning = true
    cluster_partition_handling = autoheal
    # queue master locator
    queue_master_locator = min-masters
    # enable guest user
    loopback_users.guest = false
    #default_vhost = default-vhost
    #disk_free_limit.absolute = 50MB
    #load_definitions = /app/load_definition.json
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/managed-by: rabbitmq
    app.kubernetes.io/name: helm-rabbitmq
    app.kubernetes.io/release: zjyd
    helm.sh/chart: rabbitmq-8.26.1
  name: helm-rabbitmq-config
  namespace: zjyd
---
apiVersion: v1
data:
  master.conf: |-
    dir /data
    # User-supplied master configuration:
    rename-command FLUSHDB ""
    rename-command FLUSHALL ""
    # End of master configuration
  redis.conf: |-
    # User-supplied common configuration:
    # Enable AOF https://redis.io/topics/persistence#append-only-file
    appendonly yes
    # Disable RDB persistence, AOF persistence already enabled.
    save ""
    # End of common configuration
  replica.conf: |-
    dir /data
    slave-read-only yes
    # User-supplied replica configuration:
    rename-command FLUSHDB ""
    rename-command FLUSHALL ""
    # End of replica configuration
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/name: redis-db
    app.kubernetes.io/release: zjyd
    octopus.control: redis-db-wdd
  name: helm-redis-configuration
  namespace: zjyd
---
apiVersion: v1
data:
  ping_liveness_local.sh: |-
    #!/bin/bash
    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h localhost \
        -p $REDIS_PORT \
        ping
    )
    if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
      echo "$response"
      exit 1
    fi
  ping_liveness_local_and_master.sh: |-
    script_dir="$(dirname "$0")"
    exit_status=0
    "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
    "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
    exit $exit_status
  ping_liveness_master.sh: |-
    #!/bin/bash
    [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
    [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h $REDIS_MASTER_HOST \
        -p $REDIS_MASTER_PORT_NUMBER \
        ping
    )
    if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
      echo "$response"
      exit 1
    fi
  ping_readiness_local.sh: |-
    #!/bin/bash
    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h localhost \
        -p $REDIS_PORT \
        ping
    )
    if [ "$response" != "PONG" ]; then
      echo "$response"
      exit 1
    fi
  ping_readiness_local_and_master.sh: |-
    script_dir="$(dirname "$0")"
    exit_status=0
    "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
    "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
    exit $exit_status
  ping_readiness_master.sh: |-
    #!/bin/bash
    [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
    [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
    response=$(
      timeout -s 3 $1 \
      redis-cli \
        -h $REDIS_MASTER_HOST \
        -p $REDIS_MASTER_PORT_NUMBER \
        ping
    )
    if [ "$response" != "PONG" ]; then
      echo "$response"
      exit 1
    fi
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/name: redis-db
    app.kubernetes.io/release: zjyd
    octopus.control: redis-db-wdd
  name: helm-redis-health
  namespace: zjyd
---
apiVersion: v1
data:
  start-master.sh: |
    #!/bin/bash
    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
        cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
    fi
    if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
        cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
    fi
    ARGS=("--port" "${REDIS_PORT}")
    ARGS+=("--requirepass" "${REDIS_PASSWORD}")
    ARGS+=("--masterauth" "${REDIS_PASSWORD}")
    ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
    ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
    exec redis-server "${ARGS[@]}"
  start-replica.sh: |
    #!/bin/bash

    get_port() {
        hostname="$1"
        type="$2"

        port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
        port=${!port_var}

        if [ -z "$port" ]; then
            case $type in
                "SENTINEL")
                    echo 26379
                    ;;
                "REDIS")
                    echo 6379
                    ;;
            esac
        else
            echo $port
        fi
    }

    get_full_hostname() {
        hostname="$1"
        echo "${hostname}.${HEADLESS_SERVICE}"
    }

    REDISPORT=$(get_port "$HOSTNAME" "REDIS")

    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
    if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
        cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
    fi
    if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
        cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
    fi

    echo "" >> /opt/bitnami/redis/etc/replica.conf
    echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
    echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
    ARGS=("--port" "${REDIS_PORT}")
    ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
    ARGS+=("--requirepass" "${REDIS_PASSWORD}")
    ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
    ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
    ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
    exec redis-server "${ARGS[@]}"
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/managed-by: octopus
    app.kubernetes.io/name: redis-db
    app.kubernetes.io/release: zjyd
    octopus.control: redis-db-wdd
  name: helm-redis-scripts
  namespace: zjyd
---
apiVersion: v1
data:
  ca.crt: |
    -----BEGIN CERTIFICATE-----
    MIICwjCCAaqgAwIBAgIBADANBgkqhkiG9w0BAQsFADASMRAwDgYDVQQDEwdrdWJl
    LWNhMB4XDTI0MDcyNDExMzEyM1oXDTM0MDcyMjExMzEyM1owEjEQMA4GA1UEAxMH
    a3ViZS1jYTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJwUD11pvw7V
    efqm7G+Z+BArR+pRLE6PrmXaY6Ae/3A2S/MIE/bMA2iM0eE+Nfl5UgDTIHKaLI2l
    idahKPCtntDJ0i0HE3Y1PAIdeJcSCATcqmQZb9dA/gRbg069RXQ6gcX2jkwszTF/
    Ir3mbZc2nDQ4Sb3E060GX21KL519AzXL3sHOrw/CD8ZCihSmuO5LVtrC+fAyRS0O
    pDCfR8znbW2lHPCSz2w3L8dd+djBrdfbImN1zTVfs63aZHhFuMlInTG4iHp45X2F
    XKp41oiRwmSOYyMQAoCoMnlhiSoIsEW6aqVcPe2FUT90GT7+83m0p2XqPWf2XdoX
    qsjSBtEBz7MCAwEAAaMjMCEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMB
    Af8wDQYJKoZIhvcNAQELBQADggEBAEwPekE9ZurJADhl/4fuyXXMx5k2G/vhwNRw
    8U7SxOh1hqFF2I6JtXGjOtXNMQOco4UHXiOkVGoyiKHHknxvl6WrRy2/nuyWzho2
    QH/eizpR+aHc/4GW/5DUeKe3h1Wx7XNgZxmfTUJnhcnKe2/NXH+v1ptiK3Jv5Xpl
    BpeXzBiKNYRIUXfxCUsABVEglcUY1tf9nc6oaI7atHM2Cv21cKVwpDsuisrO+oBP
    snUQbaJ6K996uXDVjSXdFfI1l6FwJ08LHP0VLIoLWPho/i0ruWLU4peryHqjbkkV
    00eXskdQUy6PNToKsqT7xDlGLZNgG7mI3C8wpyTua0d3Weodyio=
    -----END CERTIFICATE-----
kind: ConfigMap
metadata:
  name: kube-root-ca.crt
  namespace: zjyd
---
apiVersion: v1
data:
  nginx.conf: |
    server {
        listen 9528;
        server_name localhost;
        gzip on;
        location / {
            root /home/cmii-platform/dist;
            index index.html index.htm;
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }
    }
kind: ConfigMap
metadata:
  labels:
    cmii.type: frontend
  name: nginx-cm
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "ai-brain",
      AppClientId: "APP_rafnuCAmBESIVYMH"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-ai-brain
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "armypeople",
      AppClientId: "APP_UIegse6Lfou9pO1U",
      AppKey: "",
      TdtToken: "4d57b106d661128173e586d26417eed5"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-armypeople
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "base",
      AppClientId: "APP_9LY41OaKSqk2btY0"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-base
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "cmsportal",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-cmsportal
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "detection",
      AppClientId: "APP_FDHW2VLVDWPnnOCy"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-detection
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "dispatchh5",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-dispatchh5
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "emergency",
      AppClientId: "APP_aGsTAY1uMZrpKdfk"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-emergency
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "hljtt",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-hljtt
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "jiangsuwenlv",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-jiangsuwenlv
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "logistics",
      AppClientId: "APP_PvdfRRRBPL8xbIwl"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-logistics
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "media",
      AppClientId: "APP_4AU8lbifESQO4FD6"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-media
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "multiterminal",
      AppClientId: "APP_PvdfRRRBPL8xbIwl"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-multiterminal
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "mws",
      AppClientId: "APP_uKniXPELlRERBBwK"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-mws
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "oms",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-oms
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "open",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-open
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "",
      AppClientId: "empty",
      TdtToken: "4d57b106d661128173e586d26417eed5"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-pangu
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "qingdao",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-qingdao
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "qinghaitourism",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-qinghaitourism
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "security",
      AppClientId: "APP_JUSEMc7afyWXxvE7"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-security
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "securityh5",
      AppClientId: "APP_N3ImO0Ubfu9peRHD"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-securityh5
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "seniclive",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-seniclive
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "share",
      AppClientId: "APP_4lVSVI0ZGxTssir8"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-share
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "splice",
      AppClientId: "APP_zE0M3sTRXrCIJS8Y"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-splice
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "supervision",
      AppClientId: "APP_qqSu82THfexI8PLM"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-supervision
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "supervisionh5",
      AppClientId: "APP_qqSu82THfexI8PLM"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-supervisionh5
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "threedsimulation",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-threedsimulation
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "traffic",
      AppClientId: "APP_Jc8i2wOQ1t73QEJS"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-traffic
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "uas",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-uas
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "uasms",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-uasms
  namespace: zjyd
---
apiVersion: v1
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "111.2.224.59:8088",
      ApplicationShortName: "visualization",
      AppClientId: "empty"
    }
kind: ConfigMap
metadata:
  name: tenant-prefix-visualization
  namespace: zjyd