Yanqing + Shandong project

This commit is contained in:
zeaslity
2024-11-20 16:04:33 +08:00
parent 8c34f9018c
commit a0792c50cb
59 changed files with 16411 additions and 209 deletions


@@ -3,9 +3,17 @@
master_server_list=(192.168.35.105 192.168.35.114 192.168.35.115)
worker_server_list=(192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104)
all_server_list=(192.168.35.105 192.168.35.114 192.168.35.115 192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104)
all_server_list=(dev-worker-01 dev-worker-02 dev-worker-03 dev-worker-05 dev-worker-06 dev-worker-07 dev-worker-08 dev-worker-09)
for server in "${all_server_list[@]}";do
echo "server is ${server}"
ssh root@"${server}" "systemctl restart docker"
done
count=1
for server in "${all_server_list[@]}";do
echo "server is ${server}"
@@ -53,10 +61,20 @@ for server in "${all_server_list[@]}";do
# ssh root@"${server}" "chmod +x /root/wdd/disk.sh && bash /root/wdd/disk.sh"
# ssh root@"${server}" "docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn"
# ssh root@"${server}" "echo \"192.168.34.40 yfcsnfs.com\" >> /etc/hosts"
# ssh root@"${server}" "yum install -y nfs-utils rpcbind"
ssh root@"${server}" "yum install -y chrony && systemctl start chronyd && systemctl enable chronyd"
# ssh root@"${server}" "systemctl start nfs nfs-client nfs-common && systemctl enable nfs nfs-client nfs-common && systemctl status nfs nfs-client nfs-common -l | grep \"active (running)\""
# NFS-related settings
#scp /etc/docker/daemon.json root@${server}:/etc/docker/daemon.json
# ssh root@"${server}" "sed -i \"/yfcsnfs.com/d\" /etc/hosts"
# ssh root@"${server}" "cat /etc/hosts"
ssh root@"${server}" "grep -qxF \"nameserver 192.168.34.40\" /etc/resolv.conf || echo \"nameserver 192.168.34.40\" >> /etc/resolv.conf"
ssh root@"${server}" "dig yfcsnfs.com"
ssh root@"${server}" "echo -e '\n\n\n'"
done
mv ctop-0.7.7-linux-amd64 /usr/local/bin/ctop
chmod +x /usr/local/bin/ctop
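
A quick follow-up pass can confirm the changes above actually took effect. The loop below is a sketch, assuming the same all_server_list and passwordless root SSH used throughout this script:

for server in "${all_server_list[@]}";do
echo "server is ${server}"
# chronyd should report a reference source once time sync is working
ssh root@"${server}" "chronyc tracking | head -n 3"
# the nameserver line should appear exactly once thanks to the grep -qxF guard above
ssh root@"${server}" "grep -c 'nameserver 192.168.34.40' /etc/resolv.conf"
done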

File diff suppressed because it is too large


@@ -0,0 +1,560 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-visualization
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "visualization",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-dispatchh5
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "dispatchh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-detection
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "detection",
AppClientId: "APP_FDHW2VLVDWPnnOCy"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-multiterminal
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "multiterminal",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-open
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "open",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-splice
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "splice",
AppClientId: "APP_zE0M3sTRXrCIJS8Y"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qinghaitourism
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "qinghaitourism",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervision
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "supervision",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-scanner
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "scanner",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hyper
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "hyper",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-supervisionh5
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "supervisionh5",
AppClientId: "APP_qqSu82THfexI8PLM"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-cmsportal
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "cmsportal",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-logistics
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "logistics",
AppClientId: "APP_PvdfRRRBPL8xbIwl"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mws
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "mws",
AppClientId: "APP_uKniXPELlRERBBwK"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-seniclive
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "seniclive",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-traffic
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "traffic",
AppClientId: "APP_Jc8i2wOQ1t73QEJS"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-jiangsuwenlv
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "jiangsuwenlv",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-classification
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "classification",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-eventsh5
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "eventsh5",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-base
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "base",
AppClientId: "APP_9LY41OaKSqk2btY0"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-share
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "share",
AppClientId: "APP_4lVSVI0ZGxTssir8"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-threedsimulation
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "threedsimulation",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smsecret
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "smsecret",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-securityh5
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "securityh5",
AppClientId: "APP_N3ImO0Ubfu9peRHD"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-hljtt
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "hljtt",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uasms
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "uasms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-mianyangbackend
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "mianyangbackend",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-emergency
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "emergency",
AppClientId: "APP_aGsTAY1uMZrpKdfk"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-security
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "security",
AppClientId: "APP_JUSEMc7afyWXxvE7"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-blockchain
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "blockchain",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-qingdao
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "qingdao",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-uas
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "uas",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pilot2cloud
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "pilot2cloud",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-pangu
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-ai-brain
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "ai-brain",
AppClientId: "APP_rafnuCAmBESIVYMH"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-armypeople
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "armypeople",
AppClientId: "APP_UIegse6Lfou9pO1U"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-media
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "media",
AppClientId: "APP_4AU8lbifESQO4FD6"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-oms
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "oms",
AppClientId: "empty"
}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tenant-prefix-smauth
namespace: sdejpt
data:
ingress-config.js: |-
var __GlobalIngressConfig = {
TenantEnvironment: "",
CloudHOST: "134.80.124.6:8088",
ApplicationShortName: "smauth",
AppClientId: "empty"
}
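
Each ConfigMap above injects an ingress-config.js that points one frontend application at CloudHOST 134.80.124.6:8088. A minimal spot-check after applying them, assuming kubectl access to the sdejpt namespace (the manifest file name is illustrative):

kubectl apply -n sdejpt -f tenant-prefix-configmaps.yaml
kubectl -n sdejpt get configmap | grep tenant-prefix-
# print the rendered ingress-config.js of one application (the dot in the key must be escaped)
kubectl -n sdejpt get configmap tenant-prefix-visualization -o jsonpath='{.data.ingress-config\.js}'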


@@ -0,0 +1,309 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: 134.80.124.7:8033/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: 134.80.124.7:8033/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
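
The dashboard Service above is exposed on NodePort 39999 and the admin-user ServiceAccount is bound to cluster-admin. A login sketch, assuming an older cluster (v1.23 or earlier) where ServiceAccount token Secrets are still auto-created:

# fetch the bearer token for the admin-user ServiceAccount
kubectl -n kube-system get secret \
  $(kubectl -n kube-system get sa admin-user -o jsonpath='{.secrets[0].name}') \
  -o jsonpath='{.data.token}' | base64 -d; echo
# then browse to https://<any-node-ip>:39999 and paste the token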


@@ -0,0 +1,274 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: sdejpt
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: sdejpt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
data:
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_NAME: "helm-emqxs"
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
EMQX_CLUSTER__K8S__namespace: "sdejpt"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
EMQX_ALLOW_ANONYMOUS: "false"
EMQX_ACL_NOMATCH: "deny"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-cm
namespace: sdejpt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
data:
emqx_auth_mnesia.conf: |-
auth.mnesia.password_hash = sha256
# clientid authentication data
# auth.client.1.clientid = admin
# auth.client.1.password = 4YPk*DS%+5
## username authentication data
auth.user.1.username = admin
auth.user.1.password = odD8#Ve7.B
auth.user.2.username = cmlc
auth.user.2.password = odD8#Ve7.B
acl.conf: |-
{allow, {user, "admin"}, pubsub, ["admin/#"]}.
{allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.
{allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.
{allow, all}.
loaded_plugins: |-
{emqx_auth_mnesia,true}.
{emqx_auth_mnesia,true}.
{emqx_management, true}.
{emqx_recon, true}.
{emqx_retainer, false}.
{emqx_dashboard, true}.
{emqx_telemetry, true}.
{emqx_rule_engine, true}.
{emqx_bridge_mqtt, false}.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: sdejpt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
spec:
affinity: {}
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: 134.80.124.7:8033/cmii/emqx:4.4.19
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
resources: {}
volumeMounts:
- name: emqx-data
mountPath: "/opt/emqx/data/mnesia"
readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/etc/plugins/emqx_auth_mnesia.conf"
subPath: emqx_auth_mnesia.conf
readOnly: false
# - name: helm-emqxs-cm
# mountPath: "/opt/emqx/etc/acl.conf"
# subPath: "acl.conf"
# readOnly: false
- name: helm-emqxs-cm
mountPath: "/opt/emqx/data/loaded_plugins"
subPath: loaded_plugins
readOnly: false
volumes:
- name: emqx-data
persistentVolumeClaim:
claimName: helm-emqxs
- name: helm-emqxs-cm
configMap:
name: helm-emqxs-cm
items:
- key: emqx_auth_mnesia.conf
path: emqx_auth_mnesia.conf
- key: acl.conf
path: acl.conf
- key: loaded_plugins
path: loaded_plugins
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: sdejpt
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: sdejpt
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: sdejpt
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: sdejpt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: sdejpt
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
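
With the NodePort Service above, the broker can be smoke-tested from outside the cluster. A sketch assuming mosquitto-clients is installed and <node-ip> is any cluster node; the credentials come from the emqx_auth_mnesia block in helm-emqxs-cm:

# MQTT over TCP (NodePort 31883); the acl.conf above allows user admin on admin/#
mosquitto_sub -h <node-ip> -p 31883 -u admin -P 'odD8#Ve7.B' -t 'admin/#' -v
# the EMQX dashboard is reachable on NodePort 38085, MQTT over WebSocket on 38083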

File diff suppressed because it is too large


@@ -0,0 +1,696 @@
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: frontend-applications-ingress
namespace: sdejpt
labels:
type: frontend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
rewrite ^(/supervision)$ $1/ redirect;
rewrite ^(/supervisionh5)$ $1/ redirect;
rewrite ^(/pangu)$ $1/ redirect;
rewrite ^(/ai-brain)$ $1/ redirect;
rewrite ^(/armypeople)$ $1/ redirect;
rewrite ^(/base)$ $1/ redirect;
rewrite ^(/blockchain)$ $1/ redirect;
rewrite ^(/classification)$ $1/ redirect;
rewrite ^(/cmsportal)$ $1/ redirect;
rewrite ^(/detection)$ $1/ redirect;
rewrite ^(/dispatchh5)$ $1/ redirect;
rewrite ^(/emergency)$ $1/ redirect;
rewrite ^(/eventsh5)$ $1/ redirect;
rewrite ^(/hljtt)$ $1/ redirect;
rewrite ^(/hyper)$ $1/ redirect;
rewrite ^(/jiangsuwenlv)$ $1/ redirect;
rewrite ^(/logistics)$ $1/ redirect;
rewrite ^(/media)$ $1/ redirect;
rewrite ^(/mianyangbackend)$ $1/ redirect;
rewrite ^(/multiterminal)$ $1/ redirect;
rewrite ^(/mws)$ $1/ redirect;
rewrite ^(/oms)$ $1/ redirect;
rewrite ^(/open)$ $1/ redirect;
rewrite ^(/pilot2cloud)$ $1/ redirect;
rewrite ^(/qingdao)$ $1/ redirect;
rewrite ^(/qinghaitourism)$ $1/ redirect;
rewrite ^(/scanner)$ $1/ redirect;
rewrite ^(/security)$ $1/ redirect;
rewrite ^(/securityh5)$ $1/ redirect;
rewrite ^(/seniclive)$ $1/ redirect;
rewrite ^(/share)$ $1/ redirect;
rewrite ^(/smauth)$ $1/ redirect;
rewrite ^(/smsecret)$ $1/ redirect;
rewrite ^(/splice)$ $1/ redirect;
rewrite ^(/threedsimulation)$ $1/ redirect;
rewrite ^(/traffic)$ $1/ redirect;
rewrite ^(/uas)$ $1/ redirect;
rewrite ^(/uasms)$ $1/ redirect;
rewrite ^(/visualization)$ $1/ redirect;
rewrite ^(/smauth)$ $1/ redirect;
spec:
rules:
- host: fake-domain.sdejpt.io
http:
paths:
- path: /?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /supervision/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervision
servicePort: 9528
- path: /supervisionh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-platform-supervisionh5
servicePort: 9528
- path: /pangu/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform
servicePort: 9528
- path: /ai-brain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-ai-brain
servicePort: 9528
- path: /armypeople/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-armypeople
servicePort: 9528
- path: /base/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-base
servicePort: 9528
- path: /blockchain/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-blockchain
servicePort: 9528
- path: /classification/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-classification
servicePort: 9528
- path: /cmsportal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-cms-portal
servicePort: 9528
- path: /detection/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-detection
servicePort: 9528
- path: /dispatchh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-dispatchh5
servicePort: 9528
- path: /emergency/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-emergency-rescue
servicePort: 9528
- path: /eventsh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-eventsh5
servicePort: 9528
- path: /hljtt/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hljtt
servicePort: 9528
- path: /hyper/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-hyperspectral
servicePort: 9528
- path: /jiangsuwenlv/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-jiangsuwenlv
servicePort: 9528
- path: /logistics/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-logistics
servicePort: 9528
- path: /media/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-media
servicePort: 9528
- path: /mianyangbackend/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mianyangbackend
servicePort: 9528
- path: /multiterminal/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-multiterminal
servicePort: 9528
- path: /mws/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-mws
servicePort: 9528
- path: /oms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-oms
servicePort: 9528
- path: /open/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-open
servicePort: 9528
- path: /pilot2cloud/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-pilot2-to-cloud
servicePort: 9528
- path: /qingdao/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qingdao
servicePort: 9528
- path: /qinghaitourism/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-qinghaitourism
servicePort: 9528
- path: /scanner/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-scanner
servicePort: 9528
- path: /security/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-security
servicePort: 9528
- path: /securityh5/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-securityh5
servicePort: 9528
- path: /seniclive/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-seniclive
servicePort: 9528
- path: /share/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-share
servicePort: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smauth
servicePort: 9528
- path: /smsecret/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-smsecret
servicePort: 9528
- path: /splice/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-splice
servicePort: 9528
- path: /threedsimulation/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-threedsimulation
servicePort: 9528
- path: /traffic/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-traffic
servicePort: 9528
- path: /uas/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uas
servicePort: 9528
- path: /uasms/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-uasms
servicePort: 9528
- path: /visualization/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-platform-visualization
servicePort: 9528
- path: /smauth/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: uavms-security-center-web
servicePort: 9528
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: backend-applications-ingress
namespace: sdejpt
labels:
type: backend
octopus.control: all-ingress-config-wdd
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: cmii-admin-data.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-data
servicePort: 8080
- host: cmii-admin-gateway.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- host: cmii-admin-user.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-user
servicePort: 8080
- host: cmii-app-release.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-app-release
servicePort: 8080
- host: cmii-open-gateway.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- host: cmii-suav-supervision.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-suav-supervision
servicePort: 8080
- host: cmii-uas-gateway.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-gateway
servicePort: 8080
- host: cmii-uas-lifecycle.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uas-lifecycle
servicePort: 8080
- host: cmii-uav-airspace.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-airspace
servicePort: 8080
- host: cmii-uav-alarm.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-alarm
servicePort: 8080
- host: cmii-uav-autowaypoint.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-autowaypoint
servicePort: 8080
- host: cmii-uav-brain.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-brain
servicePort: 8080
- host: cmii-uav-bridge.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-bridge
servicePort: 8080
- host: cmii-uav-cloud-live.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cloud-live
servicePort: 8080
- host: cmii-uav-clusters.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-clusters
servicePort: 8080
- host: cmii-uav-cms.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-cms
servicePort: 8080
- host: cmii-uav-data-post-process.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-data-post-process
servicePort: 8080
- host: cmii-uav-depotautoreturn.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-depotautoreturn
servicePort: 8080
- host: cmii-uav-developer.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-developer
servicePort: 8080
- host: cmii-uav-device.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-device
servicePort: 8080
- host: cmii-uav-emergency.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-emergency
servicePort: 8080
- host: cmii-uav-fwdd.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-fwdd
servicePort: 8080
- host: cmii-uav-gateway.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
- host: cmii-uav-gis-server.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gis-server
servicePort: 8080
- host: cmii-uav-grid-datasource.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-datasource
servicePort: 8080
- host: cmii-uav-grid-engine.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-engine
servicePort: 8080
- host: cmii-uav-grid-manage.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-grid-manage
servicePort: 8080
- host: cmii-uav-industrial-portfolio.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-industrial-portfolio
servicePort: 8080
- host: cmii-uav-integration.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-integration
servicePort: 8080
- host: cmii-uav-iot-dispatcher.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-iot-dispatcher
servicePort: 8080
- host: cmii-uav-kpi-monitor.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-kpi-monitor
servicePort: 8080
- host: cmii-uav-logger.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-logger
servicePort: 8080
- host: cmii-uav-material-warehouse.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-material-warehouse
servicePort: 8080
- host: cmii-uav-mission.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mission
servicePort: 8080
- host: cmii-uav-mqtthandler.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-mqtthandler
servicePort: 8080
- host: cmii-uav-multilink.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-multilink
servicePort: 8080
- host: cmii-uav-notice.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-notice
servicePort: 8080
- host: cmii-uav-oauth.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-oauth
servicePort: 8080
- host: cmii-uav-process.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-process
servicePort: 8080
- host: cmii-uav-sense-adapter.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sense-adapter
servicePort: 8080
- host: cmii-uav-surveillance.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-surveillance
servicePort: 8080
- host: cmii-uav-sync.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-sync
servicePort: 8080
- host: cmii-uav-threedsimulation.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-threedsimulation
servicePort: 8080
- host: cmii-uav-tower.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-tower
servicePort: 8080
- host: cmii-uav-user.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-user
servicePort: 8080
- host: cmii-uav-waypoint.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-waypoint
servicePort: 8080
- host: uavms-security-center.uavcloud-sdejpt.io
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
serviceName: uavms-security-center
servicePort: 8080
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: all-gateways-ingress
namespace: sdejpt
labels:
type: api-gateway
octopus.control: all-ingress-config-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
spec:
rules:
- host: fake-domain.sdejpt.io
http:
paths:
- path: /oms/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-admin-gateway
servicePort: 8080
- path: /open/api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-open-gateway
servicePort: 8080
- path: /api/?(.*)
pathType: ImplementationSpecific
backend:
serviceName: cmii-uav-gateway
servicePort: 8080
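
All three Ingress objects route by Host header, and fake-domain.sdejpt.io is a placeholder that is unlikely to be in DNS. A smoke-test sketch against the ingress-nginx controller, where <ingress-ip> and the probe paths are illustrative:

# frontend route: should be answered by cmii-suav-platform-supervision
curl -H 'Host: fake-domain.sdejpt.io' http://<ingress-ip>/supervision/
# API route: /api/... is rewritten and forwarded to cmii-uav-gateway on port 8080
curl -H 'Host: fake-domain.sdejpt.io' http://<ingress-ip>/api/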


@@ -0,0 +1,78 @@
---
apiVersion: v1
kind: Service
metadata:
name: helm-mongo
namespace: sdejpt
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
spec:
type: NodePort
selector:
cmii.app: helm-mongo
cmii.type: middleware
ports:
- port: 27017
name: server-27017
targetPort: 27017
nodePort: 37017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mongo
namespace: sdejpt
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
spec:
serviceName: helm-mongo
replicas: 1
selector:
matchLabels:
cmii.app: helm-mongo
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-mongo
cmii.type: middleware
helm.sh/chart: mongo-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: 6.0.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: helm-mongo
image: 134.80.124.7:8033/cmii/mongo:5.0
resources: {}
ports:
- containerPort: 27017
name: mongo27017
protocol: TCP
env:
- name: MONGO_INITDB_ROOT_USERNAME
value: cmlc
- name: MONGO_INITDB_ROOT_PASSWORD
value: REdPza8#oVlt
volumeMounts:
- name: mongo-data
mountPath: /data/db
readOnly: false
subPath: default/helm-mongo/data/db
volumes:
- name: mongo-data
persistentVolumeClaim:
claimName: helm-mongo
---
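
The StatefulSet stores data on the helm-mongo PVC and the Service exposes 27017 on NodePort 37017. A connection sketch assuming mongosh is installed; note the '#' in the root password must be URL-encoded as %23 inside a connection URI:

mongosh "mongodb://cmlc:REdPza8%23oVlt@<node-ip>:37017/admin"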


@@ -0,0 +1,416 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
annotations: {}
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=32M
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: sdejpt
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-headless
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: mysql-deploy
operator: In
values:
- "master"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 134.80.124.7:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 134.80.124.7:8033/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/sdejpt/
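
The root and application passwords live base64-encoded in the helm-mysql Secret, and the database is reachable on NodePort 33306 in addition to the in-cluster Services on 3306/13306. A connection sketch, assuming kubectl access and a mysql client that can reach a cluster node:

# decode the root password from the Secret
kubectl -n sdejpt get secret helm-mysql -o jsonpath='{.data.mysql-root-password}' | base64 -d; echo
# connect through any node's IP on the NodePort
mysql -h <node-ip> -P 33306 -u root -p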


@@ -0,0 +1,130 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-nacos-cm
namespace: sdejpt
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.0.0
data:
mysql.db.name: "cmii_nacos_config"
mysql.db.host: "helm-mysql"
mysql.port: "3306"
mysql.user: "k8s_admin"
mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
name: helm-nacos
namespace: sdejpt
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.0.0
spec:
type: NodePort
selector:
cmii.app: helm-nacos
cmii.type: middleware
ports:
- port: 8848
name: server
targetPort: 8848
nodePort: 38848
- port: 9848
name: server12
targetPort: 9848
- port: 9849
name: server23
targetPort: 9849
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-nacos
namespace: sdejpt
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: 6.0.0
spec:
serviceName: helm-nacos
replicas: 1
selector:
matchLabels:
cmii.app: helm-nacos
cmii.type: middleware
template:
metadata:
labels:
cmii.app: helm-nacos
cmii.type: middleware
octopus.control: nacos-wdd
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/version: 6.0.0
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
imagePullSecrets:
- name: harborsecret
affinity: {}
containers:
- name: nacos-server
image: 134.80.124.7:8033/cmii/nacos-server:v2.1.2
ports:
- containerPort: 8848
name: dashboard
- containerPort: 9848
name: tcp-9848
- containerPort: 9849
name: tcp-9849
env:
- name: NACOS_AUTH_ENABLE
value: "false"
- name: NACOS_REPLICAS
value: "1"
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.password
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: helm-nacos-cm
key: mysql.db.host
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: MODE
value: standalone
- name: SPRING_DATASOURCE_PLATFORM
value: mysql
---
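
Nacos runs in standalone mode and persists its configuration in the cmii_nacos_config database on helm-mysql. A sketch for checking it is up behind NodePort 38848; the readiness path below is the stock Nacos 2.x endpoint and may differ between versions:

curl -s "http://<node-ip>:38848/nacos/v1/console/health/readiness"
# the web console itself is served at http://<node-ip>:38848/nacos/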


@@ -0,0 +1,38 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-claim
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in the nfs StorageClass manifest
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-prod-distribute
resources:
requests:
storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
name: test-pod
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: test-pod
image: 134.80.124.7:8033/cmii/busybox:latest
command:
- "/bin/sh"
args:
- "-c"
- "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" #创建一个SUCCESS文件后退出
volumeMounts:
- name: nfs-pvc
mountPath: "/mnt"
restartPolicy: "Never"
volumes:
- name: nfs-pvc
persistentVolumeClaim:
claimName: test-claim # must match the PVC name above
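
This claim-plus-pod pair is only a probe for the dynamic provisioner defined in the next file: if everything is wired up, the pod completes and leaves a marker file in the provisioned subdirectory. A verification sketch (the manifest file name is illustrative):

kubectl apply -f nfs-test.yaml
kubectl get pvc test-claim          # should become Bound through nfs-prod-distribute
kubectl get pod test-pod            # should end up Completed
# on the NFS server (134.80.124.21) the marker file appears under the generated PVC directory
ls /var/lib/docker/nfs_data/*/NFS-CREATE-SUCCESS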


@@ -0,0 +1,114 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system # set to the namespace where the provisioner is deployed; same below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: ClusterRole
# name: nfs-client-provisioner-runner
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: kube-system
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env variable in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
  namespace: kube-system # must match the namespace used in the RBAC file
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: 134.80.124.7:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cmlc-nfs-storage
- name: NFS_SERVER
value: 134.80.124.21
- name: NFS_PATH
value: /var/lib/docker/nfs_data
volumes:
- name: nfs-client-root
nfs:
server: 134.80.124.21
path: /var/lib/docker/nfs_data
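Before relying on the StorageClass, check that the provisioner pod is up and the class is registered (a minimal sketch, assuming kubectl access to this cluster):
kubectl -n kube-system get deployment nfs-client-provisioner
kubectl -n kube-system logs deploy/nfs-client-provisioner --tail=20   # should mention provisioner cmlc-nfs-storage starting
kubectl get storageclass nfs-prod-distribute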

@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-backend-log-pvc
namespace: sdejpt
labels:
cmii.type: middleware-base
cmii.app: nfs-backend-log-pvc
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.0.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-emqxs
namespace: sdejpt
labels:
cmii.type: middleware-base
cmii.app: helm-emqxs
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.0.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-mongo
namespace: sdejpt
labels:
cmii.type: middleware-base
cmii.app: helm-mongo
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.0.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: helm-rabbitmq
namespace: sdejpt
labels:
cmii.type: middleware-base
cmii.app: helm-rabbitmq
helm.sh/chart: all-persistence-volume-claims-1.1.0
app.kubernetes.io/version: 6.0.0
spec:
storageClassName: nfs-prod-distribute
accessModes:
- ReadWriteMany
volumeMode: Filesystem
resources:
requests:
storage: 20Gi
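Once the provisioner is healthy, all four claims above should bind on their own; a quick check (sketch):
kubectl -n sdejpt get pvc -l cmii.type=middleware-base   # expect STATUS Bound and STORAGECLASS nfs-prod-distribute on every claim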

@@ -0,0 +1,328 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-rabbitmq
namespace: sdejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: rabbitmq
automountServiceAccountToken: true
secrets:
- name: helm-rabbitmq
---
apiVersion: v1
kind: Secret
metadata:
name: helm-rabbitmq
namespace: sdejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: rabbitmq
type: Opaque
data:
rabbitmq-password: "blljUk45MXIuX2hq"
rabbitmq-erlang-cookie: "emFBRmt1ZU1xMkJieXZvdHRYbWpoWk52UThuVXFzcTU="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-rabbitmq-config
namespace: sdejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: rabbitmq
data:
rabbitmq.conf: |-
## Username and password
##
default_user = admin
default_pass = nYcRN91r._hj
## Clustering
##
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator = min-masters
# enable guest user
loopback_users.guest = false
#default_vhost = default-vhost
#disk_free_limit.absolute = 50MB
#load_definitions = /app/load_definition.json
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: sdejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: rabbitmq
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-rabbitmq-endpoint-reader
namespace: sdejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: rabbitmq
subjects:
- kind: ServiceAccount
name: helm-rabbitmq
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: helm-rabbitmq-endpoint-reader
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq-headless
namespace: sdejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: rabbitmq
spec:
clusterIP: None
ports:
- name: epmd
port: 4369
targetPort: epmd
- name: amqp
port: 5672
targetPort: amqp
- name: dist
port: 25672
targetPort: dist
- name: dashboard
port: 15672
targetPort: stats
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: sdejpt
publishNotReadyAddresses: true
---
apiVersion: v1
kind: Service
metadata:
name: helm-rabbitmq
namespace: sdejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: rabbitmq
spec:
type: NodePort
ports:
- name: amqp
port: 5672
targetPort: amqp
nodePort: 35672
- name: dashboard
port: 15672
targetPort: dashboard
nodePort: 36675
selector:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: sdejpt
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-rabbitmq
namespace: sdejpt
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: rabbitmq
spec:
serviceName: helm-rabbitmq-headless
podManagementPolicy: OrderedReady
replicas: 1
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: helm-rabbitmq
app.kubernetes.io/release: sdejpt
template:
metadata:
labels:
app.kubernetes.io/name: helm-rabbitmq
helm.sh/chart: rabbitmq-8.26.1
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: rabbitmq
annotations:
checksum/config: d6c2caa9572f64a06d9f7daa34c664a186b4778cd1697ef8e59663152fc628f1
checksum/secret: d764e7b3d999e7324d1afdfec6140092a612f04b6e0306818675815cec2f454f
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-rabbitmq
affinity: {}
securityContext:
fsGroup: 5001
runAsUser: 5001
terminationGracePeriodSeconds: 120
initContainers:
- name: volume-permissions
image: 134.80.124.7:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
args:
- -ec
- |
mkdir -p "/bitnami/rabbitmq/mnesia"
chown -R "5001:5001" "/bitnami/rabbitmq/mnesia"
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/rabbitmq/mnesia
containers:
- name: rabbitmq
image: 134.80.124.7:8033/cmii/rabbitmq:3.9.12-debian-10-r3
imagePullPolicy: "Always"
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_SERVICE_NAME
value: "helm-rabbitmq-headless"
- name: K8S_ADDRESS_TYPE
value: hostname
- name: RABBITMQ_FORCE_BOOT
value: "no"
- name: RABBITMQ_NODE_NAME
value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: K8S_HOSTNAME_SUFFIX
value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
- name: RABBITMQ_MNESIA_DIR
value: "/bitnami/rabbitmq/mnesia/$(RABBITMQ_NODE_NAME)"
- name: RABBITMQ_LDAP_ENABLE
value: "no"
- name: RABBITMQ_LOGS
value: "-"
- name: RABBITMQ_ULIMIT_NOFILES
value: "65536"
- name: RABBITMQ_USE_LONGNAME
value: "true"
- name: RABBITMQ_ERL_COOKIE
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-erlang-cookie
- name: RABBITMQ_LOAD_DEFINITIONS
value: "no"
- name: RABBITMQ_SECURE_PASSWORD
value: "yes"
- name: RABBITMQ_USERNAME
value: "admin"
- name: RABBITMQ_PASSWORD
valueFrom:
secretKeyRef:
name: helm-rabbitmq
key: rabbitmq-password
- name: RABBITMQ_PLUGINS
value: "rabbitmq_management, rabbitmq_peer_discovery_k8s, rabbitmq_shovel, rabbitmq_shovel_management, rabbitmq_auth_backend_ldap"
ports:
- name: amqp
containerPort: 5672
- name: dist
containerPort: 25672
- name: dashboard
containerPort: 15672
- name: epmd
containerPort: 4369
livenessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q ping
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/bash
- -ec
- rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms
initialDelaySeconds: 10
periodSeconds: 30
timeoutSeconds: 20
successThreshold: 1
failureThreshold: 3
lifecycle:
preStop:
exec:
command:
- /bin/bash
- -ec
- |
if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then
/opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t "120" -d "false"
else
rabbitmqctl stop_app
fi
resources:
limits: {}
requests: {}
volumeMounts:
- name: configuration
mountPath: /bitnami/rabbitmq/conf
- name: data
mountPath: /bitnami/rabbitmq/mnesia
volumes:
- name: configuration
configMap:
name: helm-rabbitmq-config
items:
- key: rabbitmq.conf
path: rabbitmq.conf
- name: data
persistentVolumeClaim:
claimName: helm-rabbitmq
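A basic broker check mirrors the probes in the StatefulSet, and the management UI is reachable through the NodePort service (sketch; credentials come from the helm-rabbitmq Secret above):
kubectl -n sdejpt exec helm-rabbitmq-0 -- rabbitmq-diagnostics -q ping
kubectl -n sdejpt exec helm-rabbitmq-0 -- rabbitmq-diagnostics -q cluster_status
# management dashboard: http://<any-node-ip>:36675, user admin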

@@ -0,0 +1,585 @@
---
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: helm-redis
namespace: sdejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
---
apiVersion: v1
kind: Secret
metadata:
name: helm-redis
namespace: sdejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
redis-password: "TWNhY2hlQDQ1MjI="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-configuration
namespace: sdejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
slave-read-only yes
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: outside-deploy/charts/redis-db/templates/health-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-health
namespace: sdejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
data:
ping_readiness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_local.sh: |-
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h localhost \
-p $REDIS_PORT \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ]; then
echo "$response"
exit 1
fi
ping_liveness_master.sh: |-
#!/bin/bash
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
[[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
response=$(
timeout -s 3 $1 \
redis-cli \
-h $REDIS_MASTER_HOST \
-p $REDIS_MASTER_PORT_NUMBER \
ping
)
if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then
echo "$response"
exit 1
fi
ping_readiness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
"$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
exit $exit_status
ping_liveness_local_and_master.sh: |-
script_dir="$(dirname "$0")"
exit_status=0
"$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
"$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
exit $exit_status
---
# Source: outside-deploy/charts/redis-db/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-redis-scripts
namespace: sdejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
data:
start-master.sh: |
#!/bin/bash
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
exec redis-server "${ARGS[@]}"
start-replica.sh: |
#!/bin/bash
get_port() {
hostname="$1"
type="$2"
port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g")
port=${!port_var}
if [ -z "$port" ]; then
case $type in
"SENTINEL")
echo 26379
;;
"REDIS")
echo 6379
;;
esac
else
echo $port
fi
}
get_full_hostname() {
hostname="$1"
echo "${hostname}.${HEADLESS_SERVICE}"
}
REDISPORT=$(get_port "$HOSTNAME" "REDIS")
[[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
[[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
fi
if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
fi
echo "" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf
echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf
ARGS=("--port" "${REDIS_PORT}")
ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
ARGS+=("--requirepass" "${REDIS_PASSWORD}")
ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
exec redis-server "${ARGS[@]}"
---
# Source: outside-deploy/charts/redis-db/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-headless
namespace: sdejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: sdejpt
---
# Source: outside-deploy/charts/redis-db/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-master
namespace: sdejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: sdejpt
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
---
# Source: outside-deploy/charts/redis-db/templates/replicas/service.yaml
apiVersion: v1
kind: Service
metadata:
name: helm-redis-replicas
namespace: sdejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
type: ClusterIP
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: sdejpt
app.kubernetes.io/component: replica
---
# Source: outside-deploy/charts/redis-db/templates/master/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-master
namespace: sdejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: sdejpt
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: redis
app.kubernetes.io/component: master
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
affinity: {}
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
imagePullSecrets:
- name: harborsecret
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 134.80.124.7:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-master.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: master
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
# One second longer than command timeout should prevent generation of zombie processes.
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc/
- name: tmp
mountPath: /tmp
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: tmp
emptyDir: {}
- name: redis-data
emptyDir: {}
---
# Source: outside-deploy/charts/redis-db/templates/replicas/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-redis-replicas
namespace: sdejpt
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-db
app.kubernetes.io/release: sdejpt
app.kubernetes.io/component: replica
serviceName: helm-redis-headless
updateStrategy:
rollingUpdate: {}
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: redis-db
octopus.control: redis-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: replica
annotations:
checksum/configmap: b64aa5db67e6e63811f3c1095b9fce34d83c86a471fccdda0e48eedb53a179b0
checksum/health: 6e0a6330e5ac63e565ae92af1444527d72d8897f91266f333555b3d323570623
checksum/scripts: b88df93710b7c42a76006e20218f05c6e500e6cc2affd4bb1985832f03166e98
checksum/secret: 43f1b0e20f9cb2de936bd182bc3683b720fc3cf4f4e76cb23c06a52398a50e8d
spec:
imagePullSecrets:
- name: harborsecret
securityContext:
fsGroup: 1001
serviceAccountName: helm-redis
terminationGracePeriodSeconds: 30
containers:
- name: redis
image: 134.80.124.7:8033/cmii/redis:6.2.6-debian-10-r0
imagePullPolicy: "Always"
securityContext:
runAsUser: 1001
command:
- /bin/bash
args:
- -c
- /opt/bitnami/scripts/start-scripts/start-replica.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: REDIS_REPLICATION_MODE
value: slave
- name: REDIS_MASTER_HOST
value: helm-redis-master-0.helm-redis-headless.sdejpt.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
value: "6379"
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_MASTER_PASSWORD
valueFrom:
secretKeyRef:
name: helm-redis
key: redis-password
- name: REDIS_TLS_ENABLED
value: "no"
- name: REDIS_PORT
value: "6379"
ports:
- name: redis
containerPort: 6379
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 6
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_liveness_local_and_master.sh 5
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 2
successThreshold: 1
failureThreshold: 5
exec:
command:
- sh
- -c
- /health/ping_readiness_local_and_master.sh 1
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "2"
memory: 8Gi
volumeMounts:
- name: start-scripts
mountPath: /opt/bitnami/scripts/start-scripts
- name: health
mountPath: /health
- name: redis-data
mountPath: /data
- name: config
mountPath: /opt/bitnami/redis/mounted-etc
- name: redis-tmp-conf
mountPath: /opt/bitnami/redis/etc
volumes:
- name: start-scripts
configMap:
name: helm-redis-scripts
defaultMode: 0755
- name: health
configMap:
name: helm-redis-health
defaultMode: 0755
- name: config
configMap:
name: helm-redis-configuration
- name: redis-tmp-conf
emptyDir: {}
- name: redis-data
emptyDir: {}
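Connectivity to the master and the replication state can be checked with redis-cli inside the pods, using the password from the helm-redis Secret (sketch):
REDIS_PASS=$(kubectl -n sdejpt get secret helm-redis -o jsonpath='{.data.redis-password}' | base64 -d)
kubectl -n sdejpt exec helm-redis-master-0 -- redis-cli -a "$REDIS_PASS" ping                      # expect PONG
kubectl -n sdejpt exec helm-redis-replicas-0 -- redis-cli -a "$REDIS_PASS" info replication | grep role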

@@ -0,0 +1,496 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-srs-cm
namespace: sdejpt
labels:
cmii.app: live-srs
cmii.type: live
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
data:
srs.rtc.conf: |-
listen 31935;
max_connections 4096;
srs_log_tank console;
srs_log_level info;
srs_log_file /home/srs.log;
daemon off;
http_api {
enabled on;
listen 1985;
crossdomain on;
}
stats {
network 0;
}
http_server {
enabled on;
listen 8080;
dir /home/hls;
}
srt_server {
enabled on;
listen 30556;
maxbw 1000000000;
connect_timeout 4000;
peerlatency 600;
recvlatency 600;
}
rtc_server {
enabled on;
listen 30090;
candidate $CANDIDATE;
}
vhost __defaultVhost__ {
http_hooks {
enabled on;
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
}
http_remux {
enabled on;
}
rtc {
enabled on;
rtmp_to_rtc on;
rtc_to_rtmp on;
keep_bframe off;
}
tcp_nodelay on;
min_latency on;
play {
gop_cache off;
mw_latency 100;
mw_msgs 10;
}
publish {
firstpkt_timeout 8000;
normal_timeout 4000;
mr on;
}
dvr {
enabled off;
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
dvr_plan session;
}
hls {
enabled on;
hls_path /home/hls;
hls_fragment 10;
hls_window 60;
hls_m3u8_file [app]/[stream].m3u8;
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
hls_cleanup on;
hls_entry_prefix http://134.80.124.6:8088;
}
}
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc-exporter
namespace: sdejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
nodePort: 31935
- name: rtc
protocol: UDP
port: 30090
targetPort: 30090
nodePort: 30090
- name: rtc-tcp
protocol: TCP
port: 30090
targetPort: 30090
nodePort: 30090
- name: srt
protocol: UDP
port: 30556
targetPort: 30556
nodePort: 30556
- name: api
protocol: TCP
port: 1985
targetPort: 1985
nodePort: 30080
selector:
srs-role: rtc
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srs-svc
namespace: sdejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: http
protocol: TCP
port: 8080
targetPort: 8080
- name: api
protocol: TCP
port: 1985
targetPort: 1985
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-srsrtc-svc
namespace: sdejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- name: rtmp
protocol: TCP
port: 31935
targetPort: 31935
selector:
srs-role: rtc
type: ClusterIP
sessionAffinity: None
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: helm-live-srs-rtc
namespace: sdejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-srs
cmii.type: live
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
srs-role: rtc
spec:
replicas: 1
selector:
matchLabels:
srs-role: rtc
template:
metadata:
labels:
srs-role: rtc
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-srs-cm
items:
- key: srs.rtc.conf
path: docker.conf
defaultMode: 420
- name: srs-vol
emptyDir:
sizeLimit: 8Gi
containers:
- name: srs-rtc
image: 134.80.124.7:8033/cmii/srs:v5.0.195
ports:
- name: srs-rtmp
containerPort: 31935
protocol: TCP
- name: srs-api
containerPort: 1985
protocol: TCP
- name: srs-flv
containerPort: 8080
protocol: TCP
- name: srs-webrtc
containerPort: 30090
protocol: UDP
- name: srs-webrtc-tcp
containerPort: 30090
protocol: TCP
- name: srs-srt
containerPort: 30556
protocol: UDP
env:
- name: CANDIDATE
value: 134.80.124.6
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /usr/local/srs/conf/docker.conf
subPath: docker.conf
- name: srs-vol
mountPath: /home/dvr
subPath: sdejpt/helm-live/dvr
- name: srs-vol
mountPath: /home/hls
subPath: sdejpt/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
- name: oss-adaptor
image: 134.80.124.7:8033/cmii/cmii-srs-oss-adaptor:2023-SA
env:
- name: OSS_ENDPOINT
value: 'http://134.80.124.15:9000'
- name: OSS_AK
value: cmii
- name: OSS_SK
value: 'B#923fC7mk'
- name: OSS_BUCKET
value: live-cluster-hls
- name: SRS_OP
value: 'http://helm-live-op-svc-v2:8080'
- name: MYSQL_ENDPOINT
value: 'helm-mysql:3306'
- name: MYSQL_USERNAME
value: k8s_admin
- name: MYSQL_PASSWORD
value: fP#UaH6qQ3)8
- name: MYSQL_DATABASE
value: cmii_live_srs_op
- name: MYSQL_TABLE
value: live_segment
- name: LOG_LEVEL
value: info
- name: OSS_META
value: 'yes'
resources:
limits:
cpu: 2000m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-vol
mountPath: /cmii/share/hls
subPath: sdejpt/helm-live/hls
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
serviceName: helm-live-srsrtc-svc
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
revisionHistoryLimit: 10
---
# live-srs section
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: helm-live-op-v2
namespace: sdejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
helm.sh/chart: cmlc-live-live-op-2.0.0
live-role: op-v2
spec:
replicas: 1
selector:
matchLabels:
live-role: op-v2
template:
metadata:
labels:
live-role: op-v2
spec:
volumes:
- name: srs-conf-file
configMap:
name: helm-live-op-cm-v2
items:
- key: live.op.conf
path: bootstrap.yaml
defaultMode: 420
containers:
- name: helm-live-op-v2
image: 134.80.124.7:8033/cmii/cmii-live-operator:5.2.0
ports:
- name: operator
containerPort: 8080
protocol: TCP
resources:
limits:
cpu: 4800m
memory: 4Gi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- name: srs-conf-file
mountPath: /cmii/bootstrap.yaml
subPath: bootstrap.yaml
livenessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
readinessProbe:
httpGet:
path: /cmii/health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 20
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: Always
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: ClusterFirst
securityContext: {}
imagePullSecrets:
- name: harborsecret
affinity: {}
schedulerName: default-scheduler
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25%
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc-v2
namespace: sdejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
nodePort: 30333
selector:
live-role: op-v2
type: NodePort
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: helm-live-op-svc
namespace: sdejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
spec:
ports:
- protocol: TCP
port: 8080
targetPort: 8080
selector:
live-role: op
type: ClusterIP
sessionAffinity: None
---
kind: ConfigMap
apiVersion: v1
metadata:
name: helm-live-op-cm-v2
namespace: sdejpt
labels:
octopus.control: wdd
app.kubernetes.io/managed-by: octopus
cmii.app: live-engine
cmii.type: live
data:
live.op.conf: |-
server:
port: 8080
spring:
main:
allow-bean-definition-overriding: true
allow-circular-references: true
application:
name: cmii-live-operator
platform:
info:
name: cmii-live-operator
description: cmii-live-operator
version: 6.0.0
scanPackage: com.cmii.live.op
cloud:
nacos:
config:
username: nacos
password: KingKong@95461234
server-addr: helm-nacos:8848
extension-configs:
- data-id: cmii-live-operator.yml
group: 6.0.0
refresh: true
shared-configs:
- data-id: cmii-backend-system.yml
group: 6.0.0
refresh: true
discovery:
enabled: false
live:
engine:
type: srs
endpoint: 'http://helm-live-srs-svc:1985'
proto:
rtmp: 'rtmp://134.80.124.6:31935'
rtsp: 'rtsp://134.80.124.6:30554'
srt: 'srt://134.80.124.6:30556'
flv: 'http://134.80.124.6:30500'
hls: 'http://134.80.124.6:30500'
rtc: 'webrtc://134.80.124.6:30080'
replay: 'https://134.80.124.6:30333'
minio:
endpoint: http://134.80.124.15:9000
access-key: cmii
secret-key: B#923fC7mk
bucket: live-cluster-hls
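A simple end-to-end test of the live stack is to push an RTMP stream at the NodePort and pull it back over the FLV endpoint listed in the operator config above (a sketch; it assumes ffmpeg/ffplay are available on the test machine and uses any local test.mp4 as the source):
ffmpeg -re -stream_loop -1 -i test.mp4 -c copy -f flv rtmp://134.80.124.6:31935/live/demo
ffplay http://134.80.124.6:30500/live/demo.flv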

@@ -0,0 +1,68 @@
#!/bin/bash
harbor_host=192.168.0.10:8033
namespace=hbyd
app_name=""
new_tag=""
upload_image_to_harbor(){
if [ "$app_name" == "" ]; then
echo "app name null exit!"
exit 233
fi
if ! docker load < "$1"; then
echo "docker load error !"
fi
docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
echo ""
echo ""
echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
docker login -u admin -p V2ryStr@ngPss $harbor_host
docker push "$harbor_host/cmii/$app_name:$new_tag"
echo ""
echo ""
}
parse_args(){
if [ "$1" == "" ]; then
echo "no zip file in error!"
exit 233
fi
local image_name="$1"
# cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
app_name=$(echo $image_name | cut -d "=" -f1)
new_tag=$(echo $image_name | cut -d "=" -f2)
}
update_image_tag(){
if [ "$new_tag" == "" ]; then
echo "new tag error!"
exit 233
fi
local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)
echo "image grep is => ${image_prefix}"
echo "start to update ${namespace} ${app_name} to ${new_tag} !"
echo ""
kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
echo ""
echo "start to wait for 3 seconds!"
sleep 3
local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
echo ""
echo "new image are => $image_new"
echo ""
}
main(){
parse_args "$1"
upload_image_to_harbor "$1"
update_image_tag
}
main "$@"

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
# run this on every node
hostnamectl set-hostname storage-1
sed -i "/search/ a nameserver 223.5.5.5" /etc/resolv.conf
echo "AllowTcpForwarding yes" >> /etc/ssh/sshd_config
systemctl restart sshd
cat > /etc/hosts << EOF
192.168.0.8 master-node
192.168.0.65 worker-1
192.168.0.45 worker-2
192.168.0.7 worker-3
192.168.0.9 worker-4
192.168.0.10 worker-5
192.168.0.11 worker-6
192.168.0.84 worker-7
192.168.0.85 worker-8
192.168.0.86 worker-9
192.168.0.2 storage-1
192.168.0.15 gpu-1
192.168.0.12 gpu-2
192.168.0.13 gpu-3
EOF
yum clean all && yum makecache
# 36.134.71.138
# Lingyun@443

@@ -0,0 +1,75 @@
#! /bin/bash
# turn off swap
swapoff -a
cp -f /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak | grep -v swap >/etc/fstab
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "扩容之前的root目录的容量为${RootVolumeSizeBefore}"
# echo "y
# " | lvremove /dev/mapper/centos-swap
# freepesize=$(vgdisplay centos | grep 'Free PE' | awk '{print $5}')
# lvextend -l+${freepesize} /dev/mapper/centos-root
# ## # automatically grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/centos-root
# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "扩容之后的root目录的容量为${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)
# echo "恭喜您的root目录容量增加了+++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
export VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/vdb
partprobe
# if the volume group already exists, just extend it
# vgextend /dev/mapper/centos /dev/vda3
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# adjust the size to the actual environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
# export selffstab="/dev/mapper/${VG_NAME}-lvdata /data xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
# to grow the root filesystem (${VG_NAME}-root), find the target filesystem with df -Th
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
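After the script finishes, the new logical volume should be mounted on /var/lib/docker and recorded in fstab; a quick check (sketch):
lsblk /dev/vdb                 # vdb1 should carry the datavg-lvdata LV
df -Th /var/lib/docker         # expect xfs on /dev/mapper/datavg-lvdata
grep lvdata /etc/fstab         # the mount must persist across reboots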

@@ -0,0 +1,280 @@
nodes:
- address: 192.168.0.8
user: rke-installer
role:
- controlplane
- etcd
- worker
internal_address: 192.168.0.8
labels:
ingress-deploy: true
uavcloud.env: demo
- address: 192.168.0.65
user: rke-installer
role:
- worker
internal_address: 192.168.0.65
labels:
uavcloud.env: demo
- address: 192.168.0.45
user: rke-installer
role:
- worker
internal_address: 192.168.0.45
labels:
uavcloud.env: demo
- address: 192.168.0.7
user: rke-installer
role:
- worker
internal_address: 192.168.0.7
labels:
mysql-deploy: true
uavcloud.env: demo
- address: 192.168.0.9
user: rke-installer
role:
- worker
internal_address: 192.168.0.9
labels:
uavcloud.env: demo
- address: 192.168.0.10
user: rke-installer
role:
- worker
internal_address: 192.168.0.10
labels:
uavcloud.env: demo
- address: 192.168.0.11
user: rke-installer
role:
- worker
internal_address: 192.168.0.11
labels:
uavcloud.env: demo
- address: 192.168.0.83
user: rke-installer
role:
- worker
internal_address: 192.168.0.83
labels:
uavcloud.env: demo
- address: 192.168.0.84
user: rke-installer
role:
- worker
internal_address: 192.168.0.84
labels:
uavcloud.env: demo
- address: 192.168.0.85
user: rke-installer
role:
- worker
internal_address: 192.168.0.85
labels:
uavcloud.env: demo
authentication:
strategy: x509
sans:
- "192.168.0.8"
private_registries:
  - url: 192.168.0.8:8033 # private registry address
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# defaults to false; if set to true, RKE will not raise an error when an unsupported Docker version is detected
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /home/rke-installer/.ssh/id_ed25519
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 10.74.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 1
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 10.100.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 10.74.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 10.74.0.10
# Fail if swap is on
fail_swap_on: false
    # raise the kubelet max pods above the default of 110 (set to 122 below)
extra_binds:
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
extra_args:
max-pods: 122
# Optionally define additional volume binds to a service
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 1
authorization:
mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
mtu: 1440
options:
flannel_backend_type: vxlan
plugin: calico
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal: {}
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 0
https_port: 0
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: true
options:
use-forwarded-headers: "true"
access-log-path: /var/log/nginx/access.log
# client-body-timeout: '6000'
# compute-full-forwarded-for: 'true'
# enable-underscores-in-headers: 'true'
# log-format-escape-json: 'true'
# log-format-upstream: >-
# { "msec": "$msec", "connection": "$connection", "connection_requests":
# "$connection_requests", "pid": "$pid", "request_id": "$request_id",
# "request_length": "$request_length", "remote_addr": "$remote_addr",
# "remote_user": "$remote_user", "remote_port": "$remote_port",
# "http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
# "$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
# "request_uri": "$request_uri", "args": "$args", "status": "$status",
# "body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
# "http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
# "http_host": "$http_host", "server_name": "$server_name", "request_time":
# "$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
# "$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
# "upstream_response_time": "$upstream_response_time",
# "upstream_response_length": "$upstream_response_length",
# "upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
# "$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
# "request_method": "$request_method", "server_protocol": "$server_protocol",
# "pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
# "geoip_country_code": "$geoip_country_code" }
# proxy-body-size: 5120m
# proxy-read-timeout: '6000'
# proxy-send-timeout: '6000'
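With this file saved as cluster.yml on the master node, the cluster is brought up with the matching RKE binary and the generated kubeconfig is then used for kubectl (a sketch, assuming rke is already installed for the rke-installer user):
su - rke-installer
rke up --config cluster.yml
mkdir -p ~/.kube && cp kube_config_cluster.yml ~/.kube/config
kubectl get nodes -o wide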

@@ -0,0 +1,79 @@
#!/bin/bash
all_server_list=(192.168.0.10)
all_server_list=(192.168.0.9 192.168.0.10 192.168.0.11 192.168.0.65 192.168.0.45 192.168.0.7)
all_server_list=(192.168.0.83 192.168.0.84 192.168.0.85)
for server in "${all_server_list[@]}";do
echo "server is ${server}"
ssh ${server} "echo yes"
ssh ${server} "curl -s 10.74.32.6"
echo ""
done
kubectl taint nodes 192.168.0.9 key=experimental:NoSchedule
kubectl taint nodes 192.168.0.10 key=experimental:NoSchedule
kubectl taint nodes 192.168.0.11 key=experimental:NoSchedule
kubectl cordon 192.168.0.9
kubectl cordon 192.168.0.10
kubectl cordon 192.168.0.11
scp /root/.ssh/* root@${server}:/root/.ssh/
ssh root@${server} "mkdir /root/wdd"
scp /usr/local/bin/octopus-agent root@${server}:/usr/local/bin/octopus-agent
ssh root@${server} "chmod +x /usr/local/bin/octopus-agent"
ssh root@${server} "printf 'ssh\n' | octopus-agent --mode=bastion"
scp /root/wdd/disk.sh root@${server}:/root/wdd/disk.sh
ssh root@${server} "bash /root/wdd/disk.sh"
ssh root@${server} "mkdir /root/wdd"
scp /usr/local/bin/octopus-agent root@${server}:/usr/local/bin/octopus-agent
ssh root@${server} "chmod +x /usr/local/bin/octopus-agent"
ssh root@${server} "printf 'ssh\n' | octopus-agent --mode=bastion"
scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/
scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${server}:/root/wdd/
ssh root@${server} "printf 'firewall\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'swap\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'selinux\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'docker\n' | octopus-agent --mode=bastion"
ssh root@${server} "printf 'dockercompose\n' | octopus-agent --mode=bastion"
scp /etc/docker/daemon.json root@${server}:/etc/docker/
ssh root@${server} "systemctl restart docker && sleep 3 && docker info"
echo "SuperRke.123
SuperRke.123
" | passwd rke-installer
ssh root@${server} "useradd rke-installer"
ssh root@${server} "mkdir /home/rke-installer"
ssh root@${server} "mkdir /home/rke-installer/.ssh "
scp /home/rke-installer/.ssh/* root@${server}:/home/rke-installer/.ssh/
ssh root@${server} "chown rke-installer:rke-installer -R /home/rke-installer"
ssh root@${server} "usermod -d /home/rke-installer rke-installer"
ssh root@${server} "gpasswd -a rke-installer docker"
ssh root@${server} "newgrp docker"
echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCrEuk9I6spsHv03cSFQmhx0HZ90VBtqBCGlaisasJAK7wkam/AStAvB8Svn6GiNyJFp/vTr2C8MN01poJQ2pWVmj+WcIWVdbrwYmvIsLC6A4dtdxGOEj5Oejzod9JavAwz+EhTjy61T1Xyc7LSafYNtfrir7X7rCZhSN+GhdD9nN0TFbpLax4t84QDU8+dyALzmGEzbPKFZ2BBpbiNu8I0nheRojMdvwCIjV/c+RNEwXKOOk4EXch9mW4HJJ/n36mfC6Dq3CaDaX+IGuK/lgim2wDQFUFfeGWSgiERDZN2sFyxeyliBL39J7gQFXFcVXud+TCujKgpdzmyb+AulKFrUja2kSoA8CLmyiaRkha6SQXkaT/+KVE2q8mNeA6DDxlRR3rBB8MHJ6IKz1+SgwIip8t/ybgthmkBbFWXvjVfIK3cfmMaxqLTlJNEhngLRItLxOa96GivCSziY8CNidWlL4ekC4WF8X+zseBhnFCPG1hmbi4bMcw86CTIFgNhc20= rke-installer@master-node" >>/home/rke-installer/.ssh/authorized_keys
sed -i "s/\/data/\/var/lib/docker/g" /etc/fstab
umount /data
mount -a
df -TH

@@ -0,0 +1,229 @@
nodes:
- address: 172.16.200.30
user: root
role:
- controlplane
- etcd
- worker
internal_address: 172.16.200.30
labels:
ingress-deploy: true
- address: 172.16.200.31
user: root
role:
- worker
internal_address: 172.16.200.31
labels:
ingress-deploy: true
uavcloud.env: demo
- address: 172.16.200.32
user: root
role:
- worker
internal_address: 172.16.200.32
labels:
ingress-deploy: true
mysql-deploy: true
uavcloud.env: demo
- address: 172.16.200.33
user: root
role:
- worker
internal_address: 172.16.200.33
labels:
ingress-deploy: true
uavcloud.env: demo
- address: 172.16.200.34
user: root
role:
- worker
internal_address: 172.16.200.34
labels:
ingress-deploy: true
uavcloud.env: demo
authentication:
strategy: x509
sans:
- "172.16.200.30"
private_registries:
  - url: 172.16.200.30:8033 # private registry address
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# defaults to false; if set to true, RKE will not raise an error when an unsupported Docker version is detected
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 10.74.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 1
# Using the EventRateLimit admission control enforces a limit on the number of events
# that the API Server will accept in a given time period
# Available as of v1.0.0
event_rate_limit:
enabled: false
configuration:
apiVersion: eventratelimit.admission.k8s.io/v1alpha1
kind: Configuration
limits:
- type: Server
qps: 6000
burst: 30000
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 10.100.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 10.74.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 10.74.0.10
# Fail if swap is on
fail_swap_on: false
    # raise the kubelet max pods above the default of 110 (set to 122 below)
extra_binds:
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
extra_args:
max-pods: 122
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 0
authorization:
mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
mtu: 1440
options:
flannel_backend_type: vxlan
plugin: calico
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal:
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 0
https_port: 0
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: true
options:
use-forwarded-headers: "true"

@@ -48,19 +48,19 @@ t
 8e
 w
-" | fdisk /dev/sdb
+" | fdisk /dev/vdb
 partprobe
 # if the volume group already exists, just extend it
 # vgextend /dev/mapper/centos /dev/vda3
-vgcreate ${VG_NAME} /dev/sdb1
+vgcreate ${VG_NAME} /dev/vdb1
 export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
 # adjust the size to the actual environment
 lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
 mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
 mkdir -p /data
 mkdir -p /var/lib/docker
-#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
+selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
-export selffstab="/dev/mapper/${VG_NAME}-lvdata /data xfs defaults 0 0"
+# export selffstab="/dev/mapper/${VG_NAME}-lvdata /data xfs defaults 0 0"
 echo "${selffstab}" >> /etc/fstab
 mount -a

@@ -103,8 +103,8 @@ createRKEInstallerUser(){
 colorEcho ${BLUE} "starting to create the rke-installer user..."
 echo ""
 useradd rke-installer
-echo "rke-installer
-rke-installer
+echo "SuperRke.123
+SuperRke.123
 " | passwd rke-installer
 # add the login user to the docker group
 gpasswd -a rke-installer docker

@@ -8,4 +8,9 @@ https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/harbor-offline-i
 https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/docker-compose-linux-x86_64-v2.18.0
+mv /root/octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
+chmod +x /usr/local/bin/octopus-agent
+printf 'help\n' | octopus-agent --mode=bastion
 file_list=(docker-amd64-20.10.15.tgz harbor-offline-installer-v2.9.0 docker-compose-linux-x86_64-v2.18.0)

@@ -7,7 +7,7 @@ host_list=(10.129.80.217 10.129.80.245 10.129.80.222 10.129.80.223)
for ip in "${host_list[@]}";do for ip in "${host_list[@]}";do
echo "current ip is $ip" echo "current ip is $ip"
ssh root@${ip} "curl 172.24.152.72" ssh root@${server} "curl 172.24.152.72"
done done
@@ -15,27 +15,27 @@ disk
10.129.80.245 10.129.80.245
mv /root/wdd/octopus-agent_linux_amd64_2024-09-23-17-08-44 /usr/local/bin/octopus-agent mv /root/wdd/octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
chmod +x /usr/local/bin/octopus-agent chmod +x /usr/local/bin/octopus-agent
# ssh root@${ip} "mkdir /root/wdd" ssh root@${server} "mkdir /root/wdd"
# scp /usr/local/bin/octopus-agent root@${ip}:/usr/local/bin/octopus-agent scp /usr/local/bin/octopus-agent root@${server}:/usr/local/bin/octopus-agent
# scp /root/wdd/docker-amd64-20.10.15.tgz root@${ip}:/root/wdd/ scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
# scp /root/wdd/nfs_client_22.04.4_amd64.tar.gz root@${ip}:/root/wdd/ scp /root/wdd/nfs_client_22.04.4_amd64.tar.gz root@${server}:/root/wdd/
# scp /root/wdd/nfs_server_22.04.4_amd64.tar.gz root@${ip}:/root/wdd/ scp /root/wdd/nfs_server_22.04.4_amd64.tar.gz root@${server}:/root/wdd/
# scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${ip}:/root/wdd/ scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${server}:/root/wdd/
# ssh root@${ip} "chmod +x /usr/local/bin/octopus-agent" ssh root@${server} "chmod +x /usr/local/bin/octopus-agent"
# ssh root@${ip} "printf 'firewall\n' | octopus-agent --mode=bastion" ssh root@${server} "printf 'firewall\n' | octopus-agent --mode=bastion"
# ssh root@${ip} "printf 'sysconfig\n' | octopus-agent --mode=bastion" ssh root@${server} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
# ssh root@${ip} "printf 'swap\n' | octopus-agent --mode=bastion" ssh root@${server} "printf 'swap\n' | octopus-agent --mode=bastion"
# ssh root@${ip} "printf 'selinux\n' | octopus-agent --mode=bastion" ssh root@${server} "printf 'selinux\n' | octopus-agent --mode=bastion"
# ssh root@${ip} "printf 'docker\n' | octopus-agent --mode=bastion" ssh root@${server} "printf 'docker\n' | octopus-agent --mode=bastion"
# ssh root@${ip} "printf 'dockercompose\n' | octopus-agent --mode=bastion" ssh root@${server} "printf 'dockercompose\n' | octopus-agent --mode=bastion"
scp /etc/docker/daemon.json root@${ip}:/etc/docker/ scp /etc/docker/daemon.json root@${server}:/etc/docker/
ssh root@${ip} "systemctl restart docker && sleep 3 && docker info" ssh root@${server} "systemctl restart docker && sleep 3 && docker info"
sed -i '/^$/d' ~/.ssh/* sed -i '/^$/d' ~/.ssh/*
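For reference, one way to keep the loop variable consistent is to wrap the distribution steps in the same "for ip" loop used earlier in this file; this is a sketch of that layout, not the exact content of the original script.
for ip in "${host_list[@]}"; do
    echo "current ip is $ip"
    ssh root@${ip} "mkdir -p /root/wdd"
    scp /usr/local/bin/octopus-agent root@${ip}:/usr/local/bin/octopus-agent
    scp /etc/docker/daemon.json root@${ip}:/etc/docker/
    ssh root@${ip} "chmod +x /usr/local/bin/octopus-agent && systemctl restart docker && sleep 3 && docker info"
done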

View File

@@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
namespace=xmyd namespace=bjyd
install_yq() { install_yq() {
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq
@@ -43,7 +43,7 @@ backup_all_stateful_sets() {
echo "" echo ""
} }
#install_yq install_yq
backup_all_deployment backup_all_deployment
backup_all_service backup_all_service
backup_all_stateful_sets backup_all_stateful_sets
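The backup functions themselves are not shown in this hunk; a minimal sketch of what backup_all_deployment could look like, assuming the namespace variable above, is given below. The backup path is an assumption, and the real script presumably uses the yq it installs to strip server-side fields, which this sketch omits.
backup_all_deployment() {
    local backup_dir="/root/backup/${namespace}/deployment"   # assumed backup location
    mkdir -p "${backup_dir}"
    # dump every deployment in the namespace to its own yaml file
    for deploy in $(kubectl -n "${namespace}" get deployment -o jsonpath='{.items[*].metadata.name}'); do
        kubectl -n "${namespace}" get deployment "${deploy}" -o yaml > "${backup_dir}/${deploy}.yaml"
    done
}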

View File

@@ -5,7 +5,7 @@ wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/jq-linux-am
chmod +x /usr/local/bin/jq chmod +x /usr/local/bin/jq
export name_space=bjyd export name_space=xmyd
kubectl delete pods -n $name_space --field-selector status.phase!=Running --force kubectl delete pods -n $name_space --field-selector status.phase!=Running --force
@@ -13,3 +13,4 @@ kubectl get pods -n $name_space -o json | jq -r '.items[] | select(.status.conta
kubectl -n ${name_space} delete pod helm-nacos-0 --force kubectl -n ${name_space} delete pod helm-nacos-0 --force
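The jq filter in the hunk header above is truncated; one possible filter for force-deleting pods whose containers are not all ready, using the same name_space variable, is sketched below. It is an assumption about the intent, not the original expression.
kubectl get pods -n "$name_space" -o json \
  | jq -r '.items[] | select(.status.containerStatuses != null) | select([.status.containerStatuses[].ready] | all | not) | .metadata.name' \
  | xargs -r -I {} kubectl -n "$name_space" delete pod {} --force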

View File

@@ -0,0 +1,24 @@
#!/bin/bash
name_space=xmyd
kubectl get pods --namespace="${name_space}" --sort-by='.status.containerStatuses[].restartCount' | awk '$4 > 7 {print $1} ' | grep -v NAME | xargs -I {} kubectl delete -n "${name_space}" pod {} --force
#all_cmii_name_space=(uavcloud-test uavcloud-feature uavcloud-uat uavcloud-dev uavcloud-devflight uavcloud-devoperation)
all_cmii_name_space=(uavcloud-test uavcloud-feature uavcloud-dev uavcloud-devflight uavcloud-devoperation)
echo ""
for name_space in "${all_cmii_name_space[@]}"; do
echo "[NAMESPACE] - start to deal with namespace [$name_space]"
if ! kubectl get ns "$name_space"; then
echo "[NAMESPACE] - namespace of [$name_space] not exists !"
echo ""
continue
fi
echo ""
kubectl get pods --namespace="${name_space}" --sort-by='.status.containerStatuses[].restartCount' | awk '$4 > 30 {print $1} ' | sed 's/-[a-z0-9]\{9,10\}-[a-z0-9]\{5\}$//' | xargs -I {} kubectl scale -n "${name_space}" --replicas=0 deployment {}
echo ""
done
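Before scaling deployments to zero it can help to review the restart counts first; a read-only sketch using the same sort key as the script above:
for name_space in "${all_cmii_name_space[@]}"; do
    echo "[NAMESPACE] - restart counts in [$name_space]"
    kubectl get pods -n "${name_space}" \
        --sort-by='.status.containerStatuses[].restartCount' \
        -o custom-columns='NAME:.metadata.name,RESTARTS:.status.containerStatuses[0].restartCount'
done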

File diff suppressed because it is too large

View File

@@ -0,0 +1,66 @@
#!/bin/bash
# set the test directory
TEST_DIR="/var/lib/docker/"
# set the sizes for the large-file and small-file tests
LARGE_FILE_SIZE=3G # 3GB (not used directly; the dd commands below use explicit counts)
SMALL_FILE_SIZE=10M # 10MB (not used directly)
LARGE_FILE="${TEST_DIR}/large_test_file"
SMALL_FILE="${TEST_DIR}/small_test_file"
# create the test directory if it does not exist
mkdir -p "$TEST_DIR"
# test large-file read/write speed
echo "Starting large-file read/write test..."
# write the large file and measure the time
start_time=$(date +%s.%N)
dd if=/dev/zero of="$LARGE_FILE" bs=1M count=1024 conv=fdatasync
end_time=$(date +%s.%N)
write_time=$(echo "$end_time - $start_time" | bc)
write_speed=$(echo "scale=2; 1024 / $write_time" | bc)
echo "Large-file write speed: ${write_speed} MB/s"
# read the large file and measure the time (the read may be served from the page cache)
start_time=$(date +%s.%N)
dd if="$LARGE_FILE" of=/dev/null bs=1M
end_time=$(date +%s.%N)
read_time=$(echo "$end_time - $start_time" | bc)
read_speed=$(echo "scale=2; 1024 / $read_time" | bc)
echo "Large-file read speed: ${read_speed} MB/s"
# clean up the large file
rm -f "$LARGE_FILE"
# test small-file read/write speed
echo "Starting small-file read/write test..."
# write the small files and measure the time
start_time=$(date +%s.%N)
for i in $(seq 1 3); do
    dd if=/dev/zero of="${TEST_DIR}/small_test_file_$i" bs=1M count=1 conv=fdatasync
done
end_time=$(date +%s.%N)
write_time=$(echo "$end_time - $start_time" | bc)
# 3 files of 1MB each are written, so divide 3 MB by the elapsed time
write_speed=$(echo "scale=2; 3 / $write_time" | bc)
echo "Small-file write speed: ${write_speed} MB/s"
# read the small files and measure the time
start_time=$(date +%s.%N)
for i in $(seq 1 3); do
    dd if="${TEST_DIR}/small_test_file_$i" of=/dev/null bs=1M
done
end_time=$(date +%s.%N)
read_time=$(echo "$end_time - $start_time" | bc)
read_speed=$(echo "scale=2; 3 / $read_time" | bc)
echo "Small-file read speed: ${read_speed} MB/s"
# clean up the small files
rm -f "${TEST_DIR}/small_test_file_"*
echo "Test complete!"

View File

@@ -0,0 +1,67 @@
import os
import time


def write_large_file(file_path, size_mb):
    """Write a large file in 1 MB chunks to avoid holding the whole payload in memory."""
    chunk = os.urandom(1024 * 1024)  # 1 MB of random data, reused for every chunk
    with open(file_path, 'wb') as f:
        for _ in range(size_mb):
            f.write(chunk)


def read_large_file(file_path):
    """Read the large file back."""
    with open(file_path, 'rb') as f:
        f.read()  # read the whole file content


def write_small_files(dir_path, num_files, file_size):
    """Write many small files of the given size (in bytes)."""
    for i in range(num_files):
        file_path = os.path.join(dir_path, f'small_file_{i}.txt')
        with open(file_path, 'wb') as f:
            f.write(os.urandom(file_size))  # random data of the requested size


def read_small_files(dir_path, num_files):
    """Read the small files back."""
    for i in range(num_files):
        file_path = os.path.join(dir_path, f'small_file_{i}.txt')
        with open(file_path, 'rb') as f:
            f.read()  # read the file content


def measure_io(test_type, *args):
    """Measure how long one of the IO tests takes, in seconds."""
    start_time = time.time()
    if test_type == 'large_write':
        write_large_file(*args)
    elif test_type == 'large_read':
        read_large_file(*args)
    elif test_type == 'small_write':
        write_small_files(*args)
    elif test_type == 'small_read':
        read_small_files(*args)
    end_time = time.time()
    return end_time - start_time


def main():
    dir_path = '/var/lib/docker/'  # change to your test directory
    large_file_size_mb = 10240  # large file size (MB)
    small_file_size = 1024  # small file size (bytes)
    num_small_files = 100  # number of small files
    # large-file test
    print("Starting large-file write test...")
    large_write_time = measure_io('large_write', os.path.join(dir_path, 'large_file.bin'), large_file_size_mb)
    print(f"Large-file write time: {large_write_time:.2f} s")
    print("Starting large-file read test...")
    large_read_time = measure_io('large_read', os.path.join(dir_path, 'large_file.bin'))
    print(f"Large-file read time: {large_read_time:.2f} s")
    # small-file test
    print("Starting small-file write test...")
    small_write_time = measure_io('small_write', dir_path, num_small_files, small_file_size)
    print(f"Small-file write time: {small_write_time:.2f} s")
    print("Starting small-file read test...")
    small_read_time = measure_io('small_read', dir_path, num_small_files)
    print(f"Small-file read time: {small_read_time:.2f} s")


if __name__ == '__main__':
    main()
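A minimal usage sketch, assuming the script above is saved as io_test.py on the target node; MB/s can be derived from the reported times.
python3 io_test.py
# e.g. large-file write speed is roughly 10240 MB divided by the reported write time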

View File

@@ -14,7 +14,7 @@ server {
proxy_buffering off; proxy_buffering off;
proxy_buffer_size 4k; proxy_buffer_size 4k;
proxy_buffers 4 12k; proxy_buffers 4 12k;
proxy_set_header Host fake-domain.jxejpt.io; proxy_set_header Host fake-domain.sdejpt.io;
proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Upgrade $http_upgrade; proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade"; proxy_set_header Connection "upgrade";
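After changing the proxy_set_header Host value it is worth validating and reloading nginx; since this nginx runs as the cmii-nginx compose service below, the commands assume docker compose v2 and are run from the directory holding the compose file.
docker compose exec cmii-nginx nginx -t
docker compose exec cmii-nginx nginx -s reload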

View File

@@ -2,7 +2,7 @@ version: '3'
services: services:
cmii-nginx: cmii-nginx:
image: 10.20.1.135:8033/cmii/nginx:1.21.3 image: 134.80.124.7:8033/cmii/nginx:1.21.3
volumes: volumes:
- /etc/nginx/conf.d:/etc/nginx/conf.d - /etc/nginx/conf.d:/etc/nginx/conf.d
- /etc/nginx/nginx.conf:/etc/nginx/nginx.conf - /etc/nginx/nginx.conf:/etc/nginx/nginx.conf
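A usage sketch for bringing the service up against the registry above; docker compose v2 syntax is assumed (the docker-compose binary listed earlier in this commit works the same way).
docker login 134.80.124.7:8033
docker compose up -d
docker compose ps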

View File

@@ -0,0 +1,55 @@
#!/bin/bash
kubectl apply -f k8s-dashboard.yaml
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
kubectl apply -f k8s-nfs.yaml
kubectl delete -f k8s-nfs.yaml
kubectl -n kube-system describe pod $(kubectl -n kube-system get pods | grep nfs-client-provisioner | awk '{print$1}')
kubectl apply -f k8s-nfs-test.yaml
kubectl delete -f k8s-nfs-test.yaml
cd /var/lib/docker/nfs_data
kubectl apply -f k8s-pvc.yaml
kubectl delete -f k8s-pvc.yaml
kubectl apply -f k8s-mongo.yaml
kubectl delete -f k8s-mongo.yaml
kubectl apply -f k8s-emqx.yaml
kubectl delete -f k8s-emqx.yaml
kubectl apply -f k8s-rabbitmq.yaml
kubectl delete -f k8s-rabbitmq.yaml
kubectl apply -f k8s-redis.yaml
kubectl delete -f k8s-redis.yaml
kubectl apply -f k8s-mysql.yaml
kubectl delete -f k8s-mysql.yaml
---
kubectl apply -f k8s-nacos.yaml
kubectl delete -f k8s-nacos.yaml
---
kubectl apply -f k8s-configmap.yaml
kubectl delete -f k8s-configmap.yaml
kubectl apply -f k8s-ingress.yaml
kubectl delete -f k8s-ingress.yaml
kubectl apply -f k8s-frontend.yaml
kubectl delete -f k8s-frontend.yaml
kubectl apply -f k8s-backend.yaml
kubectl delete -f k8s-backend.yaml
kubectl -n kube-system get pods -o jsonpath='{.items[*].metadata.name}'
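On Kubernetes 1.24 and later the dashboard token usually has to be created explicitly rather than read from an auto-generated secret; a hedged alternative to the describe-secret line above, assuming the admin-user ServiceAccount exists in kube-system:
kubectl -n kube-system create token admin-user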

View File

@@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
minio_inner_ip_host=10.129.80.223:9000 minio_inner_ip_host=134.80.124.15:9000
download_ts2mp4_file(){ download_ts2mp4_file(){
echo "" echo ""

View File

@@ -8,7 +8,7 @@ env:
value: "eth0" value: "eth0"
# to be extra safe # to be extra safe
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=enp4s3 kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth0
# delete all calico pods # delete all calico pods
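A sketch to confirm the variable took effect and to recreate the calico pods without deleting them one by one; using a rollout restart here is an assumption about what "delete all calico pods" is meant to achieve.
kubectl -n kube-system set env daemonset/calico-node --list | grep IP_AUTODETECTION_METHOD
kubectl -n kube-system rollout restart daemonset/calico-node
kubectl -n kube-system rollout status daemonset/calico-node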

Binary file not shown.

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -45,4 +45,4 @@ UNLOCK TABLES;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2024-10-15 10:54:30 -- Dump completed on 2024-11-14 11:14:05

View File

@@ -45,4 +45,4 @@ UNLOCK TABLES;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2024-10-15 10:54:30 -- Dump completed on 2024-11-14 11:14:05

File diff suppressed because one or more lines are too long

View File

@@ -55,4 +55,4 @@ UNLOCK TABLES;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2024-10-15 10:54:30 -- Dump completed on 2024-11-14 11:14:05

View File

@@ -35,4 +35,4 @@ UNLOCK TABLES;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2024-10-15 10:54:29 -- Dump completed on 2024-11-14 11:14:04

View File

@@ -45,4 +45,4 @@ UNLOCK TABLES;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2024-10-15 10:54:30 -- Dump completed on 2024-11-14 11:14:05

View File

@@ -36,4 +36,4 @@ UNLOCK TABLES;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2024-10-15 10:54:31 -- Dump completed on 2024-11-14 11:14:05

View File

@@ -35,4 +35,4 @@ UNLOCK TABLES;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2024-10-15 10:54:31 -- Dump completed on 2024-11-14 11:14:05

View File

@@ -46,4 +46,4 @@ UNLOCK TABLES;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2024-10-15 10:54:30 -- Dump completed on 2024-11-14 11:14:05

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -35,4 +35,4 @@ UNLOCK TABLES;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2024-10-15 10:54:29 -- Dump completed on 2024-11-14 11:14:04

File diff suppressed because one or more lines are too long

View File

@@ -9,6 +9,7 @@ SQL_DUMP_FILE=/root/all_tables_4.1.0.sql
SQL_FULL_BACK_UP_FILE=/home/mmc/all_tables_4.0.2_230914_fullback.sql SQL_FULL_BACK_UP_FILE=/home/mmc/all_tables_4.0.2_230914_fullback.sql
/root/wdd/mysql/bin/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 -e 'show databases;' | grep -Ev 'Database|information_schema|mysql|sys|performance_schema' | xargs /root/wdd/mysql/bin/mysqldump -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 --single-transaction --source-data=2 --hex-blob --triggers --routines --events --set-gtid-purged=OFF --databases > "${SQL_FULL_BACK_UP_FILE}" /root/wdd/mysql/bin/mysql -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 -e 'show databases;' | grep -Ev 'Database|information_schema|mysql|sys|performance_schema' | xargs /root/wdd/mysql/bin/mysqldump -uroot -pQzfXQhd3bQ -h127.0.0.1 -P33306 --single-transaction --source-data=2 --hex-blob --triggers --routines --events --set-gtid-purged=OFF --databases > "${SQL_FULL_BACK_UP_FILE}"
# nacos_config was blocked # nacos_config was blocked
/root/wdd/mysql/bin/mysqldump -uroot -pQzfXQhd3bQ -h10.250.0.200 -P33306 -t --set-gtid-purged=OFF cmii_nacos_config config_info his_config_info roles users > 17.cmii_nacos_config.sql /root/wdd/mysql/bin/mysqldump -uroot -pQzfXQhd3bQ -h10.250.0.200 -P33306 -t --set-gtid-purged=OFF cmii_nacos_config config_info his_config_info roles users > 17.cmii_nacos_config.sql
sed -i '1s/^/use cmii_nacos_config;\n/' 17.cmii_nacos_config.sql sed -i '1s/^/use cmii_nacos_config;\n/' 17.cmii_nacos_config.sql