[Agent][Deploy] - cmii update
agent-operator/deploy/z_bjtg/k8s-backend.yaml (+5143 lines, new executable file; diff suppressed because it is too large)
agent-operator/deploy/z_bjtg/k8s-configmap.yaml (+336 lines, new executable file)
@@ -0,0 +1,336 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-cmsportal
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "cmsportal",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-detection
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "detection",
      AppClientId: "APP_FDHW2VLVDWPnnOCy"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-emergency
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "emergency",
      AppClientId: "APP_aGsTAY1uMZrpKdfk"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-multiterminal
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "multiterminal",
      AppClientId: "APP_PvdfRRRBPL8xbIwl"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-oms
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "oms",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-traffic
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "traffic",
      AppClientId: "APP_Jc8i2wOQ1t73QEJS"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-jiangsuwenlv
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "jiangsuwenlv",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-ai-brain
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "ai-brain",
      AppClientId: "APP_rafnuCAmBESIVYMH"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-armypeople
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "armypeople",
      AppClientId: "APP_UIegse6Lfou9pO1U"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-media
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "media",
      AppClientId: "APP_4AU8lbifESQO4FD6"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-mws
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "mws",
      AppClientId: "APP_uKniXPELlRERBBwK"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-open
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "open",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-qinghaitourism
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "qinghaitourism",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-platform
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "platform",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-splice
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "splice",
      AppClientId: "APP_zE0M3sTRXrCIJS8Y"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-logistics
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "logistics",
      AppClientId: "APP_PvdfRRRBPL8xbIwl"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-supervisionh5
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "supervisionh5",
      AppClientId: "APP_qqSu82THfexI8PLM"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-base
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "base",
      AppClientId: "APP_9LY41OaKSqk2btY0"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-security
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "security",
      AppClientId: "APP_JUSEMc7afyWXxvE7"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-securityh5
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "securityh5",
      AppClientId: "APP_N3ImO0Ubfu9peRHD"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-seniclive
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "seniclive",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-share
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "share",
      AppClientId: "APP_4lVSVI0ZGxTssir8"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-threedsimulation
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "threedsimulation",
      AppClientId: "empty"
    }
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tenant-prefix-supervision
  namespace: bjtg
data:
  ingress-config.js: |-
    var __GlobalIngressConfig = {
      TenantEnvironment: "",
      CloudHOST: "10.250.0.110:8888",
      ApplicationShortName: "supervision",
      AppClientId: "APP_qqSu82THfexI8PLM"
    }
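Each tenant-prefix ConfigMap above carries a single ingress-config.js key. A minimal sketch of how a frontend workload could consume one of them is shown below; the Deployment name, container image, and mount path are assumptions for illustration and are not part of this commit:

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmsportal-frontend          # hypothetical consumer, not defined in this commit
  namespace: bjtg
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cmsportal-frontend
  template:
    metadata:
      labels:
        app: cmsportal-frontend
    spec:
      containers:
        - name: web
          image: 10.250.0.110:8033/cmii/nginx:latest          # assumed image, following the registry used above
          volumeMounts:
            - name: ingress-config
              mountPath: /usr/share/nginx/html/ingress-config.js   # assumed path read by the frontend bundle
              subPath: ingress-config.js
      volumes:
        - name: ingress-config
          configMap:
            name: tenant-prefix-cmsportal

The same pattern would repeat per tenant application, swapping only the ConfigMap name.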
agent-operator/deploy/z_bjtg/k8s-dashboard.yaml (+1842 lines, new executable file; diff suppressed because it is too large)
agent-operator/deploy/z_bjtg/k8s-emqx.yaml (+1578 lines, new executable file; diff suppressed because it is too large)
agent-operator/deploy/z_bjtg/k8s-frontend.yaml (+1909 lines, new executable file; diff suppressed because it is too large)
agent-operator/deploy/z_bjtg/k8s-ingress.yaml (+3264 lines, new executable file; diff suppressed because it is too large)
agent-operator/deploy/z_bjtg/k8s-mongo.yaml (+450 lines, new executable file)
@@ -0,0 +1,450 @@
---
apiVersion: v1
kind: Service
metadata:
  name: helm-mongo
  namespace: bjtg
  labels:
    cmii.app: helm-mongo
    cmii.type: middleware
    helm.sh/chart: mongo-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 5.5.0
spec:
  type: ClusterIP
  selector:
    cmii.app: helm-mongo
    cmii.type: middleware
  ports:
    - port: 27017
      name: server-27017
      targetPort: 27017
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-mongo
  namespace: bjtg
  labels:
    cmii.app: helm-mongo
    cmii.type: middleware
    helm.sh/chart: mongo-1.1.0
    app.kubernetes.io/managed-by: octopus-control
    app.kubernetes.io/version: 5.5.0
spec:
  serviceName: helm-mongo
  replicas: 1
  selector:
    matchLabels:
      cmii.app: helm-mongo
      cmii.type: middleware
  template:
    metadata:
      labels:
        cmii.app: helm-mongo
        cmii.type: middleware
        helm.sh/chart: mongo-1.1.0
        app.kubernetes.io/managed-by: octopus-control
        app.kubernetes.io/version: 5.5.0
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      affinity: { }
      containers:
        - name: helm-mongo
          image: 10.250.0.110:8033/cmii/mongo:5.0
          resources: { }
          ports:
            - containerPort: 27017
              name: mongo27017
              protocol: TCP
          env:
            - name: MONGO_INITDB_ROOT_USERNAME
              value: cmlc
            - name: MONGO_INITDB_ROOT_PASSWORD
              value: REdPza8#oVlt
          volumeMounts:
            - name: mongo-data
              mountPath: /data/db
              readOnly: false
              subPath: default/helm-mongo/data/db
      volumes:
        - name: mongo-data
          persistentVolumeClaim:
            claimName: helm-mongo
---
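The Mongo root credentials above are embedded as plain-text env values in the StatefulSet. A hedged alternative, assuming the operator is free to change how credentials are injected, is to move them into a Secret and reference it from the pod; the Secret name below is an assumption, not something defined in this commit:

---
apiVersion: v1
kind: Secret
metadata:
  name: helm-mongo-credentials      # hypothetical name, not part of this commit
  namespace: bjtg
type: Opaque
stringData:
  MONGO_INITDB_ROOT_USERNAME: cmlc
  MONGO_INITDB_ROOT_PASSWORD: "REdPza8#oVlt"

The two MONGO_INITDB_* env entries in the StatefulSet could then be replaced with an envFrom / secretRef block pointing at this Secret, keeping the password out of the manifest itself.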
agent-operator/deploy/z_bjtg/k8s-mysql.yaml (+2526 lines, new executable file; diff suppressed because it is too large)
agent-operator/deploy/z_bjtg/k8s-nacos.yaml (+756 lines, new executable file)
@@ -0,0 +1,756 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: helm-nacos-cm
  namespace: bjtg
  labels:
    cmii.app: helm-nacos
    cmii.type: middleware
    octopus.control: nacos-wdd
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: 5.5.0
data:
  mysql.db.name: "cmii_nacos_config"
  mysql.db.host: "helm-mysql"
  mysql.port: "3306"
  mysql.user: "k8s_admin"
  mysql.password: "fP#UaH6qQ3)8"
---
apiVersion: v1
kind: Service
metadata:
  name: helm-nacos
  namespace: bjtg
  labels:
    cmii.app: helm-nacos
    cmii.type: middleware
    octopus.control: nacos-wdd
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: 5.5.0
spec:
  type: NodePort
  selector:
    cmii.app: helm-nacos
    cmii.type: middleware
  ports:
    - port: 8848
      name: server
      targetPort: 8848
      nodePort: 38989
    - port: 9848
      name: server12
      targetPort: 9848
      nodePort: 38912
    - port: 9849
      name: server23
      targetPort: 9849
      nodePort: 38923
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: helm-nacos
  namespace: bjtg
  labels:
    cmii.app: helm-nacos
    cmii.type: middleware
    octopus.control: nacos-wdd
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: 5.5.0
spec:
  serviceName: helm-nacos
  replicas: 1
  selector:
    matchLabels:
      cmii.app: helm-nacos
      cmii.type: middleware
  template:
    metadata:
      labels:
        cmii.app: helm-nacos
        cmii.type: middleware
        octopus.control: nacos-wdd
        app.kubernetes.io/managed-by: octopus
        app.kubernetes.io/version: 5.5.0
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      affinity: { }
      containers:
        - name: nacos-server
          image: 10.250.0.110:8033/cmii/nacos-server:v2.1.2
          ports:
            - containerPort: 8848
              name: dashboard
          env:
            - name: NACOS_AUTH_ENABLE
              value: "false"
            - name: NACOS_REPLICAS
              value: "1"
            - name: MYSQL_SERVICE_DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: helm-nacos-cm
                  key: mysql.db.name
            - name: MYSQL_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: helm-nacos-cm
                  key: mysql.port
            - name: MYSQL_SERVICE_USER
              valueFrom:
                configMapKeyRef:
                  name: helm-nacos-cm
                  key: mysql.user
            - name: MYSQL_SERVICE_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: helm-nacos-cm
                  key: mysql.password
            - name: MYSQL_SERVICE_HOST
              valueFrom:
                configMapKeyRef:
                  name: helm-nacos-cm
                  key: mysql.db.host
            - name: NACOS_SERVER_PORT
              value: "8848"
            - name: NACOS_APPLICATION_PORT
              value: "8848"
            - name: PREFER_HOST_MODE
              value: "hostname"
            - name: MODE
              value: standalone
            - name: SPRING_DATASOURCE_PLATFORM
              value: mysql
---
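Backend services reach this Nacos instance either inside the cluster through the helm-nacos Service on port 8848 or from outside through NodePort 38989. A minimal sketch of the client-side wiring is below; the Deployment name, image, and the use of Spring Cloud Alibaba's relaxed-binding environment variables are assumptions about the consuming services, not something defined in this commit:

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-backend             # hypothetical consumer, not defined in this commit
  namespace: bjtg
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-backend
  template:
    metadata:
      labels:
        app: example-backend
    spec:
      containers:
        - name: app
          image: 10.250.0.110:8033/cmii/example-backend:latest   # assumed image path
          env:
            # assumed Spring Cloud Alibaba properties, supplied via relaxed binding
            - name: SPRING_CLOUD_NACOS_DISCOVERY_SERVER_ADDR
              value: helm-nacos.bjtg.svc.cluster.local:8848
            - name: SPRING_CLOUD_NACOS_CONFIG_SERVER_ADDR
              value: helm-nacos.bjtg.svc.cluster.local:8848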
agent-operator/deploy/z_bjtg/k8s-nfs-test.yaml (+216 lines, new executable file)
@@ -0,0 +1,216 @@
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-prod-distribute" # must match metadata.name in nfs-StorageClass.yaml
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: nfs-prod-distribute
  resources:
    requests:
      storage: 1Mi
---
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
    - name: test-pod
      image: 10.250.0.110:8033/cmii/busybox
      command:
        - "/bin/sh"
      args:
        - "-c"
        - "touch /mnt/NFS-CREATE-SUCCESS && exit 0 || exit 1" # create a SUCCESS marker file, then exit
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim # must match the PVC name above
---
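Once test-pod has completed, the same claim can be checked from a second pod to confirm that the marker file really landed on the NFS share. This is a hedged companion sketch using the same busybox image; the pod name is hypothetical:

---
kind: Pod
apiVersion: v1
metadata:
  name: test-pod-verify             # hypothetical name, not part of this commit
spec:
  containers:
    - name: test-pod-verify
      image: 10.250.0.110:8033/cmii/busybox
      command:
        - "/bin/sh"
      args:
        - "-c"
        - "ls /mnt/NFS-CREATE-SUCCESS"   # exits non-zero if the marker file was not created
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim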
agent-operator/deploy/z_bjtg/k8s-nfs.yaml (+672 lines, new executable file)
@@ -0,0 +1,672 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system # set the namespace to match your environment; same for the objects below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [ "" ]
    resources: [ "persistentvolumes" ]
    verbs: [ "get", "list", "watch", "create", "delete" ]
  - apiGroups: [ "" ]
    resources: [ "persistentvolumeclaims" ]
    verbs: [ "get", "list", "watch", "update" ]
  - apiGroups: [ "storage.k8s.io" ]
    resources: [ "storageclasses" ]
    verbs: [ "get", "list", "watch" ]
  - apiGroups: [ "" ]
    resources: [ "events" ]
    verbs: [ "create", "update", "patch" ]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: ClusterRole
  # name: nfs-client-provisioner-runner
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
rules:
  - apiGroups: [ "" ]
    resources: [ "endpoints" ]
    verbs: [ "get", "list", "watch", "create", "update", "patch" ]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # must match the PROVISIONER_NAME env var in the provisioner Deployment below
parameters:
  archiveOnDelete: "false"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system # must match the namespace used by the RBAC objects above
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: cmlc-nfs-storage
            - name: NFS_SERVER
              value: 10.250.0.110
            - name: NFS_PATH
              value: /var/lib/docker/nfs_data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.250.0.110
            path: /var/lib/docker/nfs_data
---
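The mongo StatefulSet earlier in this commit mounts a PersistentVolumeClaim named helm-mongo that is not defined in the files shown here. Assuming it is meant to be provisioned dynamically by this StorageClass, a sketch would look like the following; the requested size is an assumption:

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: helm-mongo
  namespace: bjtg
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: nfs-prod-distribute
  resources:
    requests:
      storage: 10Gi   # assumed size; adjust to the environment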
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system #根据实际环境设定namespace,下面类同
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-client-provisioner-runner
|
||||
rules:
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "persistentvolumes" ]
|
||||
verbs: [ "get", "list", "watch", "create", "delete" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "persistentvolumeclaims" ]
|
||||
verbs: [ "get", "list", "watch", "update" ]
|
||||
- apiGroups: [ "storage.k8s.io" ]
|
||||
resources: [ "storageclasses" ]
|
||||
verbs: [ "get", "list", "watch" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "events" ]
|
||||
verbs: [ "create", "update", "patch" ]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: run-nfs-client-provisioner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
# name: nfs-client-provisioner-runner
|
||||
name: cluster-admin
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "endpoints" ]
|
||||
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: nfs-prod-distribute
|
||||
provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致parameters: archiveOnDelete: "false"
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nfs-client-provisioner
|
||||
labels:
|
||||
app: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system #与RBAC文件中的namespace保持一致
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nfs-client-provisioner
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nfs-client-provisioner
|
||||
spec:
|
||||
serviceAccountName: nfs-client-provisioner
|
||||
containers:
|
||||
- name: nfs-client-provisioner
|
||||
image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
|
||||
volumeMounts:
|
||||
- name: nfs-client-root
|
||||
mountPath: /persistentvolumes
|
||||
env:
|
||||
- name: PROVISIONER_NAME
|
||||
value: cmlc-nfs-storage
|
||||
- name: NFS_SERVER
|
||||
value: 10.250.0.110
|
||||
- name: NFS_PATH
|
||||
value: /var/lib/docker/nfs_data
|
||||
volumes:
|
||||
- name: nfs-client-root
|
||||
nfs:
|
||||
server: 10.250.0.110
|
||||
path: /var/lib/docker/nfs_data
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system #根据实际环境设定namespace,下面类同
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-client-provisioner-runner
|
||||
rules:
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "persistentvolumes" ]
|
||||
verbs: [ "get", "list", "watch", "create", "delete" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "persistentvolumeclaims" ]
|
||||
verbs: [ "get", "list", "watch", "update" ]
|
||||
- apiGroups: [ "storage.k8s.io" ]
|
||||
resources: [ "storageclasses" ]
|
||||
verbs: [ "get", "list", "watch" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "events" ]
|
||||
verbs: [ "create", "update", "patch" ]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: run-nfs-client-provisioner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
# name: nfs-client-provisioner-runner
|
||||
name: cluster-admin
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "endpoints" ]
|
||||
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: nfs-prod-distribute
|
||||
provisioner: cmlc-nfs-storage #这里的名称要和provisioner配置文件中的环境变量PROVISIONER_NAME保持一致parameters: archiveOnDelete: "false"
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nfs-client-provisioner
|
||||
labels:
|
||||
app: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system # keep this consistent with the namespace in the RBAC manifests
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nfs-client-provisioner
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nfs-client-provisioner
|
||||
spec:
|
||||
serviceAccountName: nfs-client-provisioner
|
||||
containers:
|
||||
- name: nfs-client-provisioner
|
||||
image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
|
||||
volumeMounts:
|
||||
- name: nfs-client-root
|
||||
mountPath: /persistentvolumes
|
||||
env:
|
||||
- name: PROVISIONER_NAME
|
||||
value: cmlc-nfs-storage
|
||||
- name: NFS_SERVER
|
||||
value: 10.250.0.110
|
||||
- name: NFS_PATH
|
||||
value: /var/lib/docker/nfs_data
|
||||
volumes:
|
||||
- name: nfs-client-root
|
||||
nfs:
|
||||
server: 10.250.0.110
|
||||
path: /var/lib/docker/nfs_data
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system # set the namespace according to your environment; the same applies to the manifests below
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-client-provisioner-runner
|
||||
rules:
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "persistentvolumes" ]
|
||||
verbs: [ "get", "list", "watch", "create", "delete" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "persistentvolumeclaims" ]
|
||||
verbs: [ "get", "list", "watch", "update" ]
|
||||
- apiGroups: [ "storage.k8s.io" ]
|
||||
resources: [ "storageclasses" ]
|
||||
verbs: [ "get", "list", "watch" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "events" ]
|
||||
verbs: [ "create", "update", "patch" ]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: run-nfs-client-provisioner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
# name: nfs-client-provisioner-runner
|
||||
name: cluster-admin
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "endpoints" ]
|
||||
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # this name must match the PROVISIONER_NAME environment variable in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nfs-client-provisioner
|
||||
labels:
|
||||
app: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system # keep this consistent with the namespace in the RBAC manifests
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nfs-client-provisioner
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nfs-client-provisioner
|
||||
spec:
|
||||
serviceAccountName: nfs-client-provisioner
|
||||
containers:
|
||||
- name: nfs-client-provisioner
|
||||
image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
|
||||
volumeMounts:
|
||||
- name: nfs-client-root
|
||||
mountPath: /persistentvolumes
|
||||
env:
|
||||
- name: PROVISIONER_NAME
|
||||
value: cmlc-nfs-storage
|
||||
- name: NFS_SERVER
|
||||
value: 10.250.0.110
|
||||
- name: NFS_PATH
|
||||
value: /var/lib/docker/nfs_data
|
||||
volumes:
|
||||
- name: nfs-client-root
|
||||
nfs:
|
||||
server: 10.250.0.110
|
||||
path: /var/lib/docker/nfs_data
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system # set the namespace according to your environment; the same applies to the manifests below
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-client-provisioner-runner
|
||||
rules:
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "persistentvolumes" ]
|
||||
verbs: [ "get", "list", "watch", "create", "delete" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "persistentvolumeclaims" ]
|
||||
verbs: [ "get", "list", "watch", "update" ]
|
||||
- apiGroups: [ "storage.k8s.io" ]
|
||||
resources: [ "storageclasses" ]
|
||||
verbs: [ "get", "list", "watch" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "events" ]
|
||||
verbs: [ "create", "update", "patch" ]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: run-nfs-client-provisioner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
# name: nfs-client-provisioner-runner
|
||||
name: cluster-admin
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "endpoints" ]
|
||||
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # this name must match the PROVISIONER_NAME environment variable in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nfs-client-provisioner
|
||||
labels:
|
||||
app: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system # keep this consistent with the namespace in the RBAC manifests
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nfs-client-provisioner
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nfs-client-provisioner
|
||||
spec:
|
||||
serviceAccountName: nfs-client-provisioner
|
||||
containers:
|
||||
- name: nfs-client-provisioner
|
||||
image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
|
||||
volumeMounts:
|
||||
- name: nfs-client-root
|
||||
mountPath: /persistentvolumes
|
||||
env:
|
||||
- name: PROVISIONER_NAME
|
||||
value: cmlc-nfs-storage
|
||||
- name: NFS_SERVER
|
||||
value: 10.250.0.110
|
||||
- name: NFS_PATH
|
||||
value: /var/lib/docker/nfs_data
|
||||
volumes:
|
||||
- name: nfs-client-root
|
||||
nfs:
|
||||
server: 10.250.0.110
|
||||
path: /var/lib/docker/nfs_data
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system # set the namespace according to your environment; the same applies to the manifests below
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-client-provisioner-runner
|
||||
rules:
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "persistentvolumes" ]
|
||||
verbs: [ "get", "list", "watch", "create", "delete" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "persistentvolumeclaims" ]
|
||||
verbs: [ "get", "list", "watch", "update" ]
|
||||
- apiGroups: [ "storage.k8s.io" ]
|
||||
resources: [ "storageclasses" ]
|
||||
verbs: [ "get", "list", "watch" ]
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "events" ]
|
||||
verbs: [ "create", "update", "patch" ]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: run-nfs-client-provisioner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
# name: nfs-client-provisioner-runner
|
||||
name: cluster-admin
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "endpoints" ]
|
||||
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: leader-locking-nfs-client-provisioner
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-prod-distribute
provisioner: cmlc-nfs-storage # this name must match the PROVISIONER_NAME environment variable in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nfs-client-provisioner
|
||||
labels:
|
||||
app: nfs-client-provisioner
|
||||
# replace with namespace where provisioner is deployed
|
||||
namespace: kube-system # keep this consistent with the namespace in the RBAC manifests
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nfs-client-provisioner
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nfs-client-provisioner
|
||||
spec:
|
||||
serviceAccountName: nfs-client-provisioner
|
||||
containers:
|
||||
- name: nfs-client-provisioner
|
||||
image: 10.250.0.110:8033/cmii/nfs-subdir-external-provisioner:v4.0.2
|
||||
volumeMounts:
|
||||
- name: nfs-client-root
|
||||
mountPath: /persistentvolumes
|
||||
env:
|
||||
- name: PROVISIONER_NAME
|
||||
value: cmlc-nfs-storage
|
||||
- name: NFS_SERVER
|
||||
value: 10.250.0.110
|
||||
- name: NFS_PATH
|
||||
value: /var/lib/docker/nfs_data
|
||||
volumes:
|
||||
- name: nfs-client-root
|
||||
nfs:
|
||||
server: 10.250.0.110
|
||||
path: /var/lib/docker/nfs_data
456
agent-operator/deploy/z_bjtg/k8s-pvc.yaml
Executable file
@@ -0,0 +1,456 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: nfs-backend-log-pvc
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: nfs-backend-log-pvc
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-emqxs
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-emqxs
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-mongo
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-mongo
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 30Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-rabbitmq
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-rabbitmq
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: nfs-backend-log-pvc
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: nfs-backend-log-pvc
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-emqxs
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-emqxs
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-mongo
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-mongo
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 30Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-rabbitmq
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-rabbitmq
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: nfs-backend-log-pvc
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: nfs-backend-log-pvc
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-emqxs
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-emqxs
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-mongo
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-mongo
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 30Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-rabbitmq
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-rabbitmq
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: nfs-backend-log-pvc
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: nfs-backend-log-pvc
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-emqxs
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-emqxs
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-mongo
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-mongo
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 30Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-rabbitmq
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-rabbitmq
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: nfs-backend-log-pvc
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: nfs-backend-log-pvc
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-emqxs
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-emqxs
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-mongo
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-mongo
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 30Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-rabbitmq
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-rabbitmq
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: nfs-backend-log-pvc
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: nfs-backend-log-pvc
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-emqxs
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-emqxs
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-mongo
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-mongo
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 30Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: helm-rabbitmq
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.type: middleware-base
|
||||
cmii.app: helm-rabbitmq
|
||||
helm.sh/chart: all-persistence-volume-claims-1.1.0
|
||||
app.kubernetes.io/version: 5.5.0
|
||||
spec:
|
||||
storageClassName: nfs-prod-distribute
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
3924
agent-operator/deploy/z_bjtg/k8s-rabbitmq.yaml
Executable file
File diff suppressed because it is too large
Load Diff
3492
agent-operator/deploy/z_bjtg/k8s-redis.yaml
Executable file
File diff suppressed because it is too large
Load Diff
499
agent-operator/deploy/z_bjtg/k8s-srs.yaml
Executable file
@@ -0,0 +1,499 @@
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srs-cm
|
||||
namespace: bjtg
|
||||
labels:
|
||||
cmii.app: live-srs
|
||||
cmii.type: live
|
||||
octopus.control: wdd
|
||||
app.kubernetes.io/managed-by: octopus
|
||||
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
|
||||
data:
|
||||
srs.rtc.conf: |-
|
||||
listen 30935;
|
||||
max_connections 4096;
|
||||
srs_log_tank console;
|
||||
srs_log_level info;
|
||||
srs_log_file /home/srs.log;
|
||||
daemon off;
|
||||
http_api {
|
||||
enabled on;
|
||||
listen 1985;
|
||||
crossdomain on;
|
||||
}
|
||||
stats {
|
||||
network 0;
|
||||
}
|
||||
http_server {
|
||||
enabled on;
|
||||
listen 8080;
|
||||
dir /home/hls;
|
||||
}
|
||||
srt_server {
|
||||
enabled on;
|
||||
listen 30556;
|
||||
maxbw 1000000000;
|
||||
connect_timeout 4000;
|
||||
peerlatency 600;
|
||||
recvlatency 600;
|
||||
}
|
||||
rtc_server {
|
||||
enabled on;
|
||||
listen 30090;
|
||||
candidate $CANDIDATE;
|
||||
}
|
||||
vhost __defaultVhost__ {
|
||||
http_hooks {
|
||||
enabled on;
|
||||
on_publish http://helm-live-op-svc-v2:8080/hooks/on_push;
|
||||
}
|
||||
http_remux {
|
||||
enabled on;
|
||||
}
|
||||
rtc {
|
||||
enabled on;
|
||||
rtmp_to_rtc on;
|
||||
rtc_to_rtmp on;
|
||||
keep_bframe off;
|
||||
}
|
||||
tcp_nodelay on;
|
||||
min_latency on;
|
||||
play {
|
||||
gop_cache off;
|
||||
mw_latency 100;
|
||||
mw_msgs 10;
|
||||
}
|
||||
publish {
|
||||
firstpkt_timeout 8000;
|
||||
normal_timeout 4000;
|
||||
mr on;
|
||||
}
|
||||
dvr {
|
||||
enabled off;
|
||||
dvr_path /home/dvr/[app]/[stream]/[2006][01]/[timestamp].mp4;
|
||||
dvr_plan session;
|
||||
}
|
||||
hls {
|
||||
enabled on;
|
||||
hls_path /home/hls;
|
||||
hls_fragment 10;
|
||||
hls_window 60;
|
||||
hls_m3u8_file [app]/[stream].m3u8;
|
||||
hls_ts_file [app]/[stream]/[2006][01][02]/[timestamp]-[duration].ts;
|
||||
hls_cleanup on;
|
||||
hls_entry_prefix http://10.250.0.110:8888;
|
||||
}
|
||||
}
|
||||
---
|
||||
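# Example usage (a sketch; the app/stream name "live/demo" is illustrative only, not taken
# from this configuration): with the NodePort exporter service below, a publisher can push
# RTMP into this SRS instance with something like
#   ffmpeg -re -i sample.mp4 -c copy -f flv rtmp://10.250.0.110:30935/live/demo
# after which SRS remuxes the stream to HTTP-FLV and writes HLS segments under /home/hls,
# as configured in the http_server and hls sections of srs.rtc.conf above.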
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srs-svc-exporter
|
||||
namespace: bjtg
|
||||
labels:
|
||||
octopus.control: wdd
|
||||
app.kubernetes.io/managed-by: octopus
|
||||
spec:
|
||||
ports:
|
||||
- name: rtmp
|
||||
protocol: TCP
|
||||
port: 30935
|
||||
targetPort: 30935
|
||||
nodePort: 30935
|
||||
- name: rtc
|
||||
protocol: UDP
|
||||
port: 30090
|
||||
targetPort: 30090
|
||||
nodePort: 30090
|
||||
- name: rtc-tcp
|
||||
protocol: TCP
|
||||
port: 30090
|
||||
targetPort: 30090
|
||||
nodePort: 30090
|
||||
- name: srt
|
||||
protocol: UDP
|
||||
port: 30556
|
||||
targetPort: 30556
|
||||
nodePort: 30556
|
||||
- name: api
|
||||
protocol: TCP
|
||||
port: 1985
|
||||
targetPort: 1985
|
||||
nodePort: 30557
|
||||
selector:
|
||||
srs-role: rtc
|
||||
type: NodePort
|
||||
sessionAffinity: None
|
||||
externalTrafficPolicy: Cluster
|
||||
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srs-svc
|
||||
namespace: bjtg
|
||||
labels:
|
||||
octopus.control: wdd
|
||||
app.kubernetes.io/managed-by: octopus
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
port: 8080
|
||||
targetPort: 8080
|
||||
- name: api
|
||||
protocol: TCP
|
||||
port: 1985
|
||||
targetPort: 1985
|
||||
selector:
|
||||
srs-role: rtc
|
||||
type: ClusterIP
|
||||
sessionAffinity: None
|
||||
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-srsrtc-svc
|
||||
namespace: bjtg
|
||||
labels:
|
||||
octopus.control: wdd
|
||||
app.kubernetes.io/managed-by: octopus
|
||||
spec:
|
||||
ports:
|
||||
- name: rtmp
|
||||
protocol: TCP
|
||||
port: 30935
|
||||
targetPort: 30935
|
||||
selector:
|
||||
srs-role: rtc
|
||||
type: ClusterIP
|
||||
sessionAffinity: None
|
||||
|
||||
---
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: helm-live-srs-rtc
|
||||
namespace: bjtg
|
||||
labels:
|
||||
octopus.control: wdd
|
||||
app.kubernetes.io/managed-by: octopus
|
||||
cmii.app: live-srs
|
||||
cmii.type: live
|
||||
helm.sh/chart: cmlc-live-srs-rtc-2.0.0
|
||||
srs-role: rtc
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
srs-role: rtc
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
srs-role: rtc
|
||||
spec:
|
||||
volumes:
|
||||
- name: srs-conf-file
|
||||
configMap:
|
||||
name: helm-live-srs-cm
|
||||
items:
|
||||
- key: srs.rtc.conf
|
||||
path: docker.conf
|
||||
defaultMode: 420
|
||||
- name: srs-vol
|
||||
emptyDir:
|
||||
sizeLimit: 8Gi
|
||||
containers:
|
||||
- name: srs-rtc
|
||||
image: 10.250.0.110:8033/cmii/srs:v5.0.195
|
||||
ports:
|
||||
- name: srs-rtmp
|
||||
containerPort: 30935
|
||||
protocol: TCP
|
||||
- name: srs-api
|
||||
containerPort: 1985
|
||||
protocol: TCP
|
||||
- name: srs-flv
|
||||
containerPort: 8080
|
||||
protocol: TCP
|
||||
- name: srs-webrtc
|
||||
containerPort: 30090
|
||||
protocol: UDP
|
||||
- name: srs-webrtc-tcp
|
||||
containerPort: 30090
|
||||
protocol: TCP
|
||||
- name: srs-srt
|
||||
containerPort: 30556
|
||||
protocol: UDP
|
||||
env:
|
||||
- name: CANDIDATE
|
||||
value: 10.250.0.110
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1200m
|
||||
memory: 6Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
volumeMounts:
|
||||
- name: srs-conf-file
|
||||
mountPath: /usr/local/srs/conf/docker.conf
|
||||
subPath: docker.conf
|
||||
- name: srs-vol
|
||||
mountPath: /home/dvr
|
||||
subPath: bjtg/helm-live/dvr
|
||||
- name: srs-vol
|
||||
mountPath: /home/hls
|
||||
subPath: bjtg/helm-live/hls
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: Always
|
||||
- name: oss-adaptor
|
||||
image: 10.250.0.110:8033/cmii/cmii-srs-oss-adaptor:2023-SA
|
||||
env:
|
||||
- name: OSS_ENDPOINT
|
||||
value: 'http://10.250.0.110:9000'
|
||||
- name: OSS_AK
|
||||
value: cmii
|
||||
- name: OSS_SK
|
||||
value: 'B#923fC7mk'
|
||||
- name: OSS_BUCKET
|
||||
value: live-cluster-hls
|
||||
- name: SRS_OP
|
||||
value: 'http://helm-live-op-svc-v2:8080'
|
||||
- name: MYSQL_ENDPOINT
|
||||
value: 'helm-mysql:3306'
|
||||
- name: MYSQL_USERNAME
|
||||
value: k8s_admin
|
||||
- name: MYSQL_PASSWORD
|
||||
value: fP#UaH6qQ3)8
|
||||
- name: MYSQL_DATABASE
|
||||
value: cmii_live_srs_op
|
||||
- name: MYSQL_TABLE
|
||||
value: live_segment
|
||||
- name: LOG_LEVEL
|
||||
value: info
|
||||
- name: OSS_META
|
||||
value: 'yes'
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1200m
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
volumeMounts:
|
||||
- name: srs-vol
|
||||
mountPath: /cmii/share/hls
|
||||
subPath: bjtg/helm-live/hls
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: Always
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
dnsPolicy: ClusterFirst
|
||||
securityContext: { }
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
affinity: { }
|
||||
schedulerName: default-scheduler
|
||||
serviceName: helm-live-srsrtc-svc
|
||||
podManagementPolicy: OrderedReady
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
partition: 0
|
||||
revisionHistoryLimit: 10
|
||||
---
|
||||
# live-srs section
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: helm-live-op-v2
|
||||
namespace: bjtg
|
||||
labels:
|
||||
octopus.control: wdd
|
||||
app.kubernetes.io/managed-by: octopus
|
||||
cmii.app: live-engine
|
||||
cmii.type: live
|
||||
helm.sh/chart: cmlc-live-live-op-2.0.0
|
||||
live-role: op-v2
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
live-role: op-v2
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
live-role: op-v2
|
||||
spec:
|
||||
volumes:
|
||||
- name: srs-conf-file
|
||||
configMap:
|
||||
name: helm-live-op-cm-v2
|
||||
items:
|
||||
- key: live.op.conf
|
||||
path: bootstrap.yaml
|
||||
defaultMode: 420
|
||||
containers:
|
||||
- name: helm-live-op-v2
|
||||
image: 10.250.0.110:8033/cmii/cmii-live-operator:5.2.0
|
||||
ports:
|
||||
- name: operator
|
||||
containerPort: 8080
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 4800m
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
volumeMounts:
|
||||
- name: srs-conf-file
|
||||
mountPath: /cmii/bootstrap.yaml
|
||||
subPath: bootstrap.yaml
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /cmii/ping
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 20
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /cmii/ping
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 20
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: Always
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
dnsPolicy: ClusterFirst
|
||||
securityContext: { }
|
||||
imagePullSecrets:
|
||||
- name: harborsecret
|
||||
affinity: { }
|
||||
schedulerName: default-scheduler
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 25%
|
||||
maxSurge: 25%
|
||||
revisionHistoryLimit: 10
|
||||
progressDeadlineSeconds: 600
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-op-svc-v2
|
||||
namespace: bjtg
|
||||
labels:
|
||||
octopus.control: wdd
|
||||
app.kubernetes.io/managed-by: octopus
|
||||
spec:
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
targetPort: 8080
|
||||
nodePort: 30333
|
||||
selector:
|
||||
live-role: op-v2
|
||||
type: NodePort
|
||||
sessionAffinity: None
|
||||
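# Reachability check (a sketch, assuming the node address 10.250.0.110 used elsewhere in this
# file is routable from the caller): the operator health endpoint exposed through this
# NodePort can be probed with
#   curl http://10.250.0.110:30333/cmii/ping
# which is the same /cmii/ping path used by the Deployment's liveness and readiness probes.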
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-op-svc
|
||||
namespace: bjtg
|
||||
labels:
|
||||
octopus.control: wdd
|
||||
app.kubernetes.io/managed-by: octopus
|
||||
spec:
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
targetPort: 8080
|
||||
selector:
|
||||
live-role: op
|
||||
type: ClusterIP
|
||||
sessionAffinity: None
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: helm-live-op-cm-v2
|
||||
namespace: bjtg
|
||||
labels:
|
||||
octopus.control: wdd
|
||||
app.kubernetes.io/managed-by: octopus
|
||||
cmii.app: live-engine
|
||||
cmii.type: live
|
||||
data:
|
||||
live.op.conf: |-
|
||||
server:
|
||||
port: 8080
|
||||
spring:
|
||||
main:
|
||||
allow-bean-definition-overriding: true
|
||||
allow-circular-references: true
|
||||
application:
|
||||
name: cmii-live-operator
|
||||
platform:
|
||||
info:
|
||||
name: cmii-live-operator
|
||||
description: cmii-live-operator
|
||||
version: 5.5.0
|
||||
scanPackage: com.cmii.live.op
|
||||
cloud:
|
||||
nacos:
|
||||
config:
|
||||
username: developer
|
||||
password: N@cos14Good
|
||||
server-addr: helm-nacos:8848
|
||||
extension-configs:
|
||||
- data-id: cmii-live-operator.yml
|
||||
group: 5.5.0
|
||||
refresh: true
|
||||
shared-configs:
|
||||
- data-id: cmii-backend-system.yml
|
||||
group: 5.5.0
|
||||
refresh: true
|
||||
discovery:
|
||||
enabled: false
|
||||
|
||||
live:
|
||||
engine:
|
||||
type: srs
|
||||
endpoint: 'http://helm-live-srs-svc:1985'
|
||||
|
||||
proto:
|
||||
rtmp: 'rtmp://10.250.0.110:30935'
|
||||
rtsp: 'rtsp://10.250.0.110:30554'
|
||||
srt: 'srt://10.250.0.110:30556'
|
||||
flv: 'http://10.250.0.110:30500'
|
||||
hls: 'http://10.250.0.110:30500'
|
||||
rtc: 'webrtc://10.250.0.110:30557'
|
||||
replay: 'https://10.250.0.110:30333'
|
||||
minio:
|
||||
endpoint: http://10.250.0.110:9000
|
||||
access-key: cmii
|
||||
secret-key: B#923fC7mk
|
||||
bucket: live-cluster-hls