Add code for the RMDC section

zeaslity
2025-12-12 18:19:31 +08:00
parent fd60868b97
commit d962ace967
23 changed files with 2174 additions and 42 deletions

11
.idea/go.imports.xml generated Normal file
View File

@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="GoImports">
<option name="excludedPackages">
<array>
<option value="github.com/pkg/errors" />
<option value="golang.org/x/net/context" />
</array>
</option>
</component>
</project>

102
.idea/workspace.xml generated
View File

@@ -4,23 +4,31 @@
<option name="autoReloadType" value="SELECTIVE" />
</component>
<component name="ChangeListManager">
<list default="true" id="a078e6aa-c7c7-487c-ab23-90fee7ad88b2" name="Changes" comment="新增雄安空能院项目">
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/deploy/k8s-pyfusion-configmap.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/关停计划/offline.conf" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/关停计划/关停恢复.md" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/关停计划/关停脚本.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/关停计划/启动脚本.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/67-202508-雄安空能院/关停计划/备份/real-nginx-proxy.conf" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/kubernetes-images-1.30.14.txt" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/rke-1.30.14-cluster-official.yml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/rke-13014-cluster-security.yml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/rke-13014-cluster-security.yml" afterDir="false" />
<list default="true" id="a078e6aa-c7c7-487c-ab23-90fee7ad88b2" name="Changes" comment="新增GPU部分">
<change afterPath="$PROJECT_DIR$/.idea/go.imports.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/kubernetes-dashboard-v2.7.0.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/kubernetes-dashboard-v7.10.2-无法启动.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/68-202511-k8s升级1-30-14版本/kubernetes-images-1.30.4.xlsx" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/harbor-secret.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/helm-minio.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/k8s-emqx.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/k8s-postgresql-timescaledb.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/k8s-timescaledb-16C32GB-prod.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/kubernetes-dashboard-v2.7.0.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/tpu_plugin_pcie.yaml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/70-202511-XA低空平台/cmii-update.sh" afterDir="false" />
<change afterPath="$PROJECT_DIR$/998-常用脚本/cmii-hosts.txt" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/41-202410-珠海横琴/0-dependencies.sh" beforeDir="false" afterPath="$PROJECT_DIR$/41-202410-珠海横琴/0-dependencies.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/67-202508-雄安空能院/重要备份.sh" beforeDir="false" afterPath="$PROJECT_DIR$/67-202508-雄安空能院/重要备份.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/58-202503-新DEMO环境/批量指令.sh" beforeDir="false" afterPath="$PROJECT_DIR$/58-202503-新DEMO环境/批量指令.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/69-202511-AI-GPU测试/rke-13014-cluster-security.yml" beforeDir="false" afterPath="$PROJECT_DIR$/69-202511-AI-GPU测试/rke-13014-cluster-security.yml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/清理rke集群的安装.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/清理rke集群的安装.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/编辑calico状态.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/a-部署脚本/编辑calico状态.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/备份脚本/关停全部的服务.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/备份脚本/关停全部的服务.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/备份脚本/备份命名空间.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/备份脚本/备份命名空间.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/故障恢复脚本/删除状态不为Running的Pod.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/故障恢复脚本/删除状态不为Running的Pod.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/更新脚本/副本数调整.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/更新脚本/副本数调整.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/研发环境相关DEMO/Core集群磁盘整理.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/研发环境相关DEMO/Core集群磁盘整理.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/0-挂载磁盘.sh" beforeDir="false" afterPath="$PROJECT_DIR$/998-常用脚本/磁盘脚本/0-挂载磁盘.sh" afterDir="false" />
</list>
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
@@ -32,6 +40,12 @@
</component>
<component name="KubernetesApiPersistence">{}</component>
<component name="KubernetesApiProvider">{
&quot;configuredContexts&quot;: [
{
&quot;name&quot;: &quot;wdd-rmdc-cluster&quot;,
&quot;kubeConfigUrl&quot;: &quot;file://C:/Users/wddsh/.kube/config&quot;
}
],
&quot;isMigrated&quot;: true
}</component>
<component name="ProjectColorInfo">{
@@ -53,6 +67,7 @@
&quot;RunOnceActivity.git.unshallow&quot;: &quot;true&quot;,
&quot;RunOnceActivity.go.formatter.settings.were.checked&quot;: &quot;true&quot;,
&quot;RunOnceActivity.go.migrated.go.modules.settings&quot;: &quot;true&quot;,
&quot;RunOnceActivity.typescript.service.memoryLimit.init&quot;: &quot;true&quot;,
&quot;SHARE_PROJECT_CONFIGURATION_FILES&quot;: &quot;true&quot;,
&quot;git-widget-placeholder&quot;: &quot;main&quot;,
&quot;go.import.settings.migrated&quot;: &quot;true&quot;,
@@ -64,15 +79,20 @@
&quot;nodejs_package_manager_path&quot;: &quot;npm&quot;,
&quot;settings.editor.selected.configurable&quot;: &quot;editor.preferences.tabs&quot;,
&quot;vue.rearranger.settings.migration&quot;: &quot;true&quot;
},
&quot;keyToStringList&quot;: {
&quot;DatabaseDriversLRU&quot;: [
&quot;mysql&quot;
]
}
}</component>
<component name="RecentsManager">
<key name="CopyFile.RECENT_KEYS">
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\69-202511-AI-GPU测试" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\70-202511-XA低空平台" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\68-202511-k8s升级1-30-14版本" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院\deploy" />
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院\doris-deploy" />
</key>
<key name="MoveFile.RECENT_KEYS">
<recent name="C:\Users\wddsh\Documents\IdeaProjects\CmiiDeploy\67-202508-雄安空能院\关停计划\备份" />
@@ -84,7 +104,8 @@
<component name="SharedIndexes">
<attachedChunks>
<set>
<option value="bundled-js-predefined-d6986cc7102b-3aa1da707db6-JavaScript-IU-252.27397.103" />
<option value="bundled-jdk-30f59d01ecdd-cffe25b9f5b3-intellij.indexing.shared.core-IU-253.28294.334" />
<option value="bundled-js-predefined-d6986cc7102b-c7e53b3be11b-JavaScript-IU-253.28294.334" />
</set>
</attachedChunks>
</component>
@@ -147,7 +168,40 @@
<workItem from="1762329425217" duration="5788000" />
<workItem from="1762760898943" duration="4498000" />
<workItem from="1762849000043" duration="5966000" />
<workItem from="1762928252671" duration="4692000" />
<workItem from="1762928252671" duration="4916000" />
<workItem from="1763015715677" duration="3469000" />
<workItem from="1763104939921" duration="4523000" />
<workItem from="1763538861577" duration="6886000" />
<workItem from="1763622999281" duration="6314000" />
<workItem from="1763639715589" duration="57000" />
<workItem from="1763949547333" duration="1001000" />
<workItem from="1763950583516" duration="1190000" />
<workItem from="1763952097500" duration="2718000" />
<workItem from="1763982697047" duration="1904000" />
<workItem from="1764033021091" duration="6079000" />
<workItem from="1764205819234" duration="247000" />
<workItem from="1764233694815" duration="2176000" />
<workItem from="1764236953080" duration="3099000" />
<workItem from="1764291767639" duration="2377000" />
<workItem from="1764553228556" duration="8000" />
<workItem from="1764557902114" duration="1232000" />
<workItem from="1764579004197" duration="2926000" />
<workItem from="1765181883392" duration="3327000" />
<workItem from="1765195917399" duration="382000" />
<workItem from="1765196348460" duration="405000" />
<workItem from="1765198068456" duration="4510000" />
<workItem from="1765242632611" duration="271000" />
<workItem from="1765243041640" duration="1192000" />
<workItem from="1765247867961" duration="249000" />
<workItem from="1765248279914" duration="1236000" />
<workItem from="1765331322582" duration="651000" />
<workItem from="1765337637351" duration="1142000" />
<workItem from="1765369724636" duration="298000" />
<workItem from="1765414368392" duration="234000" />
<workItem from="1765435760830" duration="16000" />
<workItem from="1765453325001" duration="1343000" />
<workItem from="1765519520794" duration="3192000" />
<workItem from="1765532805423" duration="595000" />
</task>
<task id="LOCAL-00001" summary="common update">
<option name="closed" value="true" />
@@ -173,7 +227,15 @@
<option name="project" value="LOCAL" />
<updated>1754963979625</updated>
</task>
<option name="localTasksCounter" value="4" />
<task id="LOCAL-00004" summary="新增GPU部分">
<option name="closed" value="true" />
<created>1762942452911</created>
<option name="number" value="00004" />
<option name="presentableId" value="LOCAL-00004" />
<option name="project" value="LOCAL" />
<updated>1762942452911</updated>
</task>
<option name="localTasksCounter" value="5" />
<servers />
</component>
<component name="TypeScriptGeneratedFilesManager">
@@ -197,9 +259,7 @@
<MESSAGE value="common update" />
<MESSAGE value="修改CICD的jenkins构建脚本" />
<MESSAGE value="新增雄安空能院项目" />
<option name="LAST_COMMIT_MESSAGE" value="新增雄安空能院项目" />
</component>
<component name="VgoProject">
<settings-migrated>true</settings-migrated>
<MESSAGE value="新增GPU部分" />
<option name="LAST_COMMIT_MESSAGE" value="新增GPU部分" />
</component>
</project>

View File

@@ -21,9 +21,6 @@ for ip in ${ip_list[@]}
do
# ssh in to each host using password 123
echo "chpasswd <<< 'root:V2ryStrP@ss'" | ssh root@${ip}
done
ssh root@192.168.40.50 <<< 'scyd@lab1234'
ssh root@192.168.40.50 <<< 'scyd@lab1234'
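For reference, a minimal sketch of the same batch password reset without piping the command string through echo, assuming ip_list is defined as in the loop above:

for ip in "${ip_list[@]}"; do
# run chpasswd on the remote host; the here-string is evaluated remotely
ssh root@"${ip}" "chpasswd <<< 'root:V2ryStrP@ss'"
done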

View File

@@ -0,0 +1,331 @@
# ------------------- Dashboard Namespace ------------------- #
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
# ------------------- Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# ------------------- Dashboard Service (NodePort 39999) ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
---
# ------------------- Dashboard Secrets ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
# ------------------- Dashboard Role (FIXED) ------------------- #
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-minimal
namespace: kubernetes-dashboard
rules:
# [FIX] allow creating Secrets, which resolves the panic issue
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# allow operations on the specific Secrets below
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# ConfigMap permissions
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Metrics permissions
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# ------------------- Dashboard RoleBinding ------------------- #
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-minimal
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
---
# ------------------- Metrics Scraper Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
# ------------------- Metrics Scraper Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.8
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
volumes:
- name: tmp-volume
emptyDir: {}
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
---
# ==================================================================
# Custom user configuration (ADMIN & READ-ONLY)
# ==================================================================
# ------------------- 1. Admin User (full privileges) ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
---
# ------------------- 2. Read-Only User (read-only + log access) ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
name: read-only-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: dashboard-view-with-logs
rules:
- apiGroups: [""]
resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumeclaims", "persistentvolumes", "namespaces"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["cronjobs", "jobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses", "networkpolicies"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: read-only-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: dashboard-view-with-logs
subjects:
- kind: ServiceAccount
name: read-only-user
namespace: kubernetes-dashboard
# kubectl create token admin-user -n kubernetes-dashboard --duration=26280h
# kubectl create token read-only-user -n kubernetes-dashboard --duration=26280h
## 1. Token management
# # delete the old binding (to be safe, avoiding leftovers)
# kubectl delete clusterrolebinding admin-user
#
## 2. Recreate the binding
# kubectl create clusterrolebinding admin-user \
# --clusterrole=cluster-admin \
# --serviceaccount=kubernetes-dashboard:admin-user
## 3. Regenerate the token
# kubectl create token admin-user -n kubernetes-dashboard --duration=26280h
## Note: you cannot list tokens that have already been issued.
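A minimal rollout-and-login sketch for the manifest above, assuming it is saved as kubernetes-dashboard-v2.7.0.yaml (the filename is not shown in this view):

# apply the manifest and wait for the dashboard to come up
kubectl apply -f kubernetes-dashboard-v2.7.0.yaml
kubectl -n kubernetes-dashboard rollout status deployment/kubernetes-dashboard
# issue a login token for the admin user defined above
kubectl create token admin-user -n kubernetes-dashboard --duration=26280h
# the Service is a NodePort, so the UI is reachable at https://<any-node-ip>:39999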

View File

@@ -0,0 +1,420 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
name: kubernetes-dashboard
rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: kubernetes-dashboard
template:
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: kubernetes-dashboard
image: docker.io/kubernetesui/dashboard-web:1.6.2
imagePullPolicy: Always
ports:
- containerPort: 8000
protocol: TCP
args:
- --namespace=kubernetes-dashboard
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
capabilities:
drop:
- ALL
- name: dashboard-api
image: docker.io/kubernetesui/dashboard-api:1.11.1
imagePullPolicy: Always
ports:
- containerPort: 9000
protocol: TCP
args:
- --namespace=kubernetes-dashboard
- --kubeconfig=
volumeMounts:
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 9000
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
capabilities:
drop:
- ALL
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
"kubernetes.io/hostname": master-192.168.40.50
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8000
selector:
app.kubernetes.io/name: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
app.kubernetes.io/name: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: dashboard-metrics-scraper
template:
metadata:
labels:
app.kubernetes.io/name: dashboard-metrics-scraper
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: dashboard-metrics-scraper
image: docker.io/kubernetesui/dashboard-metrics-scraper:1.2.2
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
capabilities:
drop:
- ALL
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
kind: Service
apiVersion: v1
metadata:
labels:
app.kubernetes.io/name: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
app.kubernetes.io/name: dashboard-metrics-scraper
---
kind: Service
apiVersion: v1
metadata:
labels:
app.kubernetes.io/name: kubernetes-dashboard
name: kubernetes-dashboard-nodeport
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 443
targetPort: 8000
nodePort: 39999
protocol: TCP
selector:
app.kubernetes.io/name: kubernetes-dashboard
---
# Create the admin user (full privileges)
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
# Create the read-only user (can view logs; cannot exec or delete)
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: readonly-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: readonly-with-logs
rules:
- apiGroups: [""]
resources: ["pods", "pods/log", "services", "replicationcontrollers", "persistentvolumeclaims", "namespaces", "events", "configmaps", "secrets", "nodes", "persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["deployments", "daemonsets", "replicasets", "statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses", "networkpolicies"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles", "rolebindings", "clusterroles", "clusterrolebindings"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: readonly-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: readonly-with-logs
subjects:
- kind: ServiceAccount
name: readonly-user
namespace: kubernetes-dashboard
---
# Create long-lived tokens
---
apiVersion: v1
kind: Secret
metadata:
name: admin-user-token
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: admin-user
type: kubernetes.io/service-account-token
---
apiVersion: v1
kind: Secret
metadata:
name: readonly-user-token
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: readonly-user
type: kubernetes.io/service-account-token
---
# kubectl -n kubernetes-dashboard create token admin-user --duration=87600h
# kubectl -n kubernetes-dashboard create token readonly-user --duration=87600h
# kubectl get secret admin-user-token -n kubernetes-dashboard -o jsonpath='{.data.token}' | base64 -d
# kubectl get secret readonly-user-token -n kubernetes-dashboard -o jsonpath='{.data.token}' | base64 -d

View File

@@ -0,0 +1,9 @@
apiVersion: v1
data:
.dockerconfigjson:
ewoJImF1dGhzIjogewoJCSJoYXJib3ItcWEuc3JlLmNkY3l5LmNuIjogewoJCQkiYXV0aCI6ICJjbUZrTURKZlpISnZibVU2UkhKdmJtVkFNVEl6TkE9PSIKCQl9LAoJCSJoYXJib3Iud2RkLmlvOjgwMzMiOiB7CiAgICAgICAgICAgICAiYXV0aCI6ICJZV1J0YVc0NlUzVndaWEppYjJkbExqRXlNdz09IgogICAgICAgIH0sCiAgICAgICAgImhhcmJvci5jZGN5eS5jb20uY24iOiB7CgkJCSJhdXRoIjogImNtRmtNREpmWkhKdmJtVTZSSEp2Ym1WQU1USXpOQT09IgoJCX0KCX0KfQ==
kind: Secret
metadata:
name: harborsecret
namespace: cmii-rmdc
type: kubernetes.io/dockerconfigjson
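For reference, a hedged sketch of how a dockerconfigjson Secret like this one is typically generated, so the JSON never has to be base64-encoded by hand; the username and password values here are placeholders:

# emit an equivalent pull secret from plain credentials
kubectl -n cmii-rmdc create secret docker-registry harborsecret \
--docker-server=harbor.cdcyy.com.cn \
--docker-username=<user> \
--docker-password=<password> \
--dry-run=client -o yaml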

View File

@@ -0,0 +1,79 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
namespace: wdd-rmdc
name: helm-minio
spec:
serviceName: helm-minio
replicas: 1
selector:
matchLabels:
app: helm-minio
template:
metadata:
labels:
app: helm-minio
spec:
imagePullSecrets:
- name: harborsecret
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- "worker-3-192.168.40.63"
containers:
- name: minio
image: harbor.cdcyy.com.cn/cmii/minio:RELEASE.2023-06-02T23-17-26Z
command: ["/bin/sh", "-c"]
args:
- minio server /data --console-address ":9001"
ports:
- containerPort: 9000
name: api
- containerPort: 9001
name: console
env:
- name: MINIO_ACCESS_KEY
value: "cmii"
- name: MINIO_SECRET_KEY
value: "B#923fC7mk"
volumeMounts:
- name: data
mountPath: /data
resources:
limits:
memory: 2Gi
cpu: "2"
requests:
memory: 200Mi
cpu: 200m
volumes:
- name: data
# persistentVolumeClaim:
# claimName: helm-minio
hostPath:
path: /var/lib/docker/minio-pv/
---
apiVersion: v1
kind: Service
metadata:
name: helm-minio
namespace: wdd-rmdc
spec:
selector:
app: helm-minio
ports:
- name: api
port: 9000
targetPort: 9000
nodePort: 39000
- name: console
port: 9001
targetPort: 9001
nodePort: 39001
type: NodePort
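A quick smoke test for the MinIO instance above, assuming <node-ip> is any cluster node (NodePorts 39000/39001 come from the Service):

# MinIO exposes a liveness endpoint on the API port
curl -sf http://<node-ip>:39000/minio/health/live && echo "minio api up"
# the web console answers on the console port
curl -sI http://<node-ip>:39001 | head -n1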

View File

@@ -0,0 +1,392 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-emqxs
namespace: wdd-rmdc
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-env
namespace: wdd-rmdc
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: base-1.0
data:
# cluster settings
EMQX_CLUSTER__DISCOVERY: "k8s"
EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
EMQX_CLUSTER__K8S__NAMESPACE: "wdd-rmdc"
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
# disable anonymous access; deny by default when no ACL rule matches
EMQX_AUTH__ALLOW_ANONYMOUS: "false"
EMQX_AUTHZ__NO_MATCH: "deny"
# initial Dashboard admin password (only takes effect on first startup)
EMQX_DASHBOARD__DEFAULT_PASSWORD: "odD8#Ve7.B"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-emqxs-init-script
namespace: wdd-rmdc
labels:
cmii.type: middleware
cmii.app: helm-emqxs
data:
init-mqtt-user.sh: |
#!/bin/sh
set -e
DASHBOARD_USER="admin"
DASHBOARD_PASS="odD8#Ve7.B"
MQTT_USER="admin"
MQTT_PASS="odD8#Ve7.B"
# wait for the local EMQX API to become ready
EMQX_API="http://localhost:18083/api/v5"
echo "waiting for the EMQX API to become ready..."
for i in $(seq 1 120); do
if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
echo "EMQX API is ready"
break
fi
echo "still waiting... ($i/120)"
sleep 5
done
# change the Dashboard admin password
echo "changing the Dashboard admin password..."
/opt/emqx/bin/emqx ctl admins passwd "${DASHBOARD_USER}" "${DASHBOARD_PASS}" || echo "password may already be set"
echo "Dashboard password configured"
# obtain a Dashboard token
echo "obtaining a Dashboard token..."
TOKEN=$(curl -s -X POST "${EMQX_API}/login" \
-H 'Content-Type: application/json' \
-d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}" \
| grep -o '"token":"[^"]*' | cut -d'"' -f4)
if [ -z "$TOKEN" ]; then
echo "ERROR: 无法获取 Token"
exit 1
fi
echo "Token 获取成功"
# 创建内置数据库认证器(使用 listeners 作用域)
echo "检查并创建内置数据库认证器..."
# 为 tcp:default listener 添加认证器
echo "为 listener tcp:default 配置认证器..."
curl -s -X POST "${EMQX_API}/authentication/tcp:default" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d '{
"mechanism": "password_based",
"backend": "built_in_database",
"user_id_type": "username",
"password_hash_algorithm": {
"name": "sha256",
"salt_position": "suffix"
}
}' 2>/dev/null || echo "tcp:default 认证器可能已存在"
# 为 ws:default listener 添加认证器
echo "为 listener ws:default 配置认证器..."
curl -s -X POST "${EMQX_API}/authentication/ws:default" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d '{
"mechanism": "password_based",
"backend": "built_in_database",
"user_id_type": "username",
"password_hash_algorithm": {
"name": "sha256",
"salt_position": "suffix"
}
}' 2>/dev/null || echo "ws:default 认证器可能已存在"
# 等待认证器创建完成
sleep 2
# 创建 MQTT 用户
echo "创建 MQTT 用户: ${MQTT_USER}..."
curl -s -X POST "${EMQX_API}/authentication/password_based:built_in_database/users?listener_id=tcp:default" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d "{\"user_id\":\"${MQTT_USER}\",\"password\":\"${MQTT_PASS}\",\"is_superuser\":true}" \
2>/dev/null || echo "用户可能已存在,尝试更新..."
# 尝试更新密码
curl -s -X PUT "${EMQX_API}/authentication/password_based:built_in_database/users/${MQTT_USER}?listener_id=tcp:default" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d "{\"password\":\"${MQTT_PASS}\",\"is_superuser\":true}" \
2>/dev/null || true
echo "MQTT 用户创建/更新完成"
# 创建授权规则
echo "配置授权规则..."
# 创建内置数据库授权源
curl -s -X POST "${EMQX_API}/authorization/sources" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d '{
"type": "built_in_database",
"enable": true
}' 2>/dev/null || echo "授权源可能已存在"
sleep 2
# 为 admin 用户添加授权规则(使用数组格式)
echo "为 ${MQTT_USER} 用户添加 ACL 规则..."
curl -s -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d "[{\"username\":\"${MQTT_USER}\",\"rules\":[{\"action\":\"all\",\"permission\":\"allow\",\"topic\":\"#\"}]}]" \
2>/dev/null && echo "ACL 规则创建成功" || echo "规则可能已存在,尝试更新..."
# 尝试更新规则PUT 请求需要单个对象,不是数组)
curl -s -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${MQTT_USER}" \
-H "Authorization: Bearer ${TOKEN}" \
-H 'Content-Type: application/json' \
-d "{\"rules\":[{\"action\":\"all\",\"permission\":\"allow\",\"topic\":\"#\"}]}" \
2>/dev/null && echo "ACL 规则更新成功" || true
echo "ACL 规则配置完成"
echo "初始化完成MQTT 用户: ${MQTT_USER}"
echo "可通过以下方式连接:"
echo " - MQTT: localhost:1883"
echo " - WebSocket: localhost:8083"
echo " - Dashboard: http://localhost:18083"
echo " - 用户名: ${MQTT_USER}"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-emqxs
namespace: wdd-rmdc
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: base-1.0
spec:
replicas: 1
serviceName: helm-emqxs-headless
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
template:
metadata:
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: base-1.0
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- worker-3-192.168.40.63
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-emqxs
containers:
- name: helm-emqxs
image: harbor.cdcyy.com.cn/cmii/emqx:5.8.8
imagePullPolicy: Always
ports:
- name: mqtt
containerPort: 1883
- name: mqttssl
containerPort: 8883
- name: mgmt
containerPort: 8081
- name: ws
containerPort: 8083
- name: wss
containerPort: 8084
- name: dashboard
containerPort: 18083
- name: ekka
containerPort: 4370
envFrom:
- configMapRef:
name: helm-emqxs-env
# lifecycle hook for post-start initialization
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
- |
# run the init script in the background so it does not block container startup
nohup /bin/sh /scripts/init-mqtt-user.sh > /tmp/init.log 2>&1 &
# health checks ensure the API is ready by the time the init script runs
livenessProbe:
httpGet:
path: /status
port: 18083
initialDelaySeconds: 60
periodSeconds: 30
readinessProbe:
httpGet:
path: /status
port: 18083
initialDelaySeconds: 10
periodSeconds: 5
resources: {}
volumeMounts:
# default data directory in EMQX 5.x; holds all persisted data
- name: emqx-data
mountPath: "/opt/emqx/data"
readOnly: false
- name: init-script
mountPath: /scripts
volumes:
- name: emqx-data
hostPath:
path: /var/lib/docker/emqx
- name: init-script
configMap:
name: helm-emqxs-init-script
defaultMode: 0755
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: wdd-rmdc
rules:
- apiGroups: [""]
resources:
- endpoints
verbs:
- get
- watch
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: helm-emqxs
namespace: wdd-rmdc
subjects:
- kind: ServiceAccount
name: helm-emqxs
namespace: wdd-rmdc
roleRef:
kind: Role
name: helm-emqxs
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs
namespace: wdd-rmdc
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: base-1.0
spec:
type: NodePort
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- port: 1883
name: mqtt
targetPort: 1883
nodePort: 31883
- port: 18083
name: dashboard
targetPort: 18083
nodePort: 38085
- port: 8083
name: mqtt-websocket
targetPort: 8083
nodePort: 38083
---
apiVersion: v1
kind: Service
metadata:
name: helm-emqxs-headless
namespace: wdd-rmdc
labels:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
helm.sh/chart: emqx-1.1.0
app.kubernetes.io/managed-by: octopus-control
app.kubernetes.io/version: base-1.0
spec:
type: ClusterIP
clusterIP: None
selector:
cmii.type: middleware
cmii.app: helm-emqxs
cmii.emqx.architecture: cluster
ports:
- name: mqtt
port: 1883
protocol: TCP
targetPort: 1883
- name: mqttssl
port: 8883
protocol: TCP
targetPort: 8883
- name: mgmt
port: 8081
protocol: TCP
targetPort: 8081
- name: websocket
port: 8083
protocol: TCP
targetPort: 8083
- name: wss
port: 8084
protocol: TCP
targetPort: 8084
- name: dashboard
port: 18083
protocol: TCP
targetPort: 18083
- name: ekka
port: 4370
protocol: TCP
targetPort: 4370
---
# tail -f /tmp/init.log
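A round-trip check for the broker once the StatefulSet is up, assuming mosquitto-clients on a host that can reach a node (NodePort 31883 and the admin credentials come from the manifest above):

# subscribe in the background, then publish to the same topic
mosquitto_sub -h <node-ip> -p 31883 -u admin -P 'odD8#Ve7.B' -t 'smoke/#' &
mosquitto_pub -h <node-ip> -p 31883 -u admin -P 'odD8#Ve7.B' -t 'smoke/test' -m 'hello'
# the init script logs its progress inside the pod
kubectl -n wdd-rmdc exec helm-emqxs-0 -- tail -n 20 /tmp/init.log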

View File

@@ -0,0 +1,142 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: timescaledb-config
namespace: wdd-rmdc
data:
postgresql.conf: |
# memory optimization settings
shared_buffers = 4GB
effective_cache_size = 12GB
maintenance_work_mem = 1GB
checkpoint_completion_target = 0.9
wal_buffers = 16MB
default_statistics_target = 100
random_page_cost = 1.1
effective_io_concurrency = 200
work_mem = 128MB
min_wal_size = 2GB
max_wal_size = 8GB
max_worker_processes = 8
max_parallel_workers_per_gather = 4
max_parallel_workers = 8
max_parallel_maintenance_workers = 4
# TimescaleDB tuning
timescaledb.max_background_workers = 8
shared_preload_libraries = 'timescaledb'
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: timescaledb
namespace: wdd-rmdc
spec:
serviceName: timescaledb
replicas: 1
selector:
matchLabels:
app: timescaledb
template:
metadata:
labels:
app: timescaledb
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- worker-2-192.168.40.62
imagePullSecrets:
- name: harborsecret
containers:
- name: timescaledb
image: harbor.cdcyy.com.cn/cmii/timescaledb:2.24.0-pg17
ports:
- containerPort: 5432
name: postgresql
env:
- name: POSTGRES_PASSWORD
value: "admin@123"
- name: POSTGRES_USER
value: "postgres"
- name: POSTGRES_DB
value: "timescaledb"
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
- name: TS_TUNE_MEMORY
value: "16GB"
- name: TS_TUNE_NUM_CPUS
value: "8"
resources:
requests:
memory: "8Gi"
cpu: "4"
limits:
memory: "16Gi"
cpu: "4"
volumeMounts:
- name: data
mountPath: /var/lib/postgresql/data
- name: config
mountPath: /etc/postgresql/postgresql.conf
subPath: postgresql.conf
livenessProbe:
exec:
command:
- /bin/sh
- -c
- pg_isready -U postgres
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
exec:
command:
- /bin/sh
- -c
- pg_isready -U postgres
initialDelaySeconds: 5
periodSeconds: 5
volumes:
- name: config
configMap:
name: timescaledb-config
- name: data
hostPath:
path: /var/lib/docker/postgresql_data
---
apiVersion: v1
kind: Service
metadata:
name: timescaledb
namespace: wdd-rmdc
spec:
type: NodePort
ports:
- port: 5432
targetPort: 5432
nodePort: 35432
protocol: TCP
name: postgresql
selector:
app: timescaledb
---
apiVersion: v1
kind: Service
metadata:
name: timescaledb-headless
namespace: wdd-rmdc
spec:
clusterIP: None
ports:
- port: 5432
targetPort: 5432
protocol: TCP
name: postgresql
selector:
app: timescaledb
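A minimal connectivity check for this instance, assuming psql on a machine that can reach a node (NodePort 35432 and the credentials come from the env above):

# connect through the NodePort and confirm the TimescaleDB extension is usable
PGPASSWORD='admin@123' psql -h <node-ip> -p 35432 -U postgres -d timescaledb \
-c "CREATE EXTENSION IF NOT EXISTS timescaledb;" \
-c "SELECT extversion FROM pg_extension WHERE extname = 'timescaledb';"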

View File

@@ -0,0 +1,187 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: timescaledb-rmdc-config
namespace: cmii-rmdc
data:
postgresql.conf: |
# ========== Core memory parameters (32GB) ==========
# shared_buffers: 25% of total RAM recommended below 32GB
shared_buffers = 8GB
# effective_cache_size: 50-75% of total RAM recommended
effective_cache_size = 24GB
# work_mem: (Total RAM * 0.25) / max_connections
# (32GB * 0.25) / 150 ≈ 54MB, rounded up to 128MB
work_mem = 128MB
# maintenance_work_mem: 1-2GB recommended; with 32GB RAM this can go to 2GB
maintenance_work_mem = 2GB
# ========== WAL parameters ==========
wal_buffers = 32MB
min_wal_size = 4GB
max_wal_size = 16GB
checkpoint_completion_target = 0.9
wal_compression = on
# ========== Query planner parameters ==========
default_statistics_target = 100
random_page_cost = 1.1
effective_io_concurrency = 200
# ========== Parallel query parameters ==========
max_worker_processes = 16
max_parallel_workers_per_gather = 4
max_parallel_workers = 8
max_parallel_maintenance_workers = 4
# ========== Connections ==========
max_connections = 150
# ========== TimescaleDB-specific tuning ==========
timescaledb.max_background_workers = 8
shared_preload_libraries = 'timescaledb'
# TimescaleDB compression tuning (for time-series data)
timescaledb.max_background_workers = 8
# ========== Logging ==========
logging_collector = on
log_min_duration_statement = 1000
log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '
log_checkpoints = on
# ========== Autovacuum tuning ==========
autovacuum_max_workers = 4
autovacuum_naptime = 10s
autovacuum_vacuum_scale_factor = 0.05
autovacuum_analyze_scale_factor = 0.02
# ========== Other performance tuning ==========
checkpoint_timeout = 15min
bgwriter_delay = 200ms
bgwriter_lru_maxpages = 100
bgwriter_lru_multiplier = 2.0
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: timescaledb-rmdc
namespace: cmii-rmdc
spec:
serviceName: timescaledb-rmdc-svc
replicas: 1
selector:
matchLabels:
app: timescaledb-rmdc
template:
metadata:
labels:
app: timescaledb-rmdc
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- worker-1-192.168.40.61
imagePullSecrets:
- name: harborsecret
containers:
- name: timescaledb
image: harbor.wdd.io:8033/rmdc/timescaledb:2.24.0-pg17
ports:
- containerPort: 5432
name: postgresql
env:
- name: POSTGRES_PASSWORD
value: "Super@Boge.123"
- name: POSTGRES_USER
value: "postgres"
- name: POSTGRES_DB
value: "timescaledb"
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
- name: TS_TUNE_MEMORY
value: "32GB"
- name: TS_TUNE_NUM_CPUS
value: "16"
resources:
requests:
memory: "16Gi"
cpu: "8"
limits:
memory: "32Gi"
cpu: "16"
volumeMounts:
- name: data
mountPath: /var/lib/postgresql/data
- name: config
mountPath: /etc/postgresql/postgresql.conf
subPath: postgresql.conf
livenessProbe:
exec:
command:
- /bin/sh
- -c
- pg_isready -U postgres
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
exec:
command:
- /bin/sh
- -c
- pg_isready -U postgres
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 3
volumes:
- name: config
configMap:
name: timescaledb-rmdc-config
- name: data
hostPath:
path: /var/lib/docker/postgresql_data
---
apiVersion: v1
kind: Service
metadata:
name: timescaledb-rmdc-svc
namespace: cmii-rmdc
spec:
type: NodePort
ports:
- port: 5432
targetPort: 5432
nodePort: 35435
protocol: TCP
name: postgresql
selector:
app: timescaledb-rmdc
---
apiVersion: v1
kind: Service
metadata:
name: timescaledb-rmdc-svc-headless
namespace: cmii-rmdc
spec:
clusterIP: None
ports:
- port: 5432
targetPort: 5432
protocol: TCP
name: postgresql
selector:
app: timescaledb-rmdc
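Worth verifying that the tuned values are live: the ConfigMap is mounted at /etc/postgresql/postgresql.conf, but the postgres entrypoint only reads that file if config_file points at it, so a check is prudent. A sketch assuming NodePort 35435 and the credentials above:

# confirm which config file is in use and whether key settings applied
PGPASSWORD='Super@Boge.123' psql -h <node-ip> -p 35435 -U postgres -d timescaledb \
-c "SHOW config_file;" -c "SHOW shared_buffers;" -c "SHOW max_connections;"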

View File

@@ -0,0 +1,316 @@
# ------------------- Dashboard Namespace ------------------- #
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
# ------------------- Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# ------------------- Dashboard Service (NodePort 39999) ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
nodePort: 39999
selector:
k8s-app: kubernetes-dashboard
---
# ------------------- Dashboard Secrets ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
# ------------------- Dashboard Role (FIXED) ------------------- #
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-minimal
namespace: kubernetes-dashboard
rules:
# [FIX] allow creating Secrets, which resolves the panic issue
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# allow operations on the specific Secrets below
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# ConfigMap permissions
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Metrics permissions
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
# ------------------- Dashboard RoleBinding ------------------- #
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-minimal
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
"kubernetes.io/hostname": master-192.168.40.50
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
---
# ------------------- Metrics Scraper Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
# ------------------- Metrics Scraper Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.8
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
"kubernetes.io/hostname": master-192.168.40.50
volumes:
- name: tmp-volume
emptyDir: {}
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
---
# ==================================================================
# Custom user configuration (ADMIN & READ-ONLY)
# ==================================================================
# ------------------- 1. Admin User (full privileges) ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
---
# ------------------- 2. Read-Only User (read-only + log access) ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
name: read-only-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: dashboard-view-with-logs
rules:
- apiGroups: [""]
resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumeclaims", "persistentvolumes", "namespaces"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["cronjobs", "jobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses", "networkpolicies"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: read-only-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: dashboard-view-with-logs
subjects:
- kind: ServiceAccount
name: read-only-user
namespace: kubernetes-dashboard

View File

@@ -24,6 +24,31 @@ nodes:
internal_address: 192.168.119.106
labels:
uavcloud.env: demo
- address: 192.168.40.61
user: root
role:
- worker
internal_address: 192.168.40.61
hostname_override: "worker-1-192.168.40.61"
labels:
uavcloud.env: demo
- address: 192.168.40.62
user: root
role:
- worker
internal_address: 192.168.40.62
hostname_override: "worker-2-192.168.40.62"
labels:
uavcloud.env: demo
- address: 192.168.40.63
user: root
role:
- worker
internal_address: 192.168.40.63
hostname_override: "worker-3-192.168.40.63"
labels:
uavcloud.env: demo
authentication:
strategy: x509
@@ -80,7 +105,7 @@ services:
service_cluster_ip_range: 10.74.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
always_pull_images: false
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
@@ -141,7 +166,8 @@ network:
mtu: 1440
options:
flannel_backend_type: vxlan
plugin: calico
plugin: flannel
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"

View File

@@ -0,0 +1,75 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: sophon-device-plugin
namespace: kube-system
labels:
app: sophon-device-plugin
spec:
selector:
matchLabels:
app: sophon-device-plugin
template:
metadata:
labels:
app: sophon-device-plugin
spec:
containers:
- name: sophon-device-plugin
image: sophon-device-plugin:1.1.9
imagePullPolicy: IfNotPresent # must be set here
securityContext:
privileged: true
env:
- name: TPU_INSTANCE_NUM # only valid when target num bigger than physical chip num
value: "0"
- name: MIX_MODE_CHIP_NUM # only for bm1686 chips
value: "0"
- name: NAME_WITH_SN_MODE
value: "0"
# - name: TPU_INSTANCE_NUM_PER_CHIP
# value: "1:1:1"
volumeMounts:
- name: devfs
mountPath: /dev
- name: sysfs
mountPath: /sys
- name: kubeletsockets
mountPath: /var/lib/kubelet/device-plugins
- name: libsophon
mountPath: /opt/tpuv7
- name: envs
mountPath: /etc
- name: usrsbin
mountPath: /usr/sbin
- name: usershare
mountPath: /usr/share
- name: usr
mountPath: /usr
nodeSelector:
"kubernetes.io/hostname": 192.168.119.105
volumes:
- name: devfs
hostPath:
path: /dev
- name: sysfs
hostPath:
path: /sys
- name: kubeletsockets
hostPath:
path: /var/lib/kubelet/device-plugins
- name: libsophon
hostPath:
path: /opt/tpuv7
- name: envs
hostPath:
path: /etc
- name: usrsbin
hostPath:
path: /usr/sbin
- name: usershare
hostPath:
path: /usr/share
- name: usr
hostPath:
path: /usr
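Once the DaemonSet is scheduled, the plugin should register TPU devices with the kubelet on the selected node; the exact extended-resource name is plugin-defined and not visible in this manifest, so check the node directly:

# the plugin pod should be Running on 192.168.119.105
kubectl -n kube-system get pods -l app=sophon-device-plugin -o wide
# allocatable extended resources show up in the node description
kubectl describe node 192.168.119.105 | grep -A 10 -i allocatable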

View File

@@ -0,0 +1,82 @@
#!/bin/bash
harbor_host=10.22.48.3:8033
namespace=xafkapp
app_name=""
new_tag=""
download_from_oss() {
if [ "$1" == "" ]; then
echo "no zip file in error!"
exit 233
fi
echo "start to download => $1"
wget "https://oss.demo.uavcmlc.com/cmlc-installation/tmp/$1"
echo ""
echo ""
}
upload_image_to_harbor(){
if [ "$app_name" == "" ]; then
echo "app name null exit!"
exit 233
fi
if ! docker load < "$1"; then
echo "docker load error !"
fi
docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
echo ""
echo ""
echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
docker login -u admin -p V2ryStr@ngPss $harbor_host
docker push "$harbor_host/cmii/$app_name:$new_tag"
echo ""
echo ""
}
parse_args(){
if [ "$1" == "" ]; then
echo "no zip file in error!"
exit 233
fi
local image_name="$1"
# cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
app_name=$(echo $image_name | cut -d "=" -f1)
new_tag=$(echo $image_name | cut -d "=" -f2)
}
update_image_tag(){
if [ "$new_tag" == "" ]; then
echo "new tag error!"
exit 233
fi
local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)
echo "image grep is => ${image_prefix}"
echo "start to update ${namespace} ${app_name} to ${new_tag} !"
echo ""
kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
echo ""
echo "start to wait for 3 seconds!"
sleep 3
local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
echo ""
echo "new image are => $image_new"
echo ""
}
main(){
parse_args "$1"
download_from_oss "$1"
upload_image_to_harbor "$1"
update_image_tag
}
main "$@"

View File

@@ -54,8 +54,10 @@ clean_rke_cluster() {
ip6tables -F && ip6tables -t nat -F && ip6tables -t mangle -F && ip6tables -t raw -F
rke remove --force
printf "y/n" | docker container prune
rke remove --ignore-docker-version --force
printf "y" | docker container prune
rke -d up
}

View File

@@ -0,0 +1,5 @@
172.16.243.130 mlogin.hq.cmcc
172.21.200.213 cmoa.hq.cmcc
172.21.197.38 todo.hq.cmcc
172.21.180.176 oa.hq.cmcc
192.168.78.36 oa.cdcyy.cn
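A sketch of rolling these entries out to a host, assuming the file is saved as cmii-hosts.txt on the target:

# append the entries once, using the first host as a duplicate guard
grep -qxF '172.16.243.130 mlogin.hq.cmcc' /etc/hosts || cat cmii-hosts.txt >> /etc/hosts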

View File

@@ -1,6 +1,6 @@
#!/bin/bash
namespace=xakny
namespace=xafkapp
# Gracefully scale down Deployments
scale_deployments() {
@@ -18,4 +18,4 @@ scale_statefulsets() {
}
scale_deployments
#scale_statefulsets
scale_statefulsets
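The function bodies sit outside this hunk; as a hedged sketch, scale_deployments and the now-enabled scale_statefulsets presumably reduce everything in the namespace to zero replicas, along these lines:

# scale every workload in the namespace down to zero
kubectl -n "${namespace}" scale deployment --all --replicas=0
kubectl -n "${namespace}" scale statefulset --all --replicas=0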

View File

@@ -1,6 +1,6 @@
#!/bin/bash
namespace=xakny
namespace=xafkapp
install_yq() {
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq

View File

@@ -6,7 +6,7 @@ wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/jq-linux-am
chmod +x /usr/local/bin/jq
export name_space=eedsjc-uavms
export name_space=xafkapp
kubectl delete pods -n $name_space --field-selector status.phase!=Running --force

View File

@@ -2,8 +2,6 @@
name_space=xmyd
kubectl get deployments -n ${name_space} -o custom-columns='NAME:.metadata.name,REPLICAS:.spec.replicas' --no-headers > deployments_replicas.txt

View File

@@ -11,7 +11,7 @@ umount /dev/mapper/iovg-lvdata
umount /dev/mapper/rootvg-lvtmp
umount /dev/mapper/rootvg-lvswap
umount /dev/mapper/centos-swap
umount /dev/mapper/openeuler-swap
lvdisplay

View File

@@ -50,20 +50,20 @@ t
8e
w
" | fdisk /dev/vdb
" | fdisk /dev/sdb
partprobe
# if the volume group already exists, just extend it instead
# vgextend rootvg /dev/sdc1
vgcreate ${VG_NAME} /dev/vdb1
vgcreate ${VG_NAME} /dev/sdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# adjust the size to the actual disk
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
#mkfs.ext4 /dev/mapper/${VG_NAME}-lvdata
mkdir -p /home/app-plus
#mkdir -p /var/lib/docker
#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
export selffstab="/dev/mapper/${VG_NAME}-lvdata /home/app-plus xfs defaults 0 0"
#mkdir -p /home/app-plus
mkdir -p /var/lib/docker
selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
#export selffstab="/dev/mapper/${VG_NAME}-lvdata /home/app-plus xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
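After mount -a succeeds, a quick verification that the new logical volume backs /var/lib/docker (device and VG names as in the script above):

# the LV should show an xfs filesystem mounted at /var/lib/docker
lsblk -f /dev/sdb
df -h /var/lib/docker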