diff --git a/.idea/go.imports.xml b/.idea/go.imports.xml
new file mode 100644
index 0000000..d7202f0
--- /dev/null
+++ b/.idea/go.imports.xml
@@ -0,0 +1,11 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/workspace.xml b/.idea/workspace.xml
index 2050228..648e9df 100644
--- a/.idea/workspace.xml
+++ b/.idea/workspace.xml
@@ -4,23 +4,31 @@
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
+
+
+
+
+
@@ -32,6 +40,12 @@
{}
{
+ "configuredContexts": [
+ {
+ "name": "wdd-rmdc-cluster",
+ "kubeConfigUrl": "file://C:/Users/wddsh/.kube/config"
+ }
+ ],
"isMigrated": true
}
{
@@ -53,6 +67,7 @@
"RunOnceActivity.git.unshallow": "true",
"RunOnceActivity.go.formatter.settings.were.checked": "true",
"RunOnceActivity.go.migrated.go.modules.settings": "true",
+ "RunOnceActivity.typescript.service.memoryLimit.init": "true",
"SHARE_PROJECT_CONFIGURATION_FILES": "true",
"git-widget-placeholder": "main",
"go.import.settings.migrated": "true",
@@ -64,15 +79,20 @@
"nodejs_package_manager_path": "npm",
"settings.editor.selected.configurable": "editor.preferences.tabs",
"vue.rearranger.settings.migration": "true"
+ },
+ "keyToStringList": {
+ "DatabaseDriversLRU": [
+ "mysql"
+ ]
}
}
+
-
@@ -84,7 +104,8 @@
-
+
+
@@ -147,7 +168,40 @@
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -173,7 +227,15 @@
1754963979625
-
+
+
+ 1762942452911
+
+
+
+ 1762942452911
+
+
@@ -197,9 +259,7 @@
-
-
-
- true
+
+
\ No newline at end of file
diff --git a/58-202503-新DEMO环境/批量指令.sh b/58-202503-新DEMO环境/批量指令.sh
index 17f9d97..076b988 100644
--- a/58-202503-新DEMO环境/批量指令.sh
+++ b/58-202503-新DEMO环境/批量指令.sh
@@ -21,9 +21,6 @@ for ip in ${ip_list[@]}
do
  # Log in to each host over SSH and set the root password with chpasswd
echo "chpasswd <<< 'root:V2ryStrP@ss'" | ssh root@${ip}
-
done
-
-
- ssh root@192.168.40.50 <<< 'scyd@lab1234'
+ssh root@192.168.40.50 <<< 'scyd@lab1234'
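+# Note (hedged): ssh reads passwords from the controlling tty, not stdin, so
+# the here-string above will not authenticate on its own; the usual
+# non-interactive form is: sshpass -p 'scyd@lab1234' ssh root@192.168.40.50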
diff --git a/68-202511-k8s升级1-30-14版本/kubernetes-dashboard-v2.7.0.yaml b/68-202511-k8s升级1-30-14版本/kubernetes-dashboard-v2.7.0.yaml
new file mode 100644
index 0000000..80432e2
--- /dev/null
+++ b/68-202511-k8s升级1-30-14版本/kubernetes-dashboard-v2.7.0.yaml
@@ -0,0 +1,331 @@
+# ------------------- Dashboard Namespace ------------------- #
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: kubernetes-dashboard
+
+---
+# ------------------- Service Account ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Service (NodePort 39999) ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ type: NodePort
+ ports:
+ - port: 443
+ targetPort: 8443
+ nodePort: 39999
+ selector:
+ k8s-app: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Secrets ------------------- #
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-certs
+ namespace: kubernetes-dashboard
+type: Opaque
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-csrf
+ namespace: kubernetes-dashboard
+type: Opaque
+data:
+ csrf: ""
+
+---
+# ------------------- Dashboard Role (FIXED) ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+rules:
+  # [FIX] allow creating Secrets, resolving the panic issue
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create"]
+  # Allow operations on the specific Secrets below
+ - apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+ verbs: ["get", "update", "delete"]
+  # ConfigMap permissions
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ resourceNames: ["kubernetes-dashboard-settings"]
+ verbs: ["get", "update"]
+  # Metrics permissions
+ - apiGroups: [""]
+ resources: ["services"]
+ resourceNames: ["heapster", "dashboard-metrics-scraper"]
+ verbs: ["proxy"]
+ - apiGroups: [""]
+ resources: ["services/proxy"]
+ resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+ verbs: ["get"]
+
+---
+# ------------------- Dashboard RoleBinding ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kubernetes-dashboard-minimal
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: kubernetes-dashboard
+ template:
+ metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ spec:
+ containers:
+ - name: kubernetes-dashboard
+ image: kubernetesui/dashboard:v2.7.0
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8443
+ protocol: TCP
+ args:
+ - --auto-generate-certificates
+ - --namespace=kubernetes-dashboard
+ volumeMounts:
+ - name: kubernetes-dashboard-certs
+ mountPath: /certs
+ - mountPath: /tmp
+ name: tmp-volume
+ livenessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /
+ port: 8443
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ volumes:
+ - name: kubernetes-dashboard-certs
+ secret:
+ secretName: kubernetes-dashboard-certs
+ - name: tmp-volume
+ emptyDir: {}
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ------------------- Metrics Scraper Service ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ ports:
+ - port: 8000
+ targetPort: 8000
+ selector:
+ k8s-app: dashboard-metrics-scraper
+
+---
+# ------------------- Metrics Scraper Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: dashboard-metrics-scraper
+ template:
+ metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ annotations:
+ seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
+ spec:
+ containers:
+ - name: dashboard-metrics-scraper
+ image: kubernetesui/metrics-scraper:v1.0.8
+ ports:
+ - containerPort: 8000
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 8000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-volume
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ volumes:
+ - name: tmp-volume
+ emptyDir: {}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ==================================================================
+# Custom user configuration (ADMIN & READ-ONLY)
+# ==================================================================
+
+# ------------------- 1. Admin User (full privileges) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: admin-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: admin-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: admin-user
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- 2. Read-Only User (read-only + log access) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: read-only-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: dashboard-view-with-logs
+rules:
+ - apiGroups: [""]
+ resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumeclaims", "persistentvolumes", "namespaces"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["pods/log"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["apps"]
+ resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["batch"]
+ resources: ["cronjobs", "jobs"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["ingresses", "networkpolicies"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["events.k8s.io"]
+ resources: ["events"]
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: read-only-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: dashboard-view-with-logs
+subjects:
+ - kind: ServiceAccount
+ name: read-only-user
+ namespace: kubernetes-dashboard
+
+# kubectl create token admin-user -n kubernetes-dashboard --duration=26280h
+# kubectl create token read-only-user -n kubernetes-dashboard --duration=26280h
+
+## 1. Token management
+# # First delete the old binding (to be safe and avoid leftovers)
+# kubectl delete clusterrolebinding admin-user
+#
+## 2. Recreate the binding
+# kubectl create clusterrolebinding admin-user \
+# --clusterrole=cluster-admin \
+# --serviceaccount=kubernetes-dashboard:admin-user
+
+## 3. Regenerate the token
+# kubectl create token admin-user -n kubernetes-dashboard --duration=26280h
+
+## Note: tokens that have already been issued cannot be listed.
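+
+## A quick check that a freshly issued token works (a sketch; `kubectl auth
+## whoami` requires a reasonably recent kubectl):
+# TOKEN=$(kubectl create token read-only-user -n kubernetes-dashboard)
+# kubectl --token="$TOKEN" auth whoami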
\ No newline at end of file
diff --git a/68-202511-k8s升级1-30-14版本/kubernetes-dashboard-v7.10.2-无法启动.yaml b/68-202511-k8s升级1-30-14版本/kubernetes-dashboard-v7.10.2-无法启动.yaml
new file mode 100644
index 0000000..958e895
--- /dev/null
+++ b/68-202511-k8s升级1-30-14版本/kubernetes-dashboard-v7.10.2-无法启动.yaml
@@ -0,0 +1,420 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: kubernetes-dashboard
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ name: kubernetes-dashboard-certs
+ namespace: kubernetes-dashboard
+type: Opaque
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ name: kubernetes-dashboard-csrf
+ namespace: kubernetes-dashboard
+type: Opaque
+data:
+ csrf: ""
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ name: kubernetes-dashboard-key-holder
+ namespace: kubernetes-dashboard
+type: Opaque
+
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ name: kubernetes-dashboard-settings
+ namespace: kubernetes-dashboard
+
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+rules:
+ - apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+ verbs: ["get", "update", "delete"]
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ resourceNames: ["kubernetes-dashboard-settings"]
+ verbs: ["get", "update"]
+ - apiGroups: [""]
+ resources: ["services"]
+ resourceNames: ["heapster", "dashboard-metrics-scraper"]
+ verbs: ["proxy"]
+ - apiGroups: [""]
+ resources: ["services/proxy"]
+ resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+ verbs: ["get"]
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ name: kubernetes-dashboard
+rules:
+ - apiGroups: ["metrics.k8s.io"]
+ resources: ["pods", "nodes"]
+ verbs: ["get", "list", "watch"]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kubernetes-dashboard
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: kubernetes-dashboard
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kubernetes-dashboard
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ spec:
+ securityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ containers:
+ - name: kubernetes-dashboard
+ image: docker.io/kubernetesui/dashboard-web:1.6.2
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8000
+ protocol: TCP
+ args:
+ - --namespace=kubernetes-dashboard
+ volumeMounts:
+ - name: kubernetes-dashboard-certs
+ mountPath: /certs
+ - mountPath: /tmp
+ name: tmp-volume
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 8000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ capabilities:
+ drop:
+ - ALL
+ - name: dashboard-api
+ image: docker.io/kubernetesui/dashboard-api:1.11.1
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 9000
+ protocol: TCP
+ args:
+ - --namespace=kubernetes-dashboard
+ - --kubeconfig=
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-volume
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 9000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ capabilities:
+ drop:
+ - ALL
+ volumes:
+ - name: kubernetes-dashboard-certs
+ secret:
+ secretName: kubernetes-dashboard-certs
+ - name: tmp-volume
+ emptyDir: {}
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ "kubernetes.io/hostname": master-192.168.40.50
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ ports:
+ - port: 443
+ targetPort: 8000
+ selector:
+ app.kubernetes.io/name: kubernetes-dashboard
+
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ app.kubernetes.io/name: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: dashboard-metrics-scraper
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: dashboard-metrics-scraper
+ spec:
+ securityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ containers:
+ - name: dashboard-metrics-scraper
+ image: docker.io/kubernetesui/dashboard-metrics-scraper:1.2.2
+ ports:
+ - containerPort: 8000
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 8000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-volume
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ capabilities:
+ drop:
+ - ALL
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ volumes:
+ - name: tmp-volume
+ emptyDir: {}
+
+---
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ app.kubernetes.io/name: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ ports:
+ - port: 8000
+ targetPort: 8000
+ selector:
+ app.kubernetes.io/name: dashboard-metrics-scraper
+---
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ app.kubernetes.io/name: kubernetes-dashboard
+ name: kubernetes-dashboard-nodeport
+ namespace: kubernetes-dashboard
+spec:
+ type: NodePort
+ ports:
+ - port: 443
+ targetPort: 8000
+ nodePort: 39999
+ protocol: TCP
+ selector:
+ app.kubernetes.io/name: kubernetes-dashboard
+---
+# Create the admin user (full privileges)
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: admin-user
+ namespace: kubernetes-dashboard
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: admin-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: admin-user
+ namespace: kubernetes-dashboard
+
+# Create a read-only user (can view logs; cannot exec or delete)
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: readonly-user
+ namespace: kubernetes-dashboard
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: readonly-with-logs
+rules:
+ - apiGroups: [""]
+ resources: ["pods", "pods/log", "services", "replicationcontrollers", "persistentvolumeclaims", "namespaces", "events", "configmaps", "secrets", "nodes", "persistentvolumes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["apps"]
+ resources: ["deployments", "daemonsets", "replicasets", "statefulsets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["batch"]
+ resources: ["jobs", "cronjobs"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["ingresses", "networkpolicies"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["rbac.authorization.k8s.io"]
+ resources: ["roles", "rolebindings", "clusterroles", "clusterrolebindings"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["metrics.k8s.io"]
+ resources: ["pods", "nodes"]
+ verbs: ["get", "list", "watch"]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: readonly-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: readonly-with-logs
+subjects:
+ - kind: ServiceAccount
+ name: readonly-user
+ namespace: kubernetes-dashboard
+
+---
+# Create long-lived tokens
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: admin-user-token
+ namespace: kubernetes-dashboard
+ annotations:
+ kubernetes.io/service-account.name: admin-user
+type: kubernetes.io/service-account-token
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: readonly-user-token
+ namespace: kubernetes-dashboard
+ annotations:
+ kubernetes.io/service-account.name: readonly-user
+type: kubernetes.io/service-account-token
+---
+
+# kubectl -n kubernetes-dashboard create token admin-user --duration=87600h
+# kubectl -n kubernetes-dashboard create token readonly-user --duration=87600h
+
+# kubectl get secret admin-user-token -n kubernetes-dashboard -o jsonpath='{.data.token}' | base64 -d
+
+# kubectl get secret readonly-user-token -n kubernetes-dashboard -o jsonpath='{.data.token}' | base64 -d
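+
+# Hedged note on why this copy is marked 无法启动 (fails to start): upstream
+# Dashboard v7.x ships as a Helm chart with a Kong gateway in front of the
+# web/api containers, which this flat manifest omits; the empty --kubeconfig=
+# argument passed to dashboard-api is another likely culprit.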
\ No newline at end of file
diff --git a/68-202511-k8s升级1-30-14版本/kubernetes-images-1.30.4.xlsx b/68-202511-k8s升级1-30-14版本/kubernetes-images-1.30.4.xlsx
new file mode 100644
index 0000000..2952e0f
Binary files /dev/null and b/68-202511-k8s升级1-30-14版本/kubernetes-images-1.30.4.xlsx differ
diff --git a/69-202511-AI-GPU测试/harbor-secret.yaml b/69-202511-AI-GPU测试/harbor-secret.yaml
new file mode 100644
index 0000000..ea78517
--- /dev/null
+++ b/69-202511-AI-GPU测试/harbor-secret.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+data:
+ .dockerconfigjson:
+ ewoJImF1dGhzIjogewoJCSJoYXJib3ItcWEuc3JlLmNkY3l5LmNuIjogewoJCQkiYXV0aCI6ICJjbUZrTURKZlpISnZibVU2UkhKdmJtVkFNVEl6TkE9PSIKCQl9LAoJCSJoYXJib3Iud2RkLmlvOjgwMzMiOiB7CiAgICAgICAgICAgICAiYXV0aCI6ICJZV1J0YVc0NlUzVndaWEppYjJkbExqRXlNdz09IgogICAgICAgIH0sCiAgICAgICAgImhhcmJvci5jZGN5eS5jb20uY24iOiB7CgkJCSJhdXRoIjogImNtRmtNREpmWkhKdmJtVTZSSEp2Ym1WQU1USXpOQT09IgoJCX0KCX0KfQ==
+kind: Secret
+metadata:
+ name: harborsecret
+ namespace: cmii-rmdc
+type: kubernetes.io/dockerconfigjson
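+# The blob above is a base64-encoded docker config. A sketch for regenerating
+# or inspecting it (the username/password here are placeholders, not the real
+# credentials):
+# kubectl -n cmii-rmdc create secret docker-registry harborsecret \
+#   --docker-server=harbor.cdcyy.com.cn --docker-username=<user> \
+#   --docker-password=<pass> --dry-run=client -o yaml
+# kubectl -n cmii-rmdc get secret harborsecret -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d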
diff --git a/69-202511-AI-GPU测试/helm-minio.yaml b/69-202511-AI-GPU测试/helm-minio.yaml
new file mode 100644
index 0000000..2ca1bff
--- /dev/null
+++ b/69-202511-AI-GPU测试/helm-minio.yaml
@@ -0,0 +1,79 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ namespace: wdd-rmdc
+ name: helm-minio
+spec:
+ serviceName: helm-minio
+ replicas: 1
+ selector:
+ matchLabels:
+ app: helm-minio
+ template:
+ metadata:
+ labels:
+ app: helm-minio
+ spec:
+ imagePullSecrets:
+ - name: harborsecret
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - "worker-3-192.168.40.63"
+ containers:
+ - name: minio
+ image: harbor.cdcyy.com.cn/cmii/minio:RELEASE.2023-06-02T23-17-26Z
+ command: ["/bin/sh", "-c"]
+ args:
+ - minio server /data --console-address ":9001"
+ ports:
+ - containerPort: 9000
+ name: api
+ - containerPort: 9001
+ name: console
+ env:
+ - name: MINIO_ACCESS_KEY
+ value: "cmii"
+ - name: MINIO_SECRET_KEY
+ value: "B#923fC7mk"
+ volumeMounts:
+ - name: data
+ mountPath: /data
+ resources:
+ limits:
+ memory: 2Gi
+ cpu: "2"
+ requests:
+ memory: 200Mi
+ cpu: 200m
+ volumes:
+ - name: data
+# persistentVolumeClaim:
+# claimName: helm-minio
+ hostPath:
+ path: /var/lib/docker/minio-pv/
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-minio
+ namespace: wdd-rmdc
+spec:
+ selector:
+ app: helm-minio
+ ports:
+ - name: api
+ port: 9000
+ targetPort: 9000
+ nodePort: 39000
+ - name: console
+ port: 9001
+ targetPort: 9001
+ nodePort: 39001
+ type: NodePort
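+# Console is served on nodePort 39001, the S3 API on 39000. A hedged smoke
+# test with the MinIO client (mc), reusing the credentials from the
+# StatefulSet env:
+# mc alias set helm-minio http://<node-ip>:39000 cmii 'B#923fC7mk'
+# mc ls helm-minio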
diff --git a/69-202511-AI-GPU测试/k8s-emqx.yaml b/69-202511-AI-GPU测试/k8s-emqx.yaml
new file mode 100644
index 0000000..b32f932
--- /dev/null
+++ b/69-202511-AI-GPU测试/k8s-emqx.yaml
@@ -0,0 +1,392 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: helm-emqxs
+ namespace: wdd-rmdc
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-emqxs-env
+ namespace: wdd-rmdc
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: base-1.0
+data:
+  # Cluster discovery settings
+ EMQX_CLUSTER__DISCOVERY: "k8s"
+ EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc.cluster.local:443"
+ EMQX_CLUSTER__K8S__APP_NAME: "helm-emqxs"
+ EMQX_CLUSTER__K8S__SERVICE_NAME: "helm-emqxs-headless"
+ EMQX_CLUSTER__K8S__ADDRESS_TYPE: "dns"
+ EMQX_CLUSTER__K8S__NAMESPACE: "wdd-rmdc"
+ EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
+
+  # Disable anonymous access; deny when no ACL rule matches
+ EMQX_AUTH__ALLOW_ANONYMOUS: "false"
+ EMQX_AUTHZ__NO_MATCH: "deny"
+
+  # Initial Dashboard admin password (only applied on first startup)
+ EMQX_DASHBOARD__DEFAULT_PASSWORD: "odD8#Ve7.B"
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: helm-emqxs-init-script
+ namespace: wdd-rmdc
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+data:
+ init-mqtt-user.sh: |
+ #!/bin/sh
+ set -e
+
+ DASHBOARD_USER="admin"
+ DASHBOARD_PASS="odD8#Ve7.B"
+ MQTT_USER="admin"
+ MQTT_PASS="odD8#Ve7.B"
+
+ # Wait for the local EMQX API to come up
+ EMQX_API="http://localhost:18083/api/v5"
+ echo "Waiting for the EMQX API..."
+ for i in $(seq 1 120); do
+ if curl -s -f -m 5 "${EMQX_API}/status" > /dev/null 2>&1; then
+ echo "EMQX API is ready"
+ break
+ fi
+ echo "still waiting... ($i/120)"
+ sleep 5
+ done
+
+ # Change the Dashboard admin password
+ echo "Changing the Dashboard admin password..."
+ /opt/emqx/bin/emqx ctl admins passwd "${DASHBOARD_USER}" "${DASHBOARD_PASS}" || echo "password may already be set"
+ echo "Dashboard password set"
+
+ # Fetch a Dashboard API token
+ echo "Fetching a Dashboard token..."
+ TOKEN=$(curl -s -X POST "${EMQX_API}/login" \
+ -H 'Content-Type: application/json' \
+ -d "{\"username\":\"${DASHBOARD_USER}\",\"password\":\"${DASHBOARD_PASS}\"}" \
+ | grep -o '"token":"[^"]*' | cut -d'"' -f4)
+
+ if [ -z "$TOKEN" ]; then
+ echo "ERROR: could not obtain a token"
+ exit 1
+ fi
+ echo "Token obtained"
+
+ # Create built-in-database authenticators (listener-scoped)
+ echo "Checking/creating built-in database authenticators..."
+
+ # Add an authenticator for the tcp:default listener
+ echo "Configuring authenticator for listener tcp:default..."
+ curl -s -X POST "${EMQX_API}/authentication/tcp:default" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "mechanism": "password_based",
+ "backend": "built_in_database",
+ "user_id_type": "username",
+ "password_hash_algorithm": {
+ "name": "sha256",
+ "salt_position": "suffix"
+ }
+ }' 2>/dev/null || echo "tcp:default authenticator may already exist"
+
+ # Add an authenticator for the ws:default listener
+ echo "Configuring authenticator for listener ws:default..."
+ curl -s -X POST "${EMQX_API}/authentication/ws:default" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "mechanism": "password_based",
+ "backend": "built_in_database",
+ "user_id_type": "username",
+ "password_hash_algorithm": {
+ "name": "sha256",
+ "salt_position": "suffix"
+ }
+ }' 2>/dev/null || echo "ws:default authenticator may already exist"
+
+ # Give the authenticators a moment to settle
+ sleep 2
+
+ # Create the MQTT user
+ echo "Creating MQTT user: ${MQTT_USER}..."
+ curl -s -X POST "${EMQX_API}/authentication/password_based:built_in_database/users?listener_id=tcp:default" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "{\"user_id\":\"${MQTT_USER}\",\"password\":\"${MQTT_PASS}\",\"is_superuser\":true}" \
+ 2>/dev/null || echo "user may already exist; trying an update..."
+
+ # Try updating the password instead
+ curl -s -X PUT "${EMQX_API}/authentication/password_based:built_in_database/users/${MQTT_USER}?listener_id=tcp:default" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "{\"password\":\"${MQTT_PASS}\",\"is_superuser\":true}" \
+ 2>/dev/null || true
+
+ echo "MQTT 用户创建/更新完成"
+
+ # 创建授权规则
+ echo "配置授权规则..."
+
+ # 创建内置数据库授权源
+ curl -s -X POST "${EMQX_API}/authorization/sources" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "type": "built_in_database",
+ "enable": true
+ }' 2>/dev/null || echo "authorization source may already exist"
+
+ sleep 2
+
+ # Add ACL rules for the admin user (the POST endpoint expects an array)
+ echo "Adding ACL rules for user ${MQTT_USER}..."
+ curl -s -X POST "${EMQX_API}/authorization/sources/built_in_database/rules/users" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "[{\"username\":\"${MQTT_USER}\",\"rules\":[{\"action\":\"all\",\"permission\":\"allow\",\"topic\":\"#\"}]}]" \
+ 2>/dev/null && echo "ACL rules created" || echo "rules may already exist; trying an update..."
+
+ # Try updating the rules (the PUT endpoint expects a single object, not an array)
+ curl -s -X PUT "${EMQX_API}/authorization/sources/built_in_database/rules/users/${MQTT_USER}" \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H 'Content-Type: application/json' \
+ -d "{\"rules\":[{\"action\":\"all\",\"permission\":\"allow\",\"topic\":\"#\"}]}" \
+ 2>/dev/null && echo "ACL rules updated" || true
+
+ echo "ACL 规则配置完成"
+ echo "初始化完成!MQTT 用户: ${MQTT_USER}"
+ echo "可通过以下方式连接:"
+ echo " - MQTT: localhost:1883"
+ echo " - WebSocket: localhost:8083"
+ echo " - Dashboard: http://localhost:18083"
+ echo " - 用户名: ${MQTT_USER}"
+
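+# A minimal end-to-end check once the pod is Ready (a sketch; assumes the
+# mosquitto clients are installed on a machine that can reach a node, and uses
+# the NodePort 31883 defined in the Service below):
+# mosquitto_sub -h <node-ip> -p 31883 -u admin -P 'odD8#Ve7.B' -t 'demo/#' -v &
+# mosquitto_pub -h <node-ip> -p 31883 -u admin -P 'odD8#Ve7.B' -t demo/ping -m hello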
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: helm-emqxs
+ namespace: wdd-rmdc
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: base-1.0
+spec:
+ replicas: 1
+ serviceName: helm-emqxs-headless
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ template:
+ metadata:
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: base-1.0
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - worker-3-192.168.40.63
+ imagePullSecrets:
+ - name: harborsecret
+ serviceAccountName: helm-emqxs
+ containers:
+ - name: helm-emqxs
+ image: harbor.cdcyy.com.cn/cmii/emqx:5.8.8
+ imagePullPolicy: Always
+ ports:
+ - name: mqtt
+ containerPort: 1883
+ - name: mqttssl
+ containerPort: 8883
+ - name: mgmt
+ containerPort: 8081
+ - name: ws
+ containerPort: 8083
+ - name: wss
+ containerPort: 8084
+ - name: dashboard
+ containerPort: 18083
+ - name: ekka
+ containerPort: 4370
+ envFrom:
+ - configMapRef:
+ name: helm-emqxs-env
+ # Lifecycle hook: run the init script once the container is up
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - |
+ # Run the init script in the background so it does not block container startup
+ nohup /bin/sh /scripts/init-mqtt-user.sh > /tmp/init.log 2>&1 &
+ # Probes: ensure the EMQX API is up before the pod is marked ready
+ livenessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ readinessProbe:
+ httpGet:
+ path: /status
+ port: 18083
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ resources: {}
+ volumeMounts:
+ # Default data dir in EMQX 5.x; holds all persisted state
+ - name: emqx-data
+ mountPath: "/opt/emqx/data"
+ readOnly: false
+ - name: init-script
+ mountPath: /scripts
+ volumes:
+ - name: emqx-data
+ hostPath:
+ path: /var/lib/docker/emqx
+ - name: init-script
+ configMap:
+ name: helm-emqxs-init-script
+ defaultMode: 0755
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-emqxs
+ namespace: wdd-rmdc
+rules:
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - watch
+ - list
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: helm-emqxs
+ namespace: wdd-rmdc
+subjects:
+ - kind: ServiceAccount
+ name: helm-emqxs
+ namespace: wdd-rmdc
+roleRef:
+ kind: Role
+ name: helm-emqxs
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs
+ namespace: wdd-rmdc
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: base-1.0
+spec:
+ type: NodePort
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - port: 1883
+ name: mqtt
+ targetPort: 1883
+ nodePort: 31883
+ - port: 18083
+ name: dashboard
+ targetPort: 18083
+ nodePort: 38085
+ - port: 8083
+ name: mqtt-websocket
+ targetPort: 8083
+ nodePort: 38083
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: helm-emqxs-headless
+ namespace: wdd-rmdc
+ labels:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ helm.sh/chart: emqx-1.1.0
+ app.kubernetes.io/managed-by: octopus-control
+ app.kubernetes.io/version: base-1.0
+spec:
+ type: ClusterIP
+ clusterIP: None
+ selector:
+ cmii.type: middleware
+ cmii.app: helm-emqxs
+ cmii.emqx.architecture: cluster
+ ports:
+ - name: mqtt
+ port: 1883
+ protocol: TCP
+ targetPort: 1883
+ - name: mqttssl
+ port: 8883
+ protocol: TCP
+ targetPort: 8883
+ - name: mgmt
+ port: 8081
+ protocol: TCP
+ targetPort: 8081
+ - name: websocket
+ port: 8083
+ protocol: TCP
+ targetPort: 8083
+ - name: wss
+ port: 8084
+ protocol: TCP
+ targetPort: 8084
+ - name: dashboard
+ port: 18083
+ protocol: TCP
+ targetPort: 18083
+ - name: ekka
+ port: 4370
+ protocol: TCP
+ targetPort: 4370
+---
+
+# Follow the init script's progress with: tail -f /tmp/init.log
\ No newline at end of file
diff --git a/69-202511-AI-GPU测试/k8s-postgresql-timescaledb.yaml b/69-202511-AI-GPU测试/k8s-postgresql-timescaledb.yaml
new file mode 100644
index 0000000..f31240a
--- /dev/null
+++ b/69-202511-AI-GPU测试/k8s-postgresql-timescaledb.yaml
@@ -0,0 +1,142 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: timescaledb-config
+ namespace: wdd-rmdc
+data:
+ postgresql.conf: |
+    # Memory tuning
+ shared_buffers = 4GB
+ effective_cache_size = 12GB
+ maintenance_work_mem = 1GB
+ checkpoint_completion_target = 0.9
+ wal_buffers = 16MB
+ default_statistics_target = 100
+ random_page_cost = 1.1
+ effective_io_concurrency = 200
+ work_mem = 128MB
+ min_wal_size = 2GB
+ max_wal_size = 8GB
+ max_worker_processes = 8
+ max_parallel_workers_per_gather = 4
+ max_parallel_workers = 8
+ max_parallel_maintenance_workers = 4
+
+    # TimescaleDB tuning
+ timescaledb.max_background_workers = 8
+ shared_preload_libraries = 'timescaledb'
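+# Note (hedged): the stock postgres entrypoint reads $PGDATA/postgresql.conf,
+# so a file mounted at /etc/postgresql/postgresql.conf only applies if the
+# server is started with -c config_file=/etc/postgresql/postgresql.conf.
+# A quick way to confirm the overrides are live, assuming the default pod name:
+# kubectl -n wdd-rmdc exec timescaledb-0 -- psql -U postgres -c "SHOW shared_buffers;"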
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: timescaledb
+ namespace: wdd-rmdc
+spec:
+ serviceName: timescaledb
+ replicas: 1
+ selector:
+ matchLabels:
+ app: timescaledb
+ template:
+ metadata:
+ labels:
+ app: timescaledb
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - worker-2-192.168.40.62
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: timescaledb
+ image: harbor.cdcyy.com.cn/cmii/timescaledb:2.24.0-pg17
+ ports:
+ - containerPort: 5432
+ name: postgresql
+ env:
+ - name: POSTGRES_PASSWORD
+ value: "admin@123"
+ - name: POSTGRES_USER
+ value: "postgres"
+ - name: POSTGRES_DB
+ value: "timescaledb"
+ - name: PGDATA
+ value: /var/lib/postgresql/data/pgdata
+ - name: TS_TUNE_MEMORY
+ value: "16GB"
+ - name: TS_TUNE_NUM_CPUS
+ value: "8"
+ resources:
+ requests:
+ memory: "8Gi"
+ cpu: "4"
+ limits:
+ memory: "16Gi"
+ cpu: "4"
+ volumeMounts:
+ - name: data
+ mountPath: /var/lib/postgresql/data
+ - name: config
+ mountPath: /etc/postgresql/postgresql.conf
+ subPath: postgresql.conf
+ livenessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - pg_isready -U postgres
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - pg_isready -U postgres
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ volumes:
+ - name: config
+ configMap:
+ name: timescaledb-config
+ - name: data
+ hostPath:
+ path: /var/lib/docker/postgresql_data
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: timescaledb
+ namespace: wdd-rmdc
+spec:
+ type: NodePort
+ ports:
+ - port: 5432
+ targetPort: 5432
+ nodePort: 35432
+ protocol: TCP
+ name: postgresql
+ selector:
+ app: timescaledb
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: timescaledb-headless
+ namespace: wdd-rmdc
+spec:
+ clusterIP: None
+ ports:
+ - port: 5432
+ targetPort: 5432
+ protocol: TCP
+ name: postgresql
+ selector:
+ app: timescaledb
diff --git a/69-202511-AI-GPU测试/k8s-timescaledb-16C32GB-prod.yaml b/69-202511-AI-GPU测试/k8s-timescaledb-16C32GB-prod.yaml
new file mode 100644
index 0000000..01ac93e
--- /dev/null
+++ b/69-202511-AI-GPU测试/k8s-timescaledb-16C32GB-prod.yaml
@@ -0,0 +1,187 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: timescaledb-rmdc-config
+ namespace: cmii-rmdc
+data:
+ postgresql.conf: |
+    # ========== Core memory tuning (32GB) ==========
+    # shared_buffers: 25% of total RAM is the usual advice below 32GB
+    shared_buffers = 8GB
+
+    # effective_cache_size: 50-75% of total RAM
+    effective_cache_size = 24GB
+
+    # work_mem: (Total RAM * 0.25) / max_connections
+    # (32GB * 0.25) / 150 = 8192MB / 150 ≈ 54MB, rounded up here to 128MB
+    work_mem = 128MB
+
+    # maintenance_work_mem: 1-2GB suggested; 2GB is fine with 32GB RAM
+    maintenance_work_mem = 2GB
+
+    # ========== WAL parameters ==========
+    wal_buffers = 32MB
+    min_wal_size = 4GB
+    max_wal_size = 16GB
+    checkpoint_completion_target = 0.9
+    wal_compression = on
+
+    # ========== Query planner ==========
+    default_statistics_target = 100
+    random_page_cost = 1.1
+    effective_io_concurrency = 200
+
+    # ========== Parallel query ==========
+    max_worker_processes = 16
+    max_parallel_workers_per_gather = 4
+    max_parallel_workers = 8
+    max_parallel_maintenance_workers = 4
+
+    # ========== Connections ==========
+    max_connections = 150
+
+    # ========== TimescaleDB-specific tuning (incl. time-series compression) ==========
+    timescaledb.max_background_workers = 8
+    shared_preload_libraries = 'timescaledb'
+
+    # ========== Logging ==========
+ logging_collector = on
+ log_min_duration_statement = 1000
+ log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '
+ log_checkpoints = on
+
+    # ========== Autovacuum tuning ==========
+ autovacuum_max_workers = 4
+ autovacuum_naptime = 10s
+ autovacuum_vacuum_scale_factor = 0.05
+ autovacuum_analyze_scale_factor = 0.02
+
+    # ========== Misc performance ==========
+ checkpoint_timeout = 15min
+ bgwriter_delay = 200ms
+ bgwriter_lru_maxpages = 100
+ bgwriter_lru_multiplier = 2.0
+
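+# Operational note (hedged): shared_buffers, max_connections and
+# shared_preload_libraries only change across a full restart; most other
+# settings can be reloaded in place:
+# kubectl -n cmii-rmdc rollout restart statefulset timescaledb-rmdc
+# kubectl -n cmii-rmdc exec timescaledb-rmdc-0 -- psql -U postgres -c "SELECT pg_reload_conf();"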
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: timescaledb-rmdc
+ namespace: cmii-rmdc
+spec:
+ serviceName: timescaledb-rmdc-svc
+ replicas: 1
+ selector:
+ matchLabels:
+ app: timescaledb-rmdc
+ template:
+ metadata:
+ labels:
+ app: timescaledb-rmdc
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - worker-1-192.168.40.61
+ imagePullSecrets:
+ - name: harborsecret
+ containers:
+ - name: timescaledb
+ image: harbor.wdd.io:8033/rmdc/timescaledb:2.24.0-pg17
+ ports:
+ - containerPort: 5432
+ name: postgresql
+ env:
+ - name: POSTGRES_PASSWORD
+ value: "Super@Boge.123"
+ - name: POSTGRES_USER
+ value: "postgres"
+ - name: POSTGRES_DB
+ value: "timescaledb"
+ - name: PGDATA
+ value: /var/lib/postgresql/data/pgdata
+ - name: TS_TUNE_MEMORY
+ value: "32GB"
+ - name: TS_TUNE_NUM_CPUS
+ value: "16"
+ resources:
+ requests:
+ memory: "16Gi"
+ cpu: "8"
+ limits:
+ memory: "32Gi"
+ cpu: "16"
+ volumeMounts:
+ - name: data
+ mountPath: /var/lib/postgresql/data
+ - name: config
+ mountPath: /etc/postgresql/postgresql.conf
+ subPath: postgresql.conf
+ livenessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - pg_isready -U postgres
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - pg_isready -U postgres
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ timeoutSeconds: 3
+ failureThreshold: 3
+ volumes:
+ - name: config
+ configMap:
+ name: timescaledb-rmdc-config
+ - name: data
+ hostPath:
+ path: /var/lib/docker/postgresql_data
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: timescaledb-rmdc-svc
+ namespace: cmii-rmdc
+spec:
+ type: NodePort
+ ports:
+ - port: 5432
+ targetPort: 5432
+ nodePort: 35435
+ protocol: TCP
+ name: postgresql
+ selector:
+ app: timescaledb-rmdc
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: timescaledb-rmdc-svc-headless
+ namespace: cmii-rmdc
+spec:
+ clusterIP: None
+ ports:
+ - port: 5432
+ targetPort: 5432
+ protocol: TCP
+ name: postgresql
+ selector:
+ app: timescaledb-rmdc
\ No newline at end of file
diff --git a/69-202511-AI-GPU测试/kubernetes-dashboard-v2.7.0.yaml b/69-202511-AI-GPU测试/kubernetes-dashboard-v2.7.0.yaml
new file mode 100644
index 0000000..1c62d42
--- /dev/null
+++ b/69-202511-AI-GPU测试/kubernetes-dashboard-v2.7.0.yaml
@@ -0,0 +1,316 @@
+# ------------------- Dashboard Namespace ------------------- #
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: kubernetes-dashboard
+
+---
+# ------------------- Service Account ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Service (NodePort 39999) ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ type: NodePort
+ ports:
+ - port: 443
+ targetPort: 8443
+ nodePort: 39999
+ selector:
+ k8s-app: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Secrets ------------------- #
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-certs
+ namespace: kubernetes-dashboard
+type: Opaque
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-csrf
+ namespace: kubernetes-dashboard
+type: Opaque
+data:
+ csrf: ""
+
+---
+# ------------------- Dashboard Role (FIXED) ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+rules:
+  # [FIX] allow creating Secrets, resolving the panic issue
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create"]
+  # Allow operations on the specific Secrets below
+ - apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
+ verbs: ["get", "update", "delete"]
+  # ConfigMap permissions
+ - apiGroups: [""]
+ resources: ["configmaps"]
+ resourceNames: ["kubernetes-dashboard-settings"]
+ verbs: ["get", "update"]
+  # Metrics permissions
+ - apiGroups: [""]
+ resources: ["services"]
+ resourceNames: ["heapster", "dashboard-metrics-scraper"]
+ verbs: ["proxy"]
+ - apiGroups: [""]
+ resources: ["services/proxy"]
+ resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
+ verbs: ["get"]
+
+---
+# ------------------- Dashboard RoleBinding ------------------- #
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard-minimal
+ namespace: kubernetes-dashboard
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kubernetes-dashboard-minimal
+subjects:
+ - kind: ServiceAccount
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- Dashboard Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ name: kubernetes-dashboard
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: kubernetes-dashboard
+ template:
+ metadata:
+ labels:
+ k8s-app: kubernetes-dashboard
+ spec:
+ containers:
+ - name: kubernetes-dashboard
+ image: kubernetesui/dashboard:v2.7.0
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8443
+ protocol: TCP
+ args:
+ - --auto-generate-certificates
+ - --namespace=kubernetes-dashboard
+ volumeMounts:
+ - name: kubernetes-dashboard-certs
+ mountPath: /certs
+ - mountPath: /tmp
+ name: tmp-volume
+ livenessProbe:
+ httpGet:
+ scheme: HTTPS
+ path: /
+ port: 8443
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ volumes:
+ - name: kubernetes-dashboard-certs
+ secret:
+ secretName: kubernetes-dashboard-certs
+ - name: tmp-volume
+ emptyDir: {}
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ "kubernetes.io/hostname": master-192.168.40.50
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ------------------- Metrics Scraper Service ------------------- #
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ ports:
+ - port: 8000
+ targetPort: 8000
+ selector:
+ k8s-app: dashboard-metrics-scraper
+
+---
+# ------------------- Metrics Scraper Deployment ------------------- #
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ name: dashboard-metrics-scraper
+ namespace: kubernetes-dashboard
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: dashboard-metrics-scraper
+ template:
+ metadata:
+ labels:
+ k8s-app: dashboard-metrics-scraper
+ annotations:
+ seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
+ spec:
+ containers:
+ - name: dashboard-metrics-scraper
+ image: kubernetesui/metrics-scraper:v1.0.8
+ ports:
+ - containerPort: 8000
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ scheme: HTTP
+ path: /
+ port: 8000
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-volume
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsUser: 1001
+ runAsGroup: 2001
+ serviceAccountName: kubernetes-dashboard
+ nodeSelector:
+ "kubernetes.io/os": linux
+ "kubernetes.io/hostname": master-192.168.40.50
+ volumes:
+ - name: tmp-volume
+ emptyDir: {}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+
+---
+# ==================================================================
+# Custom user configuration (ADMIN & READ-ONLY)
+# ==================================================================
+
+# ------------------- 1. Admin User (full privileges) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: admin-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: admin-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: admin-user
+ namespace: kubernetes-dashboard
+
+---
+# ------------------- 2. Read-Only User (read-only + log access) ------------------- #
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: read-only-user
+ namespace: kubernetes-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: dashboard-view-with-logs
+rules:
+ - apiGroups: [""]
+ resources: ["configmaps", "endpoints", "persistentvolumeclaims", "pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "nodes", "persistentvolumeclaims", "persistentvolumes", "namespaces"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["pods/log"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["apps"]
+ resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["batch"]
+ resources: ["cronjobs", "jobs"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["ingresses", "networkpolicies"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["events.k8s.io"]
+ resources: ["events"]
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: read-only-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: dashboard-view-with-logs
+subjects:
+ - kind: ServiceAccount
+ name: read-only-user
+ namespace: kubernetes-dashboard
\ No newline at end of file
diff --git a/69-202511-AI-GPU测试/rke-13014-cluster-security.yml b/69-202511-AI-GPU测试/rke-13014-cluster-security.yml
index 3f5d480..3532eba 100644
--- a/69-202511-AI-GPU测试/rke-13014-cluster-security.yml
+++ b/69-202511-AI-GPU测试/rke-13014-cluster-security.yml
@@ -24,6 +24,31 @@ nodes:
internal_address: 192.168.119.106
labels:
uavcloud.env: demo
+ - address: 192.168.40.61
+ user: root
+ role:
+ - worker
+ internal_address: 192.168.40.61
+ hostname_override: "worker-1-192.168.40.61"
+ labels:
+ uavcloud.env: demo
+ - address: 192.168.40.62
+ user: root
+ role:
+ - worker
+ internal_address: 192.168.40.62
+ hostname_override: "worker-2-192.168.40.62"
+ labels:
+ uavcloud.env: demo
+ - address: 192.168.40.63
+ user: root
+ role:
+ - worker
+ internal_address: 192.168.40.63
+ hostname_override: "worker-3-192.168.40.63"
+ labels:
+ uavcloud.env: demo
+
authentication:
strategy: x509
@@ -80,7 +105,7 @@ services:
service_cluster_ip_range: 10.74.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
- always_pull_images: true
+ always_pull_images: false
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
@@ -141,7 +166,8 @@ network:
mtu: 1440
options:
flannel_backend_type: vxlan
- plugin: calico
+
+ plugin: flannel
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
diff --git a/69-202511-AI-GPU测试/tpu_plugin_pcie.yaml b/69-202511-AI-GPU测试/tpu_plugin_pcie.yaml
new file mode 100644
index 0000000..b29a388
--- /dev/null
+++ b/69-202511-AI-GPU测试/tpu_plugin_pcie.yaml
@@ -0,0 +1,75 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: sophon-device-plugin
+ namespace: kube-system
+ labels:
+ app: sophon-device-plugin
+spec:
+ selector:
+ matchLabels:
+ app: sophon-device-plugin
+ template:
+ metadata:
+ labels:
+ app: sophon-device-plugin
+ spec:
+ containers:
+ - name: sophon-device-plugin
+ image: sophon-device-plugin:1.1.9
+        imagePullPolicy: IfNotPresent # must be set here
+ securityContext:
+ privileged: true
+ env:
+        - name: TPU_INSTANCE_NUM # only takes effect when the target num exceeds the physical chip count
+ value: "0"
+ - name: MIX_MODE_CHIP_NUM # only for bm1686 chips
+ value: "0"
+ - name: NAME_WITH_SN_MODE
+ value: "0"
+# - name: TPU_INSTANCE_NUM_PER_CHIP
+# value: "1:1:1"
+ volumeMounts:
+ - name: devfs
+ mountPath: /dev
+ - name: sysfs
+ mountPath: /sys
+ - name: kubeletsockets
+ mountPath: /var/lib/kubelet/device-plugins
+ - name: libsophon
+ mountPath: /opt/tpuv7
+ - name: envs
+ mountPath: /etc
+ - name: usrsbin
+ mountPath: /usr/sbin
+ - name: usershare
+ mountPath: /usr/share
+ - name: usr
+ mountPath: /usr
+ nodeSelector:
+ "kubernetes.io/hostname": 192.168.119.105
+ volumes:
+ - name: devfs
+ hostPath:
+ path: /dev
+ - name: sysfs
+ hostPath:
+ path: /sys
+ - name: kubeletsockets
+ hostPath:
+ path: /var/lib/kubelet/device-plugins
+ - name: libsophon
+ hostPath:
+ path: /opt/tpuv7
+ - name: envs
+ hostPath:
+ path: /etc
+ - name: usrsbin
+ hostPath:
+ path: /usr/sbin
+ - name: usershare
+ hostPath:
+ path: /usr/share
+ - name: usr
+ hostPath:
+ path: /usr
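+# A hedged way to confirm the plugin registered with the kubelet (the exact
+# resource name it advertises depends on the plugin build):
+# kubectl -n kube-system logs ds/sophon-device-plugin
+# kubectl describe node 192.168.119.105 | grep -A8 Allocatable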
diff --git a/70-202511-XA低空平台/cmii-update.sh b/70-202511-XA低空平台/cmii-update.sh
new file mode 100644
index 0000000..6a4cab5
--- /dev/null
+++ b/70-202511-XA低空平台/cmii-update.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+harbor_host=10.22.48.3:8033
+namespace=xafkapp
+app_name=""
+new_tag=""
+
+download_from_oss() {
+ if [ "$1" == "" ]; then
+ echo "no zip file in error!"
+ exit 233
+ fi
+
+ echo "start to download => $1"
+ wget "https://oss.demo.uavcmlc.com/cmlc-installation/tmp/$1"
+
+ echo ""
+ echo ""
+}
+
+upload_image_to_harbor(){
+ if [ "$app_name" == "" ]; then
+ echo "app name null exit!"
+ exit 233
+ fi
+
+  if ! docker load < "$1"; then
+    echo "docker load failed!"
+    exit 233
+  fi
+ docker tag "harbor.cdcyy.com.cn/cmii/$app_name:$new_tag" "$harbor_host/cmii/$app_name:$new_tag"
+ echo ""
+ echo ""
+ echo "upload_image_to_harbor - start to push to => $harbor_host/cmii/$app_name:$new_tag"
+ docker login -u admin -p V2ryStr@ngPss $harbor_host
+ docker push "$harbor_host/cmii/$app_name:$new_tag"
+ echo ""
+ echo ""
+
+}
+
+parse_args(){
+ if [ "$1" == "" ]; then
+ echo "no zip file in error!"
+ exit 233
+ fi
+ local image_name="$1"
+
+ # cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
+ app_name=$(echo $image_name | cut -d "=" -f1)
+ new_tag=$(echo $image_name | cut -d "=" -f2)
+}
+
+update_image_tag(){
+ if [ "$new_tag" == "" ]; then
+ echo "new tag error!"
+ exit 233
+ fi
+
+ local image_prefix=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}' | cut -d":" -f1)
+
+ echo "image grep is => ${image_prefix}"
+
+ echo "start to update ${namespace} ${app_name} to ${new_tag} !"
+ echo ""
+ kubectl -n ${namespace} patch deployment "${app_name}" -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"${app_name}\",\"image\": \"${harbor_host}/cmii/$app_name:${new_tag}\"}]}}}}"
+ echo ""
+ echo "start to wait for 3 seconds!"
+ sleep 3
+ local image_new=$(kubectl -n ${namespace} get deployment "${app_name}" -o=jsonpath='{.spec.template.spec.containers[*].image}')
+ echo ""
+ echo "new image are => $image_new"
+ echo ""
+}
+
+main(){
+ parse_args "$1"
+ download_from_oss "$1"
+ upload_image_to_harbor "$1"
+ update_image_tag
+}
+
+main "$@"
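+
+# Usage sketch (the filename encodes app name and tag, as parse_args expects):
+#   ./cmii-update.sh cmii-uav-surveillance=5.2.0-27031-cqga=2024-03-04=573.tar.gz
+# -> app_name=cmii-uav-surveillance, new_tag=5.2.0-27031-cqga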
\ No newline at end of file
diff --git a/998-常用脚本/a-部署脚本/清理rke集群的安装.sh b/998-常用脚本/a-部署脚本/清理rke集群的安装.sh
index 41916f3..0e197c8 100644
--- a/998-常用脚本/a-部署脚本/清理rke集群的安装.sh
+++ b/998-常用脚本/a-部署脚本/清理rke集群的安装.sh
@@ -54,8 +54,10 @@ clean_rke_cluster() {
ip6tables -F && ip6tables -t nat -F && ip6tables -t mangle -F && ip6tables -t raw -F
- rke remove --force
- printf "y/n" | docker container prune
+ rke remove --ignore-docker-version --force
+ printf "y" | docker container prune
+
+
rke -d up
}
diff --git a/998-常用脚本/cmii-hosts.txt b/998-常用脚本/cmii-hosts.txt
new file mode 100644
index 0000000..2e75d41
--- /dev/null
+++ b/998-常用脚本/cmii-hosts.txt
@@ -0,0 +1,5 @@
+172.16.243.130 mlogin.hq.cmcc
+172.21.200.213 cmoa.hq.cmcc
+172.21.197.38 todo.hq.cmcc
+172.21.180.176 oa.hq.cmcc
+192.168.78.36 oa.cdcyy.cn
\ No newline at end of file
diff --git a/998-常用脚本/备份脚本/关停全部的服务.sh b/998-常用脚本/备份脚本/关停全部的服务.sh
index 47344da..8d33edc 100644
--- a/998-常用脚本/备份脚本/关停全部的服务.sh
+++ b/998-常用脚本/备份脚本/关停全部的服务.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-namespace=xakny
+namespace=xafkapp
# Gracefully scale down Deployments
scale_deployments() {
@@ -18,4 +18,4 @@ scale_statefulsets() {
}
scale_deployments
-#scale_statefulsets
+scale_statefulsets
diff --git a/998-常用脚本/备份脚本/备份命名空间.sh b/998-常用脚本/备份脚本/备份命名空间.sh
index b0d0b29..b442ac3 100644
--- a/998-常用脚本/备份脚本/备份命名空间.sh
+++ b/998-常用脚本/备份脚本/备份命名空间.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-namespace=xakny
+namespace=xafkapp
install_yq() {
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq
diff --git a/998-常用脚本/故障恢复脚本/删除状态不为Running的Pod.sh b/998-常用脚本/故障恢复脚本/删除状态不为Running的Pod.sh
index 59a7877..ce9cbf6 100644
--- a/998-常用脚本/故障恢复脚本/删除状态不为Running的Pod.sh
+++ b/998-常用脚本/故障恢复脚本/删除状态不为Running的Pod.sh
@@ -6,7 +6,7 @@ wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/jq-linux-am
chmod +x /usr/local/bin/jq
-export name_space=eedsjc-uavms
+export name_space=xafkapp
kubectl delete pods -n $name_space --field-selector status.phase!=Running --force
diff --git a/998-常用脚本/更新脚本/副本数调整.sh b/998-常用脚本/更新脚本/副本数调整.sh
index 1d934da..7bfd435 100644
--- a/998-常用脚本/更新脚本/副本数调整.sh
+++ b/998-常用脚本/更新脚本/副本数调整.sh
@@ -2,8 +2,6 @@
name_space=xmyd
-
-
kubectl get deployments -n ${name_space} -o custom-columns='NAME:.metadata.name,REPLICAS:.spec.replicas' --no-headers > deployments_replicas.txt
diff --git a/998-常用脚本/研发环境相关DEMO/Core集群磁盘整理.sh b/998-常用脚本/研发环境相关DEMO/Core集群磁盘整理.sh
index 33b44ba..e03c782 100644
--- a/998-常用脚本/研发环境相关DEMO/Core集群磁盘整理.sh
+++ b/998-常用脚本/研发环境相关DEMO/Core集群磁盘整理.sh
@@ -11,7 +11,7 @@ umount /dev/mapper/iovg-lvdata
umount /dev/mapper/rootvg-lvtmp
umount /dev/mapper/rootvg-lvswap
-umount /dev/mapper/centos-swap
+umount /dev/mapper/openeuler-swap
lvdisplay
diff --git a/998-常用脚本/磁盘脚本/0-挂载磁盘.sh b/998-常用脚本/磁盘脚本/0-挂载磁盘.sh
index 4cd13c0..d7f1b37 100644
--- a/998-常用脚本/磁盘脚本/0-挂载磁盘.sh
+++ b/998-常用脚本/磁盘脚本/0-挂载磁盘.sh
@@ -50,20 +50,20 @@ t
8e
w
-" | fdisk /dev/vdb
+" | fdisk /dev/sdb
partprobe
# If the volume group already exists, extend it instead:
# vgextend rootvg /dev/sdc1
-vgcreate ${VG_NAME} /dev/vdb1
+vgcreate ${VG_NAME} /dev/sdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to the actual disk
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
#mkfs.ext4 /dev/mapper/${VG_NAME}-lvdata
-mkdir -p /home/app-plus
-#mkdir -p /var/lib/docker
-#selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
-export selffstab="/dev/mapper/${VG_NAME}-lvdata /home/app-plus xfs defaults 0 0"
+#mkdir -p /home/app-plus
+mkdir -p /var/lib/docker
+selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
+#export selffstab="/dev/mapper/${VG_NAME}-lvdata /home/app-plus xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
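
# Verify the result:
# lsblk /dev/sdb
# df -h /var/lib/docker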