commit 437acbeb63
Author: zeaslity
Date: 2024-10-30 16:30:51 +08:00
3363 changed files with 653948 additions and 0 deletions

@@ -0,0 +1,9 @@
kind: Secret
apiVersion: v1
metadata:
name: harborsecret
namespace: kube-system
data:
.dockerconfigjson: >-
ewoJImF1dGhzIjogewoJCSJoYXJib3ItcWEuc3JlLmNkY3l5LmNuIjogewoJCQkiYXV0aCI6ICJjbUZrTURKZlpISnZibVU2UkhKdmJtVkFNVEl6TkE9PSIKCQl9LAogICAgICAgICAgICAgICAgImhhcmJvci5jZGN5eS5jb20uY24iOiB7CgkJCSJhdXRoIjogImNtRmtNREpmWkhKdmJtVTZSSEp2Ym1WQU1USXpOQT09IgoJCX0KCX0sCgkiSHR0cEhlYWRlcnMiOiB7CgkJIlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xOS4wMy45IChsaW51eCkiCgl9Cn0=
type: kubernetes.io/dockerconfigjson
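The .dockerconfigjson value above is the base64-encoded Docker registry credential blob for the two Harbor hosts. If the credentials ever need to be rotated, an equivalent Secret can be regenerated with kubectl; a minimal sketch, with the username, password and output filename left as placeholders:

kubectl create secret docker-registry harborsecret \
  --namespace kube-system \
  --docker-server=harbor.cdcyy.com.cn \
  --docker-username='<harbor-user>' \
  --docker-password='<harbor-password>' \
  --dry-run=client -o yaml > harborsecret.yaml

Note that kubectl encodes a single registry per invocation; the committed Secret carries auth entries for both Harbor hostnames, so the second entry would need to be merged in by hand.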

@@ -0,0 +1,312 @@
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 30554
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: kubernetes-dashboard
image: harbor.cdcyy.com.cn/cmii/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
imagePullSecrets:
- name: harborsecret
containers:
- name: dashboard-metrics-scraper
image: harbor.cdcyy.com.cn/cmii/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
---
# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
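Applying this file and then reading the admin-user token (the command in the comment above) is all that is needed to log in; a short usage sketch, with the manifest filename and node address as placeholders:

kubectl apply -f kubernetes-dashboard.yaml
kubectl -n kube-system get pods -l k8s-app=kubernetes-dashboard
# The dashboard Service is a NodePort, so it is reachable at https://<any-node-ip>:30554
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')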

@@ -0,0 +1,95 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: kube-vip-ds
app.kubernetes.io/version: v0.8.0
name: kube-vip-ds
namespace: kube-system
spec:
selector:
matchLabels:
app.kubernetes.io/name: kube-vip-ds
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: kube-vip-ds
app.kubernetes.io/version: v0.8.0
spec:
imagePullSecrets:
- name: harborsecret
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/etcd
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/controlplane
operator: Exists
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: svc_enable
value: "true"
- name: svc_leasename
value: plndr-svcs-lock
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.35.178
- name: prometheus_server
value: :3112
image: harbor.cdcyy.com.cn/cmii/kube-vip:v0.8.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
hostNetwork: true
serviceAccountName: kube-vip
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
updateStrategy: {}
status:
currentNumberScheduled: 0
desiredNumberScheduled: 0
numberMisscheduled: 0
numberReady: 0
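Once this DaemonSet and the RBAC objects in the next file are applied, kube-vip should claim the control-plane VIP 192.168.35.178 on eth0 and answer on port 6443; a quick verification sketch using only values taken from the manifest above:

kubectl -n kube-system get pods -l app.kubernetes.io/name=kube-vip-ds -o wide
ip addr show eth0 | grep 192.168.35.178   # the address is held only by the current leader node
curl -k https://192.168.35.178:6443/version   # any TLS answer here proves the VIP is being served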

@@ -0,0 +1,41 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-vip
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
name: system:kube-vip-role
rules:
- apiGroups: [""]
resources: ["services/status"]
verbs: ["update"]
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["list","get","watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list","get","watch", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["list", "get", "watch", "update", "create"]
- apiGroups: ["discovery.k8s.io"]
resources: ["endpointslices"]
verbs: ["list","get","watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-vip-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-vip-role
subjects:
- kind: ServiceAccount
name: kube-vip
namespace: kube-system
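The DaemonSet runs under the kube-vip ServiceAccount defined here, so these RBAC objects should be applied before (or together with) it; one possible order, with illustrative filenames:

kubectl apply -f kube-vip-rbac.yaml
kubectl apply -f kube-vip-daemonset.yaml
kubectl -n kube-system logs -l app.kubernetes.io/name=kube-vip-ds --tail=20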

@@ -0,0 +1,264 @@
nodes:
- address: dev-master-01
user: root
role:
- controlplane
- etcd
- worker
internal_address: 192.168.35.105
labels:
ingress-deploy: "true"
- address: dev-master-02
user: root
role:
- controlplane
- etcd
- worker
internal_address: 192.168.35.114
labels:
ingress-deploy: "true"
- address: dev-master-03
user: root
role:
- controlplane
- etcd
- worker
internal_address: 192.168.35.115
labels:
ingress-deploy: "true"
- address: dev-worker-01
user: root
role:
- worker
internal_address: 192.168.35.55
- address: dev-worker-02
user: root
role:
- worker
internal_address: 192.168.35.86
- address: dev-worker-03
user: root
role:
- worker
internal_address: 192.168.35.89
- address: dev-worker-04
user: root
role:
- worker
internal_address: 192.168.35.93
- address: dev-worker-05
user: root
role:
- worker
internal_address: 192.168.35.95
- address: dev-worker-06
user: root
role:
- worker
internal_address: 192.168.35.96
- address: dev-worker-07
user: root
role:
- worker
internal_address: 192.168.35.101
- address: dev-worker-08
user: root
role:
- worker
internal_address: 192.168.35.103
- address: dev-worker-09
user: root
role:
- worker
internal_address: 192.168.35.104
authentication:
strategy: x509
sans:
- "192.168.35.105"
- "192.168.35.114"
- "192.168.35.115"
- "192.168.35.178"
#private_registries:
# - url: A1C2IP:8033 # private registry address
# user: admin
# password: "V2ryStr@ngPss"
# is_default: true
##############################################################################
# Defaults to false; when set to true, RKE does not fail if an unsupported Docker version is detected
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
#ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 172.24.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the log verbosity to level 1
v: 1
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 172.28.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 172.24.0.0/16
# Add additional arguments to the Kubernetes controller manager
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the log verbosity to level 1
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 172.24.0.10
# Fail if swap is on
fail_swap_on: false
# Optionally define additional volume binds to a service
extra_binds:
- "/data/minio-pv:/hostStorage" # do not modify; added for the MinIO PV
extra_args:
# Raise max pods from the default of 110 to 122
max-pods: 122
scheduler:
extra_args:
# Set the log verbosity to level 0
v: 0
kubeproxy:
extra_args:
# Set the log verbosity to level 1
v: 1
authorization:
mode: rbac
addon_job_timeout: 30
# Specify the network plug-in (canal, calico, flannel, weave, or none)
network:
options:
flannel_backend_type: vxlan
flannel_iface: eth0
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
plugin: calico
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal: {}
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 0
https_port: 0
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: "true"
options:
use-forwarded-headers: "true"
access-log-path: /var/log/nginx/access.log
client-body-timeout: '6000'
compute-full-forwarded-for: 'true'
enable-underscores-in-headers: 'true'
log-format-escape-json: 'true'
log-format-upstream: >-
{ "msec": "$msec", "connection": "$connection", "connection_requests":
"$connection_requests", "pid": "$pid", "request_id": "$request_id",
"request_length": "$request_length", "remote_addr": "$remote_addr",
"remote_user": "$remote_user", "remote_port": "$remote_port",
"http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
"$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
"request_uri": "$request_uri", "args": "$args", "status": "$status",
"body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
"http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
"http_host": "$http_host", "server_name": "$server_name", "request_time":
"$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
"$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
"upstream_response_time": "$upstream_response_time",
"upstream_response_length": "$upstream_response_length",
"upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
"$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
"request_method": "$request_method", "server_protocol": "$server_protocol",
"pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
"geoip_country_code": "$geoip_country_code" }
proxy-body-size: 5120m
proxy-read-timeout: '6000'
proxy-send-timeout: '6000'
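This cluster.yml is consumed by the RKE CLI from a host that can reach every node over SSH with the key at ssh_key_path; a typical bring-up, assuming the file is saved as cluster.yml in the working directory:

rke up --config cluster.yml
# RKE writes the kubeconfig next to the cluster file
export KUBECONFIG=$(pwd)/kube_config_cluster.yml
kubectl get nodes -o wide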

@@ -0,0 +1,62 @@
#!/bin/bash
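# Batch host-preparation helper: iterates over every cluster node over SSH.
# Earlier provisioning steps (root password, SSH keys, hostnames, /etc/hosts, Docker,
# kubectl/rke binaries, NFS client) are left commented out as a record of what was run;
# only the chrony time-sync installation below is currently active.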
master_server_list=(192.168.35.105 192.168.35.114 192.168.35.115)
worker_server_list=(192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104)
all_server_list=(192.168.35.105 192.168.35.114 192.168.35.115 192.168.35.55 192.168.35.86 192.168.35.89 192.168.35.93 192.168.35.95 192.168.35.96 192.168.35.101 192.168.35.103 192.168.35.104)
count=1
for server in "${all_server_list[@]}";do
echo "server is ${server}"
# sshpass -p "scyd@lab1234" ssh root@"${server}" "echo \"root:tY\$@Go7jS#h2\" | chpasswd"
#
# sshpass -p "scyd@lab1234" ssh root@"${server}" "echo \"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBO6IjOcco9eAvA/T7LDsylCgjKJlLrVXY1zxO1/mX/MTzVZGuAhbikFJT2ZN2Up8iED+pJwpcps3LlA1wOjQC3Q= root@ops-04.ecs.io\" >> /root/.ssh/authorized_keys"
# host_name="dev-worker-0"${count}
# ((count++))
# echo "${server} $host_name" >> /root/wdd/1.txt
# echo ""
# ssh root@"${server}" "hostnamectl set-hostname $host_name"
# ssh root@"${server}" "hostname"
# echo ""
# ssh root@"${server}" "echo yes"
# ssh root@"${server}" "mkdir /root/wdd/"
# scp /root/wdd/235.txt root@${server}:/root/wdd/234.txt
# ssh root@"${server}" "cat /root/wdd/234.txt > /etc/ssh/sshd_config"
# ssh root@"${server}" "systemctl restart sshd"
# scp /root/wdd/* root@"${server}":/root/wdd/
# ssh root@"${server}" "cat /root/wdd/1.txt >> /etc/hosts"
# ssh root@"${server}" "echo \"nameserver 192.168.34.40\" > /etc/resolv.conf && echo \"nameserver 223.5.5.5\" >> /etc/resolv.conf"
# ssh root@"${server}" "yum install -y net-tools iputils bind-utils lsof curl wget vim mtr htop"
# ssh root@"${server}" "chmod +x /root/wdd/octopus-agent_linux_amd64 && echo selinux | /root/wdd/octopus-agent_linux_amd64 -mode=bastion && echo swap | /root/wdd/octopus-agent_linux_amd64 -mode=bastion && echo firewall | /root/wdd/octopus-agent_linux_amd64 -mode=bastion && echo sysconfig | /root/wdd/octopus-agent_linux_amd64 -mode=bastion"
# ssh root@"${server}" "echo ssh | /root/wdd/octopus-agent_linux_amd64 -mode=bastion && echo docker | /root/wdd/octopus-agent_linux_amd64 -mode=bastion"
# scp /etc/docker/daemon.json root@"${server}":/etc/docker/daemon.json
# ssh root@"${server}" "echo \"192.168.92.185 harbor.cdcyy.com.cn\" >> /etc/hosts && systemctl restart docker"
# ssh root@"${server}" "docker info"
# ssh root@"${server}" "sed -i '/^$/d' /root/.ssh/id_ed25519"
# ssh root@"${server}" "rm -rf /root/wdd/*"
#
# ssh root@"${server}" "chmod +x /root/wdd/kubectl && chmod +x /root/wdd/rke && mv /root/wdd/kubectl /usr/local/bin/kubectl && mv /root/wdd/rke /usr/local/bin/rke"
# scp /root/wdd/disk.sh root@"${server}":/root/wdd/disk.sh
# ssh root@"${server}" "chmod +x /root/wdd/disk.sh && bash /root/wdd/disk.sh"
# ssh root@"${server}" "docker login -u rad02_drone -p Drone@1234 harbor.cdcyy.com.cn"
# ssh root@"${server}" "echo \"192.168.34.40 yfcsnfs.com\" >> /etc/hosts"
# ssh root@"${server}" "yum install -y nfs-utils rpcbind"
ssh root@"${server}" "yum install -y chrony && systemctl start chronyd && systemctl enable chronyd"
# ssh root@"${server}" "systemctl start nfs nfs-client nfs-common && systemctl enable nfs nfs-client nfs-common && systemctl status nfs nfs-client nfs-common -l | grep \"active (running)\""
done
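Since only the chrony step is active, a follow-up pass can confirm time synchronisation on the same hosts; a minimal sketch reusing the array defined above:

for server in "${all_server_list[@]}"; do
  echo "== ${server} =="
  ssh root@"${server}" "chronyc tracking | grep -E 'Reference ID|System time'"
done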

@@ -0,0 +1,54 @@
#!/usr/bin/env bash
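# Puts /var/lib/docker on a dedicated LVM volume built on /dev/vdb:
# stops Docker, wipes /var/lib/docker and /var/lib/kubelet, partitions the disk,
# creates an XFS logical volume, mounts it at /var/lib/docker via /etc/fstab,
# and finally restarts Docker.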
systemctl stop docker
rm -rf /var/lib/docker
rm -rf /var/lib/kubelet
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
# Extend the root filesystem: use df -Th to find the ${VG_NAME}-root filesystem that needs to be grown
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
# Custom setup below; requires lvm2 to be installed
# Create one primary partition spanning /dev/vdb and mark it as type 8e (Linux LVM).
# The blank lines accept fdisk's defaults (partition number, first and last sector).
echo "n
p



t
8e
w
" | fdisk /dev/vdb
partprobe
# If a volume group already exists, just add to it directly
export VG_NAME=datavg
vgcreate ${VG_NAME} /dev/vdb1
export selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to the actual situation
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
partprobe
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkdir -p /var/lib/docker
export selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
df -TH | grep /var/lib/docker
sleep 3
systemctl restart docker
docker info
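After a run, the new volume and Docker's data root can be double-checked; a quick verification sketch:

lsblk /dev/vdb
df -TH /var/lib/docker
docker info 2>/dev/null | grep -i 'Docker Root Dir'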