This commit is contained in:
zeaslity
2024-10-30 16:30:51 +08:00
commit 437acbeb63
3363 changed files with 653948 additions and 0 deletions

View File

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Run this on every node
hostnamectl set-hostname storage-1
# sed -i "/search/ a nameserver 223.5.5.5" /etc/resolv.conf
echo "AllowTcpForwarding yes" >> /etc/ssh/sshd_config
systemctl restart sshd
cat >> /etc/hosts << EOF
192.168.8.65 master-node
192.168.8.66 worker-1
192.168.8.67 worker-2
192.168.8.68 storage-1
EOF
bash <(curl -L -s https://cdn.jsdelivr.net/gh/teddysun/across/bbr.sh)
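# Optional sanity check (a sketch; assumes teddysun's bbr.sh set the sysctls below):
# sysctl net.ipv4.tcp_congestion_control              # expect: bbr
# sysctl net.ipv4.tcp_available_congestion_control    # should include bbr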

View File

@@ -0,0 +1,80 @@
#!/bin/bash
## Disable swap
#swapoff -a
#cp -f /etc/fstab /etc/fstab_bak
#cat /etc/fstab_bak | grep -v swap >/etc/fstab
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "扩容之前的root目录的容量为${RootVolumeSizeBefore}"
# echo "y
# " | lvremove /dev/mapper/centos-swap
# freepesize=$(vgdisplay centos | grep 'Free PE' | awk '{print $5}')
# lvextend -l+${freepesize} /dev/mapper/centos-root
# ## Grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/centos-root
# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "扩容之后的root目录的容量为${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)
# echo "恭喜您的root目录容量增加了+++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/vdb
partprobe
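# Illustrative check (assumes the new partition shows up as /dev/vdb1):
# lsblk /dev/vdb   # confirm vdb1 exists before the vgcreate below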
# If a volume group already exists, extend it instead:
# vgextend /dev/mapper/centos /dev/vda3
vgcreate ${VG_NAME} /dev/vdb1
selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
# Adjust the size to your environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
selffstab="/dev/mapper/${VG_NAME}-lvdata /data xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"
# To grow the root filesystem instead: find the target filesystem with df -Th, then:
# lvextend -l +100%FREE /dev/mapper/${VG_NAME}-root
# xfs_growfs /dev/mapper/${VG_NAME}-root
#
#
VG_NAME=sata-data
# vgcreate ${VG_NAME} /dev/vdb1
selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
## Adjust the size to your environment
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
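# Note: unlike the datavg block above, this LV is left unformatted and unmounted.
# The remaining steps would look like this (a sketch; /data2 is a hypothetical mount
# point, and /dev/${VG_NAME}/lvdata avoids the hyphen-doubling of /dev/mapper names):
# mkfs.xfs /dev/${VG_NAME}/lvdata
# mkdir -p /data2
# echo "/dev/${VG_NAME}/lvdata /data2 xfs defaults 0 0" >> /etc/fstab
# mount -a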

File diff suppressed because it is too large

View File

@@ -0,0 +1,143 @@
#!/usr/bin/env bash
### Edit the values below ###
### Edit the values below ###
### Edit the values below ###
cmlc_app_image_list="cmlc-app-images.txt" # update the version as needed
rancher_image_list="kubernetes-images-2.5.7.txt" # usually no change needed
middleware_image_list="middleware-images.txt" # usually no change needed
DockerRegisterDomain="192.168.8.65:8033" # change to match your environment
HarborAdminPass=V2ryStr@ngPss # must match the password set in the first script
### Edit the values above ###
### Edit the values above ###
### Edit the values above ###
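# Each *.txt list holds one image reference per line; blank lines are skipped.
# Illustrative entries (taken from the lists in this commit):
#   rancher/rke-tools:v0.1.72
#   harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-gateway:2.1.14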
downloadAllNeededImages() {
while [[ $# -gt 0 ]]; do
pulled=""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "开始下载:${i}"
if docker pull "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}"
pulled="${pulled} ${i}"
else
if docker inspect "${i}" >/dev/null 2>&1; then
pulled="${pulled} ${i}"
else
echo "Image pull failed: ${i}"
fi
fi
echo "-------------------------------------------------"
done <"${1}"
shift
done
}
downloadAllNeededImagesAndCompress() {
while [[ $# -gt 0 ]]; do
pulled=""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "开始下载:${i}"
if docker pull "${i}" >/dev/null 2>&1; then
echo "Image pull success: ${i}"
pulled="${pulled} ${i}"
else
if docker inspect "${i}" >/dev/null 2>&1; then
pulled="${pulled} ${i}"
else
echo "Image pull failed: ${i}"
fi
fi
echo "-------------------------------------------------"
done <"${1}"
compressPacName="${1%.txt}.tar.gz" # strip only the .txt suffix so version dots survive
echo "Creating ${compressPacName} with $(echo ${pulled} | wc -w | tr -d '[:space:]') images"
docker save ${pulled} | gzip --stdout > "${compressPacName}"
shift
done
}
pushRKEImageToHarbor(){
linux_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
linux_images+=("${i}");
done < "${rancher_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for i in "${linux_images[@]}"; do
[ -z "${i}" ] && continue
case $i in
*/*)
image_name="${DockerRegisterDomain}/${i}"
;;
*)
image_name="${DockerRegisterDomain}/rancher/${i}"
;;
esac
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${i}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
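# Illustrative retag mapping for the function above (with DockerRegisterDomain=192.168.8.65:8033):
#   rancher/rke-tools:v0.1.72 -> 192.168.8.65:8033/rancher/rke-tools:v0.1.72
#   busybox                   -> 192.168.8.65:8033/rancher/busybox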
pushCMLCAPPImageToHarbor(){
app_images=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
app_images+=("${i}");
done < "${cmlc_app_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for app in "${app_images[@]}"; do
[ -z "${app}" ] && continue
image_name="${DockerRegisterDomain}/$(echo ${app} | cut -d"/" -f2-8)"
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${app}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
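# Illustrative mapping: the registry prefix is swapped while the project path is kept, e.g.
#   harbor-qa.sre.cdcyy.cn/cmii/cmii-admin-data:2.1.14 -> 192.168.8.65:8033/cmii/cmii-admin-data:2.1.14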
pushMiddlewareImageToHarbor(){
middleware_image=()
while IFS= read -r i; do
[ -z "${i}" ] && continue
middleware_image+=("${i}");
done < "${middleware_image_list}"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
for app in "${middleware_image[@]}"; do
[ -z "${app}" ] && continue
case ${app} in
*/*/*)
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f3-8)"
;;
*/*)
image_name="${DockerRegisterDomain}/cmii/$(echo "${app}" | cut -d"/" -f2-8)"
;;
esac
echo "开始镜像至私有仓库推送:${image_name}"
docker tag "${app}" "${image_name}"
docker push "${image_name}"
echo "-------------------------------------------------"
done
}
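# Illustrative mapping: images land under the cmii/ project with registry and org prefixes stripped, e.g.
#   docker.io/bitnami/redis:6.2.6-debian-10-r0 -> 192.168.8.65:8033/cmii/redis:6.2.6-debian-10-r0
#   docker.io/emqx/emqx:4.2.12                 -> 192.168.8.65:8033/cmii/emqx:4.2.12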
#downloadAllNeededImagesAndCompress "kubernetes-images-2.5.7.txt" "middleware-images.txt"
#downloadAllNeededImages "kubernetes-images-2.5.7.txt" "middleware-images.txt"
pushRKEImageToHarbor
pushMiddlewareImageToHarbor
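# Offline workflow (a sketch): run the download functions on an internet-connected host,
# copy the resulting archives here, then load them before invoking the push functions:
#   docker load -i kubernetes-images-2.5.7.tar.gz
#   docker load -i middleware-images.tar.gz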

View File

@@ -0,0 +1,235 @@
#!/usr/bin/env bash
### Edit the values below ###
### Edit the values below ###
### Edit the values below ###
# In principle, the host that can reach the public internet is the one used to run Harbor
# If every host can reach the internet, list all hosts except the Harbor server
PrivateServerIPs=(192.168.8.66 192.168.8.67 192.168.8.68) # internal server IPs, excluding any host with public internet access
### Edit the values above ###
### Edit the values above ###
### Edit the values above ###
RED="31m" ## 姨妈红
GREEN="32m" ## 水鸭青
YELLOW="33m" ## 鸭屎黄
PURPLE="35m" ## 基佬紫
BLUE="36m" ## 天依蓝
colorEcho() {
# shellcheck disable=SC2145
echo -e "\033[${1}${@:2}\033[0m" 1>&2
}
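# Example: colorEcho ${GREEN} "done"   # prints "done" in green, to stderr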
check_root() {
if [[ $EUID -ne 0 ]]; then
colorEcho ${RED} "You are not root (or lack root privileges); please switch to the root account!"
colorEcho ${YELLOW} "Use the sudo - command for temporary root privileges; you may be prompted for the root password"
exit 1
fi
}
startFunc(){
colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
colorEcho ${BLUE} "开始执行 启动RKE集群的操作 ………"
echo ""
colorEcho ${BLUE} "本脚本的运行有一些列的前提依赖,请确定以下的项目都已完成!!!!"
colorEcho ${YELLOW} "----------------------------------------------------------"
colorEcho ${RED} "1. 完成基础环境初始化将rke系统镜像均上传至私有Harbor中"
colorEcho ${RED} "2. 配置并修改好 rke集群的模板文件命名为 cluster.yml !!"
colorEcho ${RED} "3. ……"
colorEcho ${YELLOW} "----------------------------------------------------------"
while true; do
colorEcho ${RED} "请确保您已经将上述的项目完成!!"
read -r -p "请输入yes进行确认脚本才可继续运行" input
case $input in
yes)
colorEcho ${GREEN} "您已确认上述的项目均已完成!!"
colorEcho ${GREEN} "----------------------------------------------------------"
echo ""
colorEcho ${BLUE} "开始执行 RKE集群的启动过程"
echo ""
main
break
;;
*)
echo ""
colorEcho ${RED} "Invalid input!!! Please type >> yes << to confirm"
colorEcho ${RED} "-----------------------------------------------------"
echo ""
;;
esac
done
}
installRKE(){
colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
colorEcho ${BLUE} "开始下载并安装 RKE 工具 ………"
echo ""
colorEcho ${BLUE} "开始从rancher镜像下载rke工具……"
wget http://rancher-mirror.cnrancher.com/rke/v1.2.6/rke_linux-amd64
if [ -s rke_linux-amd64 ]; then
colorEcho ${GREEN} "rke工具下载完成"
chmod +x rke_linux-amd64
mv ./rke_linux-amd64 /usr/local/bin/rke
colorEcho ${GREEN} "----------------------------------------------------------"
rke --version
colorEcho ${GREEN} "----------------------------------------------------------"
rke config --list-version --all
echo ""
colorEcho ${BLUE} "开始从rancher镜像下载 kubectl 工具……"
wget http://rancher-mirror.cnrancher.com/kubectl/v1.20.4/linux-amd64-v1.20.4-kubectl
chmod +x linux-amd64-v1.20.4-kubectl
mv linux-amd64-v1.20.4-kubectl /usr/local/bin/kubectl
colorEcho ${GREEN} "----------------------------------------------------------"
kubectl version --client # no cluster yet, so only print the client version
colorEcho ${GREEN} "----------------------------------------------------------"
else
colorEcho ${RED} "rke工具下载失败脚本无法继续运行请手动下载rke工具"
colorEcho ${RED} "rke工具下载失败脚本无法继续运行请手动下载rke工具"
colorEcho ${RED} "rke工具下载失败脚本无法继续运行请手动下载rke工具"
return 1
fi
}
createRKEInstallerUser(){
colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
colorEcho ${BLUE} "开始创建 rke-installer 用户………"
echo ""
useradd rke-installer
# Set the password non-interactively; plain `passwd` does not reliably read from a pipe
echo "rke-installer:rke-installer" | chpasswd
# Add the rke-installer user to the docker group
gpasswd -a rke-installer docker
# Refresh group membership (note: newgrp starts a subshell)
newgrp docker
echo ""
if [ -d /home/rke-installer ]; then
colorEcho ${GREEN} "rke-installer 用户创建成功!! "
echo ""
else
colorEcho ${YELLOW} "检测到 rke-installer 用户已经存在"
fi
if [[ -s cluster.yaml || -s cluster.yml ]]; then
colorEcho ${BLUE} "开始将 cluster.yaml文件复制到 rke-installer目录下…………"
mv cluster.y* /home/rke-installer/cluster.yml
if [ -s /home/rke-installer/cluster.yml ]; then
colorEcho ${BLUE} "cluster.yml文件已经放置完成"
chown rke-installer:rke-installer /home/rke-installer/cluster.yml
else
colorEcho ${RED} "当前目录下未检测到 rke集群的模板文件"
colorEcho ${RED} "程序无法继续,将退出!!"
return 1
fi
else
colorEcho ${RED} "当前目录下未检测到 rke集群的模板文件"
colorEcho ${RED} "程序无法继续,将退出!!"
echo ""
colorEcho ${YELLOW} "--------------------------------------------------"
colorEcho ${RED} "请创建RKE集群的模板文件并命名为 cluster.yml "
colorEcho ${RED} "请创建RKE集群的模板文件并命名为 cluster.yml "
colorEcho ${RED} "请创建RKE集群的模板文件并命名为 cluster.yml "
colorEcho ${YELLOW} "--------------------------------------------------"
return 1
fi
colorEcho ${BLUE} "开始切换当前用户至 rke-installer "
su rke-installer
echo ""
colorEcho ${BLUE} "请检查rke-installer用户能否执行 docker ps 命令!!"
docker ps
colorEcho ${BLUE} "----------------------------------------------------------"
}
generateRKEUserKey(){
colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
colorEcho ${BLUE} "开始创建 rke-installer用户的 ssh key ……"
echo ""
su rke-installer
ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
colorEcho ${GREEN} "--------------------------------------------------------------"
colorEcho ${GREEN} "-----------本机配置完成!-------------"
echo ""
for ip in "${PrivateServerIPs[@]}"; do
colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行"
colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行"
colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行"
colorEcho ${BLUE} "-----------------------------------------------"
echo ""
echo ""
colorEcho ${RED} " 请以 root 角色 运行!!! "
colorEcho ${RED} " 请以 root 角色 运行!!! "
colorEcho ${RED} " 请以 root 角色 运行!!! "
echo ""
echo "useradd rke-installer && echo \"rke-installer
rke-installer
\" | passwd rke-installer && gpasswd -a rke-installer docker && newgrp docker && su rke-installer && docker ps "
echo ""
echo "clear && ssh-keygen -t rsa -P \"\" -f ~/.ssh/id_rsa && echo \"$(cat ~/.ssh/id_rsa.pub)\" >> ~/.ssh/authorized_keys && echo \"\" && cat ~/.ssh/authorized_keys"
echo ""
echo ""
while true; do
colorEcho ${RED} "请确保您已经将上述的命令在主机${ip}上执行了!!"
read -r -p "请输入yes进行确认脚本才可继续运行" input
case $input in
yes)
colorEcho ${GREEN} "您已确认在主机${ip}上添加了私有的ssh key"
echo ""
break
;;
*)
echo ""
colorEcho ${RED} "输入有误!!! 请输入 >> yes << 进行确认"
colorEcho ${RED} "请在主机${ip}上执行上述命令!!!"
colorEcho ${RED} "否则本脚本的功能会失效!!"
colorEcho ${RED} "-----------------------------------------------------"
echo ""
;;
esac
done
done
}
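# Illustrative follow-up (assumes the keys above are in place): verify passwordless ssh
# from rke-installer to every node, since `rke up` depends on it:
# for ip in "${PrivateServerIPs[@]}"; do ssh -o BatchMode=yes "rke-installer@${ip}" 'echo "$(hostname): ok"'; done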
startRKECluster(){
colorEcho ${PURPLE} "---------------------------------------------------------------------------------"
colorEcho ${BLUE} "开始 启动 rke集群 "
colorEcho ${BLUE} "开始 启动 rke集群 "
colorEcho ${BLUE} "开始 启动 rke集群 "
echo ""
if [[ $(pwd) == "/home/rke-installer" ]]; then
colorEcho ${BLUE} "检测到当前目录为 /home/rke-installer"
echo ""
colorEcho ${BLUE} "开始执行 RKE 集群的启动过程 "
colorEcho ${BLUE} "-------------------------------------------------------------"
for i in {3..1}; do
colorEcho ${BLUE} "Countdown ->> $i second(s) <<- about to start RKE; the log output above will be cleared"
sleep 1
done
clear
rke up
else
colorEcho ${BLUE} "当前目录不为 /home/rke-installer开始跳转目录"
cd /home/rke-installer
startRKECluster
fi
}
main(){
check_root
generateRKEUserKey || return $?
startRKECluster || return $?
}
startFunc
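# After a successful `rke up`, RKE writes kube_config_cluster.yml next to cluster.yml.
# Illustrative post-install check:
# export KUBECONFIG=/home/rke-installer/kube_config_cluster.yml
# kubectl get nodes -o wide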

View File

@@ -0,0 +1,36 @@
harbor-qa.sre.cdcyy.cn/cmii/cmii-admin-data:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-admin-gateway:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-admin-user:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-open-gateway:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-airspace:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-brain:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-cloud-live:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-clusters:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-data-post-process:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-developer:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-device:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-gateway:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-hyperspectral-consumer:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-hyperspectral-provider:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-kpi-monitor:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-logger:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-material-warehouse:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-mission:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-mqtthandler:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-notice:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-oauth:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-process:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-security-system:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-surveillance:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-user:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-waypoint:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-platform:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-platform-ai-brain:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-platform-hyperspectral:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-platform-mws:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-platform-mws-admin:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-platform-oms:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-platform-open:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-platform-splice:2.1.14
harbor-qa.sre.cdcyy.cn/cmii/cmii-uav-platform-splice-visual:2.1.14

View File

@@ -0,0 +1,20 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
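# Illustrative: fetch this ServiceAccount's login token for the dashboard
# (works on clusters where token secrets are auto-created, i.e. Kubernetes <= 1.23):
# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')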

View File

@@ -0,0 +1,299 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.4
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
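# Illustrative: the kubernetes-dashboard Service above is type NodePort; to find the
# HTTPS port assigned on the nodes:
# kubectl -n kube-system get svc kubernetes-dashboard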

View File

@@ -0,0 +1,64 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.13.4
rancher/calico-ctl:v3.13.4
rancher/calico-kube-controllers:v3.13.4
rancher/calico-node:v3.13.4
rancher/calico-pod2daemon-flexvol:v3.13.4
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/coredns-coredns:1.6.9
rancher/coreos-etcd:v3.4.3-rancher1
rancher/coreos-flannel:v0.12.0
rancher/coreos-flannel:v0.13.0-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.18.16-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.7
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.18.6
rancher/kubernetes-external-dns:v0.7.3
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.3.6
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.35.0-rancher2
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.1
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.12.0
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2

View File

@@ -0,0 +1,61 @@
busybox
rancher/backup-restore-operator:v1.0.3
rancher/calico-cni:v3.17.2
rancher/calico-ctl:v3.17.2
rancher/calico-kube-controllers:v3.17.2
rancher/calico-node:v3.17.2
rancher/calico-pod2daemon-flexvol:v3.17.2
rancher/cis-operator:v1.0.3
rancher/cluster-proportional-autoscaler:1.7.1
rancher/coredns-coredns:1.8.0
rancher/coreos-etcd:v3.4.14-rancher1
rancher/coreos-kube-state-metrics:v1.9.7
rancher/coreos-prometheus-config-reloader:v0.39.0
rancher/coreos-prometheus-operator:v0.39.0
rancher/externalip-webhook:v0.1.6
rancher/flannel-cni:v0.3.0-rancher6
rancher/fleet-agent:v0.3.4
rancher/fleet:v0.3.4
rancher/fluentd:v0.1.24
rancher/grafana-grafana:7.1.5
rancher/hyperkube:v1.20.4-rancher1
rancher/jimmidyson-configmap-reload:v0.3.0
rancher/k8s-dns-dnsmasq-nanny:1.15.2
rancher/k8s-dns-kube-dns:1.15.2
rancher/k8s-dns-node-cache:1.15.13
rancher/k8s-dns-sidecar:1.15.2
rancher/klipper-lb:v0.1.2
rancher/kube-api-auth:v0.1.4
rancher/kubectl:v1.20.4
rancher/kubernetes-external-dns:v0.7.3
rancher/library-busybox:1.31.1
rancher/library-busybox:1.32.1
rancher/library-nginx:1.19.2-alpine
rancher/library-traefik:1.7.19
rancher/local-path-provisioner:v0.0.11
rancher/local-path-provisioner:v0.0.14
rancher/local-path-provisioner:v0.0.19
rancher/log-aggregator:v0.1.7
rancher/istio-kubectl:1.5.10
rancher/metrics-server:v0.4.1
rancher/configmap-reload:v0.3.0-rancher4
rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
rancher/opa-gatekeeper:v3.1.0-beta.7
rancher/openzipkin-zipkin:2.14.2
rancher/pause:3.2
rancher/plugins-docker:18.09
rancher/prom-alertmanager:v0.21.0
rancher/prom-node-exporter:v1.0.1
rancher/prom-prometheus:v2.18.2
rancher/prometheus-auth:v0.2.1
rancher/rancher-agent:v2.5.7
rancher/rancher-webhook:v0.1.0-beta9
rancher/rancher:v2.5.7
rancher/rke-tools:v0.1.72
rancher/security-scan:v0.1.14
rancher/security-scan:v0.2.2
rancher/shell:v0.1.6
rancher/sonobuoy-sonobuoy:v0.16.3
rancher/system-upgrade-controller:v0.6.2

View File

@@ -0,0 +1,18 @@
docker.io/bitnami/redis:6.2.6-debian-10-r0
docker.io/bitnami/mysql:8.0.26-debian-10-r0
docker.io/bitnami/bitnami-shell:10-debian-10-r140
docker.io/bitnami/rabbitmq:3.9.12-debian-10-r3
docker.io/bitnami/minio:2021.3.26-debian-10-r0
docker.io/ossrs/srs:v4.0.136
docker.io/emqx/emqx:4.2.12
docker.io/nacos/nacos-server:2.0.1
docker.io/mongo:5.0
docker.io/rabbitmq:3.7-management
docker.io/v2fly/v2fly-core:v4.38.3
docker.io/pollyduan/ingress-nginx-controller:v0.44.0
docker.io/jettech/kube-webhook-certgen:v1.5.1
docker.io/minio/minio:RELEASE.2022-03-26T06-49-28Z
docker.io/minio/minio:RELEASE.2022.5.4
docker.io/ossrs/srs:v4.0-b9
docker.io/kubernetesui/dashboard:v2.0.1
docker.io/kubernetesui/metrics-scraper:v1.0.4

View File

@@ -0,0 +1,252 @@
nodes:
- address: 192.168.8.65
user: rke-installer
role:
- controlplane
- etcd
- worker
internal_address: 192.168.8.65
- address: 192.168.8.66
user: rke-installer
role:
- worker
labels:
ingress-deploy: "true"
internal_address: 192.168.8.66
- address: 192.168.8.67
user: rke-installer
role:
- worker
internal_address: 192.168.8.67
labels:
mysql-deploy: "true"
- address: 192.168.8.68
user: rke-installer
role:
- worker
internal_address: 192.168.8.68
labels:
minio-deploy: "true"
authentication:
strategy: x509
sans:
- "192.168.8.6"
private_registries:
- url: 192.168.8.65:8033 # private registry address
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# Defaults to false; when true, RKE does not error on unsupported Docker versions
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /home/rke-installer/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 10.74.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set log verbosity to level 1
v: 1
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 10.100.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 10.74.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set log verbosity to level 1
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 10.74.0.10
# Fail if swap is on
fail_swap_on: false
# Optionally define additional volume binds to the service
extra_binds:
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
extra_args:
# Raise max pods from the default 110 to 122
max-pods: 122
scheduler:
extra_args:
# Set log verbosity to level 0
v: 0
kubeproxy:
extra_args:
# Set log verbosity to level 0
v: 0
authorization:
mode: rbac
addon_job_timeout: 30
# Specify the network plug-in (canal, calico, flannel, weave, or none)
#network:
# mtu: 1440
# options:
# flannel_backend_type: vxlan
# plugin: calico
# tolerations:
# - key: "node.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationseconds: 300
# - key: "node.kubernetes.io/not-ready"
# operator: "Exists"
# effect: "NoExecute"
# tolerationseconds: 300
network:
options:
flannel_backend_type: vxlan
flannel_iface: eth0
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
plugin: flannel
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal:
ip_address: '10.74.0.99'
node_selector: null
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationseconds: 300
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 30500
https_port: 31500
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: "true"
options:
use-forwarded-headers: "true"
access-log-path: /var/log/nginx/access.log
client-body-timeout: '6000'
compute-full-forwarded-for: 'true'
enable-underscores-in-headers: 'true'
log-format-escape-json: 'true'
log-format-upstream: >-
{ "msec": "$msec", "connection": "$connection", "connection_requests":
"$connection_requests", "pid": "$pid", "request_id": "$request_id",
"request_length": "$request_length", "remote_addr": "$remote_addr",
"remote_user": "$remote_user", "remote_port": "$remote_port",
"http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
"$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
"request_uri": "$request_uri", "args": "$args", "status": "$status",
"body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
"http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
"http_host": "$http_host", "server_name": "$server_name", "request_time":
"$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
"$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
"upstream_response_time": "$upstream_response_time",
"upstream_response_length": "$upstream_response_length",
"upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
"$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
"request_method": "$request_method", "server_protocol": "$server_protocol",
"pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
"geoip_country_code": "$geoip_country_code" }
proxy-body-size: 5120m
proxy-read-timeout: '6000'
proxy-send-timeout: '6000'