Your commit message

This commit is contained in:
zeaslity
2024-11-27 10:33:20 +08:00
commit 080c7bb97f
911 changed files with 168439 additions and 0 deletions

View File

@@ -0,0 +1,26 @@
#Write-Host "开始关闭 Clash"
#
## 关闭应用程序 clash
#Stop-Process -Name "Clash For Windows" -Force
#
#
#Write-Host "开始 启动 Clash"
#Start-Sleep -Seconds 2
#
## 启动应用程序 "C:\SSS\ClashForWindows\Clash for Windows"
#Start-Process -FilePath "C:\SSS\ClashForWindows\Clash for Windows\Clash for Windows.exe"
#
#Start-Sleep -Seconds 2
Write-Host "开始 修改 Clash 的配置 "
$uri = 'http://127.0.0.1:61889/proxies/:tc-bjc'
$headers = @{
'Authorization' = 'Bearer 5c090877-21bb-4006-a97c-0bd4bfbb9be9'
}
$result = Invoke-RestMethod -Uri $uri -Method Put -Headers $headers
# 可以根据需要处理$result的响应
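# Note (assumption): Clash's external controller API usually expects PUT /proxies/<group>
# to carry a JSON body naming the node to select, e.g.:
#   Invoke-RestMethod -Uri $uri -Method Put -Headers $headers -ContentType 'application/json' -Body '{"name":"<node-name>"}'
# Verify against your Clash version; a bare PUT may be rejected without a body.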

View File

@@ -0,0 +1 @@
Write me a PowerShell script that does the following: stop the application clash, then start the application "C:\SSS\ClashForWindows\Clash for Windows"

View File

@@ -0,0 +1,29 @@
#!/bin/bash
echo "-----------------------------------------------------------------------"
RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/rootvg-lvroot" | awk '{print $3}')
echo "Root volume size before expansion: ${RootVolumeSizeBefore}"
echo "y
" | lvremove /dev/rootvg/lvswap
freepesize=$(vgdisplay rootvg | grep 'Free PE' | awk '{print $5}')
lvextend -l+${freepesize} /dev/rootvg/lvroot
## Grow the XFS filesystem to the maximum available size
xfs_growfs /dev/rootvg/lvroot
df -TH | grep -w "/dev/mapper/rootvg-lvroot" | awk '{print $3}'
echo "-----------------------------------------------------------------------"
RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/rootvg-lvroot" | awk '{print $3}')
echo "Root volume size after expansion: ${RootVolumeSizeAfter}"
RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)
echo "Congratulations, your root volume grew by +++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"

View File

@@ -0,0 +1,25 @@
#!/bin/bash
VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/vdb
partprobe
vgcreate ${VG_NAME} /dev/vdb1
selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
selffstab="/dev/mapper/${VG_NAME}-lvdata /data xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
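Piping answers into fdisk is fragile because the prompt sequence differs across util-linux versions; a scriptable alternative for the same /dev/vdb (a sketch):

```shell
parted -s /dev/vdb mklabel msdos            # new partition table (destroys existing data)
parted -s /dev/vdb mkpart primary 0% 100%   # one partition spanning the whole disk
parted -s /dev/vdb set 1 lvm on             # flag it for LVM
partprobe
```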

View File

@@ -0,0 +1,279 @@
#!/usr/bin/env bash
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
SKYBLUE='\033[0;36m'
PLAIN='\033[0m'
cancel() {
echo ""
next;
echo " Abort ..."
echo " Cleanup ..."
cleanup;
echo " Done"
exit
}
trap cancel SIGINT
benchinit() {
if [ -f /etc/redhat-release ]; then
release="centos"
elif cat /etc/issue | grep -Eqi "debian"; then
release="debian"
elif cat /etc/issue | grep -Eqi "ubuntu"; then
release="ubuntu"
elif cat /etc/issue | grep -Eqi "centos|red hat|redhat"; then
release="centos"
elif cat /proc/version | grep -Eqi "debian"; then
release="debian"
elif cat /proc/version | grep -Eqi "ubuntu"; then
release="ubuntu"
elif cat /proc/version | grep -Eqi "centos|red hat|redhat"; then
release="centos"
fi
[[ $EUID -ne 0 ]] && echo -e "${RED}Error:${PLAIN} This script must be run as root!" && exit 1
start=$(date +%s)
}
get_opsy() {
[ -f /etc/redhat-release ] && awk '{print ($1,$3~/^[0-9]/?$3:$4)}' /etc/redhat-release && return
[ -f /etc/os-release ] && awk -F'[= "]' '/PRETTY_NAME/{print $3,$4,$5}' /etc/os-release && return
[ -f /etc/lsb-release ] && awk -F'[="]+' '/DESCRIPTION/{print $2}' /etc/lsb-release && return
}
next() {
printf "%-70s\n" "-" | sed 's/\s/-/g' | tee -a $log
}
io_test() {
(LANG=C dd if=/dev/zero of=test_file_$$ bs=512K count=$1 conv=fdatasync && rm -f test_file_$$ ) 2>&1 | awk -F, '{io=$NF} END { print io}' | sed 's/^[ \t]*//;s/[ \t]*$//'
}
calc_disk() {
local total_size=0
local array=$@
for size in ${array[@]}
do
[ "${size}" == "0" ] && size_t=0 || size_t=`echo ${size:0:${#size}-1}`
[ "`echo ${size:(-1)}`" == "K" ] && size=0
[ "`echo ${size:(-1)}`" == "M" ] && size=$( awk 'BEGIN{printf "%.1f", '$size_t' / 1024}' )
[ "`echo ${size:(-1)}`" == "T" ] && size=$( awk 'BEGIN{printf "%.1f", '$size_t' * 1024}' )
[ "`echo ${size:(-1)}`" == "G" ] && size=${size_t}
total_size=$( awk 'BEGIN{printf "%.1f", '$total_size' + '$size'}' )
done
echo ${total_size}
}
power_time() {
result=$(smartctl -a $(result=$(cat /proc/mounts) && echo $(echo "$result" | awk '/data=ordered/{print $1}') | awk '{print $1}') 2>&1) && power_time=$(echo "$result" | awk '/Power_On/{print $10}') && echo "$power_time"
}
virt_check(){
if hash ifconfig 2>/dev/null; then
eth=$(ifconfig)
fi
virtualx=$(dmesg 2>/dev/null)
if [ $(which dmidecode) ]; then
sys_manu=$(dmidecode -s system-manufacturer 2>/dev/null)
sys_product=$(dmidecode -s system-product-name 2>/dev/null)
sys_ver=$(dmidecode -s system-version 2>/dev/null)
else
sys_manu=""
sys_product=""
sys_ver=""
fi
if grep docker /proc/1/cgroup -qa; then
virtual="Docker"
elif grep lxc /proc/1/cgroup -qa; then
virtual="Lxc"
elif grep -qa container=lxc /proc/1/environ; then
virtual="Lxc"
elif [[ -f /proc/user_beancounters ]]; then
virtual="OpenVZ"
elif [[ "$virtualx" == *kvm-clock* ]]; then
virtual="KVM"
elif [[ "$cname" == *KVM* ]]; then
virtual="KVM"
elif [[ "$cname" == *QEMU* ]]; then
virtual="KVM"
elif [[ "$virtualx" == *"VMware Virtual Platform"* ]]; then
virtual="VMware"
elif [[ "$virtualx" == *"Parallels Software International"* ]]; then
virtual="Parallels"
elif [[ "$virtualx" == *VirtualBox* ]]; then
virtual="VirtualBox"
elif [[ -e /proc/xen ]]; then
virtual="Xen"
elif [[ "$sys_manu" == *"Microsoft Corporation"* ]]; then
if [[ "$sys_product" == *"Virtual Machine"* ]]; then
if [[ "$sys_ver" == *"7.0"* || "$sys_ver" == *"Hyper-V" ]]; then
virtual="Hyper-V"
else
virtual="Microsoft Virtual Machine"
fi
fi
else
virtual="Dedicated"
fi
}
power_time_check(){
echo -ne " Power time of disk : "
install_smart
ptime=$(power_time)
echo -e "${SKYBLUE}$ptime Hours${PLAIN}"
}
freedisk() {
freespace=$( df -m . | awk 'NR==2 {print $4}' )
if [[ $freespace == "" ]]; then
freespace=$( df -m . | awk 'NR==3 {print $3}' )
fi
if [[ $freespace -gt 1024 ]]; then
printf "%s" $((1024*2))
elif [[ $freespace -gt 512 ]]; then
printf "%s" $((512*2))
elif [[ $freespace -gt 256 ]]; then
printf "%s" $((256*2))
elif [[ $freespace -gt 128 ]]; then
printf "%s" $((128*2))
else
printf "1"
fi
}
print_io() {
if [[ $1 == "fast" ]]; then
writemb=$((128*2))
else
writemb=$(freedisk)
fi
writemb_size="$(( writemb / 2 ))MB"
if [[ $writemb_size == "1024MB" ]]; then
writemb_size="1.0GB"
fi
if [[ $writemb != "1" ]]; then
echo -n " I/O Speed( $writemb_size ) : " | tee -a $log
io1=$( io_test $writemb )
echo -e "${YELLOW}$io1${PLAIN}" | tee -a $log
echo -n " I/O Speed( $writemb_size ) : " | tee -a $log
io2=$( io_test $writemb )
echo -e "${YELLOW}$io2${PLAIN}" | tee -a $log
echo -n " I/O Speed( $writemb_size ) : " | tee -a $log
io3=$( io_test $writemb )
echo -e "${YELLOW}$io3${PLAIN}" | tee -a $log
ioraw1=$( echo $io1 | awk 'NR==1 {print $1}' )
[ "`echo $io1 | awk 'NR==1 {print $2}'`" == "GB/s" ] && ioraw1=$( awk 'BEGIN{print '$ioraw1' * 1024}' )
ioraw2=$( echo $io2 | awk 'NR==1 {print $1}' )
[ "`echo $io2 | awk 'NR==1 {print $2}'`" == "GB/s" ] && ioraw2=$( awk 'BEGIN{print '$ioraw2' * 1024}' )
ioraw3=$( echo $io3 | awk 'NR==1 {print $1}' )
[ "`echo $io3 | awk 'NR==1 {print $2}'`" == "GB/s" ] && ioraw3=$( awk 'BEGIN{print '$ioraw3' * 1024}' )
ioall=$( awk 'BEGIN{print '$ioraw1' + '$ioraw2' + '$ioraw3'}' )
ioavg=$( awk 'BEGIN{printf "%.1f", '$ioall' / 3}' )
echo -e " Average I/O Speed : ${YELLOW}$ioavg MB/s${PLAIN}" | tee -a $log
else
echo -e " ${RED}Not enough space!${PLAIN}"
fi
}
print_system_info() {
echo -e " CPU Model : ${SKYBLUE}$cname${PLAIN}" | tee -a $log
echo -e " CPU Cores : ${YELLOW}$cores Cores ${SKYBLUE}$freq MHz $arch${PLAIN}" | tee -a $log
echo -e " CPU Cache : ${SKYBLUE}$corescache ${PLAIN}" | tee -a $log
echo -e " OS : ${SKYBLUE}$opsy ($lbit Bit) ${YELLOW}$virtual${PLAIN}" | tee -a $log
echo -e " Kernel : ${SKYBLUE}$kern${PLAIN}" | tee -a $log
echo -e " Total Space : ${SKYBLUE}$disk_used_size GB / ${YELLOW}$disk_total_size GB ${PLAIN}" | tee -a $log
echo -e " Total RAM : ${SKYBLUE}$uram MB / ${YELLOW}$tram MB ${SKYBLUE}($bram MB Buff)${PLAIN}" | tee -a $log
echo -e " Total SWAP : ${SKYBLUE}$uswap MB / $swap MB${PLAIN}" | tee -a $log
echo -e " Uptime : ${SKYBLUE}$up${PLAIN}" | tee -a $log
echo -e " Load Average : ${SKYBLUE}$load${PLAIN}" | tee -a $log
echo -e " TCP CC : ${YELLOW}$tcpctrl${PLAIN}" | tee -a $log
}
print_end_time() {
end=$(date +%s)
time=$(( $end - $start ))
if [[ $time -gt 60 ]]; then
min=$(expr $time / 60)
sec=$(expr $time % 60)
echo -ne " Finished in : ${min} min ${sec} sec" | tee -a $log
else
echo -ne " Finished in : ${time} sec" | tee -a $log
fi
printf '\n' | tee -a $log
bj_time=$(curl -s http://cgi.im.qq.com/cgi-bin/cgi_svrtime)
if [[ $(echo $bj_time | grep "html") ]]; then
bj_time=$(date -u +%Y-%m-%d" "%H:%M:%S -d '+8 hours')
fi
echo " Timestamp : $bj_time GMT+8" | tee -a $log
echo " Results : $log"
}
get_system_info() {
cname=$( awk -F: '/model name/ {name=$2} END {print name}' /proc/cpuinfo | sed 's/^[ \t]*//;s/[ \t]*$//' )
cores=$( awk -F: '/model name/ {core++} END {print core}' /proc/cpuinfo )
freq=$( awk -F: '/cpu MHz/ {freq=$2} END {print freq}' /proc/cpuinfo | sed 's/^[ \t]*//;s/[ \t]*$//' )
corescache=$( awk -F: '/cache size/ {cache=$2} END {print cache}' /proc/cpuinfo | sed 's/^[ \t]*//;s/[ \t]*$//' )
tram=$( free -m | awk '/Mem/ {print $2}' )
uram=$( free -m | awk '/Mem/ {print $3}' )
bram=$( free -m | awk '/Mem/ {print $6}' )
swap=$( free -m | awk '/Swap/ {print $2}' )
uswap=$( free -m | awk '/Swap/ {print $3}' )
up=$( awk '{a=$1/86400;b=($1%86400)/3600;c=($1%3600)/60} {printf("%d days %d hour %d min\n",a,b,c)}' /proc/uptime )
load=$( w | head -1 | awk -F'load average:' '{print $2}' | sed 's/^[ \t]*//;s/[ \t]*$//' )
opsy=$( get_opsy )
arch=$( uname -m )
lbit=$( getconf LONG_BIT )
kern=$( uname -r )
disk_size1=$( LANG=C df -hPl | grep -wvE '\-|none|tmpfs|overlay|shm|udev|devtmpfs|by-uuid|chroot|Filesystem' | awk '{print $2}' )
disk_size2=$( LANG=C df -hPl | grep -wvE '\-|none|tmpfs|overlay|shm|udev|devtmpfs|by-uuid|chroot|Filesystem' | awk '{print $3}' )
disk_total_size=$( calc_disk ${disk_size1[@]} )
disk_used_size=$( calc_disk ${disk_size2[@]} )
tcpctrl=$( sysctl net.ipv4.tcp_congestion_control | awk -F ' ' '{print $3}' )
virt_check
}
cleanup() {
rm -f test_file_*
rm -rf speedtest*
rm -f fast_com*
rm -f tools.py
rm -f ip_json.json
}
bench_all(){
benchinit;
clear
next;
get_system_info;
print_system_info;
next;
print_io;
next;
print_end_time;
next;
cleanup;
}
log="./superbench.log"
true > $log
speedLog="./speedtest.log"
true > $speedLog
bench_all
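To run the benchmark (assuming the file is saved as superbench.sh; the script itself enforces root):

```shell
sudo bash superbench.sh    # each section is also appended to ./superbench.log
```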

View File

@@ -0,0 +1,125 @@
#!/bin/bash
# Goal: install a caller-specified JDK version
## Default version: Oracle JDK 11.0.9
## Binary (tarball) install method
## Package install method
## Public entry point for the JDK install
InstallJDK(){
JDK_VERSION="11"
JDK_Install_Method="binary"
JDKInstallHelp="0"
while [[ $# -gt 0 ]]; do
case "$1" in
-v|--version)
JDK_VERSION="${2}"
shift # past argument
;;
-m|--method)
JDK_Install_Method="${2}"
shift # past argument
;;
-h|--help)
JDKInstallHelp="1"
;;
*)
echo "输入的内容有误,请检查!"
# unknown option
;;
esac
shift # past argument or value
done
if [ "${JDKInstallHelp}" -eq "1" ]; then
cat - 1>& 2 << EOF
./install-release.sh [-h] [-p|--Version 6379] [-m|--method binary|docker]
-h, --help 打印此安装帮助说明
-v, --version 安装JDK的版本如果不指定此参数则默为11
-m, --method 安装JDK的方式binary == 源码编译安装package == 使用源package安装不指定则使用binary
EOF
return 0
fi
# echo $JDKVersion
# echo $JDKInstallMethod
if [[ ${JDK_Install_Method} == "binary" ]]; then
InstallJDKBinary ${JDK_VERSION}
else
InstallJDKPackage ${JDK_VERSION}
fi
}
InstallJDKBinary() {
JDK_VERSION=""
JDK_FILENAME=""
if [[ "$1" -ne " " ]]; then
JDK_VERSION="$1"
echo "JDK Version = ${JDK_VERSION}"
fi
echo "InstallJDKBinary"
echo ""
echo "开始下载 JDK 的源码包!!"
mkdir -p /usr/local/java/
if [ "${JDK_VERSION}" -eq "11" ]; then
JDK_FILENAME="jdk-11.0.9"
wget https://objectstorage.ap-seoul-1.oraclecloud.com/n/cnk8d6fazu16/b/seoul/o/jdk-11.0.9_linux-x64_bin.tar.gz
echo "JDK 二进制文件下载成功,开始解压缩!!"
tar -zxvf jdk-11.0.9_linux-x64_bin.tar.gz -C /usr/local/java/
else
JDK_FILENAME="jdk1.8.0_271"
wget https://objectstorage.ap-seoul-1.oraclecloud.com/n/cnk8d6fazu16/b/seoul/o/jdk-8u271-linux-x64.tar.gz
echo "JDK 二进制文件下载成功,开始解压缩!!"
tar -zxvf jdk-8u271-linux-x64.tar.gz -C /usr/local/java/
fi
echo ""
echo "开始配置JDK的环境变量"
if [ -e /etc/profile.d/jdk-env.sh ]; then
echo "jdk的配置文件已经存在将会进行覆盖操作"
cp /etc/profile.d/jdk-env.sh .jdk-env-backup.sh
rm /usr/bin/java
fi
# escape the inner variables so they expand at login time, not while this file is written
cat >/etc/profile.d/jdk-env.sh <<EOF
export JAVA_HOME=/usr/local/java/${JDK_FILENAME}
export JRE_HOME=\${JAVA_HOME}/jre
export CLASSPATH=\${JAVA_HOME}/lib:\${JRE_HOME}/lib:\${CLASSPATH}
export PATH=\${JAVA_HOME}/bin:\${JRE_HOME}/bin:\${PATH}
EOF
source /etc/profile
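# NOTE: sourcing /etc/profile here only affects this script's own shell;
# start a new login shell (or source /etc/profile.d/jdk-env.sh) for the variables to apply to your session.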
ln -s /usr/local/java/${JDK_FILENAME}/bin/java /usr/bin/java
echo ""
echo ""
echo ""
echo "请检查JDK的安装情况======================================"
java -version
}
InstallJDKPackage(){
JDK_VERSION=""
if [[ "$1" -ne " " ]]; then
JDK_VERSION="$1"
echo "JDK Version = ${JDK_VERSION}"
fi
echo "InstallJDKDocker"
echo ""
apt-cache madison openjdk-11-jdk
}
InstallJDK -v 11 -m binary

File diff suppressed because it is too large

View File

@@ -0,0 +1,75 @@
#!/bin/bash
# Disable swap
# swapoff -a
# cp -f /etc/fstab /etc/fstab_bak
# cat /etc/fstab_bak | grep -v swap >/etc/fstab
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/rootvg-lvroot" | awk '{print $3}')
# echo "Root volume size before expansion: ${RootVolumeSizeBefore}"
# echo "y
# " | lvremove /dev/mapper/rootvg-lvopt
# freepesize=$(vgdisplay rootvg | grep 'Free PE' | awk '{print $5}')
# lvextend -l+${freepesize} /dev/mapper/rootvg-lvroot
# ## Grow the XFS filesystem to the maximum available size
# xfs_growfs /dev/mapper/rootvg-lvroot
# df -TH | grep -w "/dev/mapper/rootvg-lvroot" | awk '{print $3}'
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/rootvg-lvroot" | awk '{print $3}')
# echo "Root volume size after expansion: ${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)
# echo "Congratulations, your root volume grew by +++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"
# echo ""
# echo ""
# echo ""
# echo "-----------------------------------------------------------------------"
## volume group name
VG_NAME=datavg
## logical volume name
LV_NAME=lvdocker
## physical disk device
PV_DISK=vdb
## mount point for the new filesystem
BIND_PATH=/var/lib/docker
echo "n
p
t
8e
w
" | fdisk /dev/${PV_DISK}
partprobe
vgcreate ${VG_NAME} /dev/${PV_DISK}1
selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
lvcreate -l ${selfpesize} -n ${LV_NAME} ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-${LV_NAME}
mkdir -p ${BIND_PATH}
selffstab="/dev/mapper/${VG_NAME}-${LV_NAME} ${BIND_PATH} xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"

View File

@@ -0,0 +1,112 @@
#!/bin/bash
# Goal: install MySQL on a specified port
## Binary install method
## Docker install method
InstallMysql(){
MysqlPort="3306"
MysqlInstallMethod="binary"
MysqlInstallHelp="0"
MysqlPersistData="/var/lib/docker/mysql-data"
while [[ $# -gt 0 ]]; do
case "$1" in
-p|--port)
MysqlPort="${2}"
shift # past argument
;;
-m|--method)
MysqlInstallMethod="${2}"
shift # past argument
;;
-d|--data)
MysqlPersistData="${2}"
shift # past argument
;;
-h|--help)
MysqlInstallHelp="1"
;;
*)
echo "输入的内容有误,请检查!"
# unknown option
;;
esac
shift # past argument or value
done
if [ "${MysqlInstallHelp}" -eq "1" ]; then
cat - 1>& 2 << EOF
./install-release.sh [-h] [-p|--port 3306] [-m|--method binary|docker]
-h, --help 打印此安装帮助说明
-p, --port 安装Mysql的端口如果不指定此参数则默为3306
-m, --method 安装Mysql的方式binary == 源码编译安装docker == 使用docker安装不指定则使用binary
EOF
fi
# echo $MysqlPort
# echo $MysqlInstallMethod
if [[ ${MysqlInstallMethod} == "binary" ]]; then
InstallMysqlBinary ${MysqlPort}
else
InstallMysqlDocker ${MysqlPort} ${MysqlPersistData}
fi
}
InstallMysqlBinary() {
MysqlPort=""
if [[ "$1" -ne " " ]]; then
MysqlPort="$1"
echo "mysql Port = ${MysqlPort}"
fi
echo "InstallMysqlBinary"
}
InstallMysqlDocker(){
MysqlPort=""
MysqlPersistData=""
if [[ "$1" -ne " " ]]; then
MysqlPort="$1"
echo "mysql Port = ${MysqlPort}"
fi
if [[ "${2}" -ne " " ]]; then
MysqlPersistData="$2"
echo "mysql persist data path = ${MysqlPersistData}"
fi
echo "InstallMysqlDocker"
echo ""
echo "开始启动docker-mysql !!"
# https://hub.docker.com/r/bitnami/mysql#configuration
# a directory must be prepared for the data (and optionally a my.cnf)
# directory ownership must be fixed for the non-root container user
mkdir -p ${MysqlPersistData}
chown -R 1001:1001 ${MysqlPersistData}
docker run -d \
-e MYSQL_ROOT_USER=root \
-e MYSQL_ROOT_PASSWORD=v2ryStr@ngPa.ss \
-e MYSQL_CHARACTER_SET=utf8mb4 \
-e MYSQL_COLLATE=utf8mb4_bin \
-e MYSQL_DATABASE=demo \
-e MYSQL_USER=wdd \
-e MYSQL_PASSWORD=wdd14Fk@Clever \
-e MYSQL_PORT_NUMBER=${MysqlPort} \
-e MYSQL_AUTHENTICATION_PLUGIN=mysql_native_password \
-v ${MysqlPersistData}:/bitnami/mysql/data \
--name mysql-server \
--network host \
bitnami/mysql:8.0.27-debian-10-r40
}
InstallMysql -p 33306 -m docker -d "/var/lib/docker/mysql-pv"
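A quick smoke test once the container is up (a sketch; user, password and port taken from the invocation above):

```shell
docker exec -it mysql-server mysql -h 127.0.0.1 -P 33306 -uroot -p'v2ryStr@ngPa.ss' -e 'SELECT VERSION();'
```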

View File

@@ -0,0 +1,46 @@
version: '3'
services:
redis-1:
image: redis:5.0.5
container_name: redis-1
ports:
- 6380:6379
redis-2:
image: redis:5.0.5
container_name: redis-2
ports:
- 6381:6379
command: redis-server --slaveof redis-1 6379
# have the two slave nodes replicate the node named redis-1, forming a simple three-node master/slave setup
redis-3:
image: redis:5.0.5
container_name: redis-3
ports:
- 6382:6379
command: redis-server --slaveof redis-1 6379
# have the two slave nodes replicate the node named redis-1, forming a simple three-node master/slave setup
sentinel-1:
image: redis:5.0.5
container_name: redis-sentinel-1
ports:
- 26379:26379
command: redis-sentinel /usr/local/etc/redis/sentinel.conf
volumes:
- ./sentinel1.conf:/usr/local/etc/redis/sentinel.conf
sentinel-2:
image: redis:5.0.5
container_name: redis-sentinel-2
ports:
- 26380:26379
command: redis-sentinel /usr/local/etc/redis/sentinel.conf
volumes:
- ./sentinel2.conf:/usr/local/etc/redis/sentinel.conf
sentinel-3:
image: redis:5.0.5
container_name: redis-sentinel-3
ports:
- 26381:26379
command: redis-sentinel /usr/local/etc/redis/sentinel.conf
volumes:
- ./sentinel3.conf:/usr/local/etc/redis/sentinel.conf
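Once the stack is up, Sentinel's view of the master can be checked from any sentinel container (a sketch):

```shell
docker-compose up -d
docker exec redis-sentinel-1 redis-cli -p 26379 sentinel master mymaster
```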

View File

@@ -0,0 +1,7 @@
port 26379
dir "/tmp"
sentinel myid 0bd93b751edefaaef7249a486a342d6d0d007ebe
sentinel deny-scripts-reconfig yes
sentinel monitor mymaster 172.18.0.3 6379 2
sentinel config-epoch mymaster 2

View File

@@ -0,0 +1,7 @@
port 26379
dir "/tmp"
sentinel myid 91a6c9cc2b7680584e70ea4b88474d8f95260bf8
sentinel deny-scripts-reconfig yes
sentinel monitor mymaster 172.18.0.3 6379 2
sentinel config-epoch mymaster 2

View File

@@ -0,0 +1,7 @@
port 26379
dir "/tmp"
sentinel myid 58ec1266fc47abc23bb3ca8ebdca2e52b48584ad
sentinel deny-scripts-reconfig yes
sentinel monitor mymaster 172.18.0.3 6379 2
sentinel config-epoch mymaster 2

View File

@@ -0,0 +1,390 @@
bind 0.0.0.0
protected-mode no
port RedisPort
tcp-backlog 511
timeout 0
tcp-keepalive 300
supervised auto
daemonize yes
pidfile "/var/run/redis-RedisPort.pid"
loglevel notice
logfile "/var/log/redis_RedisPort.log"
databases 16
always-show-logo no
set-proc-title yes
proc-title-template "{title} {listen-addr} {server-mode}"
################################ SNAPSHOTTING ################################
# Save the DB to disk.
#
# save <seconds> <changes>
#
# Redis will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# Snapshotting can be completely disabled with a single empty string argument
# as in following example:
#
# save ""
#
# Unless specified otherwise, by default Redis will save the DB:
# * After 3600 seconds (an hour) if at least 1 key changed
# * After 300 seconds (5 minutes) if at least 100 keys changed
# * After 60 seconds if at least 10000 keys changed
#
# You can set these explicitly by uncommenting the three following lines.
#
# save 3600 1
# save 300 100
# save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dump .rdb databases?
# By default compression is enabled as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performances.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# Enables or disables full sanitation checks for ziplist and listpack etc when
# loading an RDB or RESTORE payload. This reduces the chances of an assertion or
# crash later on while processing commands.
# Options:
# no - Never perform full sanitation
# yes - Always perform full sanitation
# clients - Perform full sanitation only for user connections.
# Excludes: RDB files, RESTORE commands received from the master
# connection, and client connections which have the
# skip-sanitize-payload ACL flag.
# The default should be 'clients' but since it currently affects cluster
# resharding via MIGRATE, it is temporarily set to 'no' by default.
#
# sanitize-dump-payload no
# The filename where to dump the DB
dbfilename dump.rdb
rdb-del-sync-files no
dir /var/redis/RedisPort
acllog-max-len 128
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
# It is also possible, for the case when to replace the user code DEL calls
# with UNLINK calls is not easy, to modify the default behavior of the DEL
# command to act exactly like UNLINK, using the following configuration
# directive:
lazyfree-lazy-user-del no
# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous
# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
# commands. When neither flag is passed, this directive will be used to determine
# if the data should be deleted asynchronously.
lazyfree-lazy-user-flush no
oom-score-adj no
oom-score-adj-values 0 200 800
disable-thp yes
appendonly no
# The name of the append only file (default: "appendonly.aof")
appendfilename "appendonly.aof"
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# More details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# If the AOF file is found truncated at the end, Redis can load it as far as
# it is consistent instead of refusing to start; a warning is logged and the
# truncated tail is discarded when the following option is enabled.
aof-load-truncated yes
# When rewriting the AOF file, Redis is able to use an RDB preamble in the
# AOF file for faster rewrites and recoveries. When this option is turned
# on the rewritten AOF file is composed of two different stanzas:
#
# [RDB file][AOF tail]
#
# When loading, Redis recognizes that the AOF file starts with the "REDIS"
# string and loads the prefixed RDB file, then continues loading the AOF
# tail.
aof-use-rdb-preamble yes
lua-time-limit 5000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
################################ LATENCY MONITOR ##############################
# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user that can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact, that while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0
notify-keyspace-events ""
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Lists are also encoded in a special way to save a lot of space.
# The number of entries allowed per internal list node can be specified
# as a fixed maximum size or a maximum number of elements.
# For a fixed maximum size, use -5 through -1, meaning:
# -5: max size: 64 Kb <-- not recommended for normal workloads
# -4: max size: 32 Kb <-- not recommended
# -3: max size: 16 Kb <-- probably not recommended
# -2: max size: 8 Kb <-- good
# -1: max size: 4 Kb <-- good
# Positive numbers mean store up to _exactly_ that number of elements
# per list node.
# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
# but if your use case is unique, adjust the settings as necessary.
list-max-ziplist-size -2
# Lists may also be compressed.
# Compress depth is the number of quicklist ziplist nodes from *each* side of
# the list to *exclude* from compression. The head and tail of the list
# are always uncompressed for fast push/pop operations.
list-compress-depth 0
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When an HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
# Streams macro node max size / items. The stream data structure is a radix
# tree of big nodes that encode multiple items inside. Using this configuration
# it is possible to configure how big a single node can be in bytes, and the
# maximum number of items it may contain before switching to a new node when
# appending new stream entries. If any of the following settings are set to
# zero, the limit is ignored, so for instance it is possible to set just a
# max entries limit by setting max-bytes to 0 and max-entries to the desired
# value.
stream-node-max-bytes 4096
stream-node-max-entries 100
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operation you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
# Normally it is useful to have an HZ value which is proportional to the
# number of clients connected. This is useful in order, for instance, to
# avoid too many clients are processed for each background task invocation
# in order to avoid latency spikes.
#
# Since the default HZ value by default is conservatively set to 10, Redis
# offers, and enables by default, the ability to use an adaptive HZ value
# which will temporarily raise when there are many connected clients.
#
# When dynamic HZ is enabled, the actual configured HZ will be used
# as a baseline, but multiples of the configured HZ value will be actually
# used as needed once more clients are connected. In this way an idle
# instance will use very little CPU time while a busy instance will be
# more responsive.
dynamic-hz yes
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
# When redis saves RDB file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
rdb-save-incremental-fsync yes
jemalloc-bg-thread yes

View File

@@ -0,0 +1,166 @@
#!/bin/bash
# Goal: install Redis on a specified port
## Binary install method
## Docker install method
## Public entry point for the Redis install
InstallRedis(){
RedisPort="6379"
RedisInstallMethod="binary"
RedisInstallHelp="0"
while [[ $# -gt 0 ]]; do
case "$1" in
-p|--port)
RedisPort="${2}"
shift # past argument
;;
-m|--method)
RedisInstallMethod="${2}"
shift # past argument
;;
-h|--help)
RedisInstallHelp="1"
;;
*)
echo "输入的内容有误,请检查!"
# unknown option
;;
esac
shift # past argument or value
done
if [ "${RedisInstallHelp}" -eq "1" ]; then
cat - 1>& 2 << EOF
./install-release.sh [-h] [-p|--port 6379] [-m|--method binary|docker]
-h, --help 打印此安装帮助说明
-p, --port 安装Redis的端口如果不指定此参数则默为6379
-m, --method 安装Redis的方式binary == 源码编译安装docker == 使用docker安装不指定则使用binary
EOF
fi
# echo $RedisPort
# echo $RedisInstallMethod
if [[ ${RedisInstallMethod} == "binary" ]]; then
InstallRedisBinary ${RedisPort}
else
InstallRedisDocker ${RedisPort}
fi
}
InstallRedisBinary() {
RedisPort=""
if [[ "$1" -ne " " ]]; then
RedisPort="$1"
echo "Redis Port = ${RedisPort}"
fi
echo "InstallRedisBinary"
CMD_REMOVE gcc
installDemandSoftwares gcc wget
echo "开始下载 Redis 6.2.6 的二进制包!"
wget https://objectstorage.ap-seoul-1.oraclecloud.com/n/cnk8d6fazu16/b/seoul/o/redis-6.2.6.tar.gz
if [ -e redis-6.2.6.tar.gz ]; then
echo "redis源码包下载完成"
echo ""
echo "开始解压缩redis的安装包"
tar -zvxf redis-6.2.6.tar.gz
cd redis-6.2.6
clear
echo ""
echo ""
echo "开始执行编译安装过程!!"
echo "开始执行编译安装过程!!"
echo "开始执行编译安装过程!!"
echo "取决于服务器的性能,可能花费较长的时间!!!"
sleep 3
echo ""
./configure
make && make install
cd redis-6.2.6
echo "Redis已经安装成功"
ehco "开始进行redis的配置修改"
wget https://objectstorage.ap-seoul-1.oraclecloud.com/n/cnk8d6fazu16/b/seoul/o/redis-6.2.6.conf
wget https://objectstorage.ap-seoul-1.oraclecloud.com/n/cnk8d6fazu16/b/seoul/o/redis-server-6.2.6.service
if [ -e redis-6.2.6.conf ] && [ -e redis-server-6.2.6.service ]; then
echo "redis配置文件下载成功开始进行修改"
echo ""
touch /var/log/redis_${RedisPort}.log
mkdir -p /var/redis/${RedisPort}
mkdir -p /etc/redis/
sed -i "s/RedisPort/${RedisPort}/g" redis-6.2.6.conf
cp redis-6.2.6.conf /etc/redis/${RedisPort}.conf
sed -i "s/RedisPort/${RedisPort}/g" redis-server-6.2.6.service
cp redis-server-6.2.6.service /etc/init.d/redisd
cd /etc/init.d
chmod +x /etc/init.d/redisd
if command_exists chkconfig; then
chkconfig redisd on
elif command_exists update-rc.d; then
update-rc.d redisd defaults
else
echo "No supported init tool found; please enable the service manually!!"
fi
# start the service
echo ""
echo "Starting the redis-server service..."
service redisd start
service redisd status
netstat -ntlp | grep redis
else
echo "redis配置文件下载失败请手动进行修改"
return 3
fi
else
echo "redis源码包下载失败"
return 3
fi
}
InstallRedisDocker(){
RedisPort=""
if [[ "$1" -ne " " ]]; then
RedisPort="$1"
echo "Redis Port = ${RedisPort}"
fi
echo "InstallRedisDocker"
echo ""
echo "## 为redis配置添加 ">>/etc/sysctl.conf
echo "vm.overcommit_memory = 1">>/etc/sysctl.conf
sysctl -p /etc/sysctl.conf
echo "开始启动docker-redis !!"
# https://hub.docker.com/r/bitnami/redis#configuration
# to set a Redis password: -e REDIS_PASSWORD=v2ryStr@ngPa.ss \
docker run -d \
-e ALLOW_EMPTY_PASSWORD=yes \
-e REDIS_AOF_ENABLED=no \
-e REDIS_PORT_NUMBER=${RedisPort} \
--name redis-server \
--network host \
bitnami/redis:6.2.6
}
InstallRedis -p 36379 -m docker
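Smoke test for the dockerised instance (a sketch; port taken from the invocation above):

```shell
docker exec redis-server redis-cli -p 36379 ping    # expect PONG
```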

View File

@@ -0,0 +1,63 @@
#!/bin/sh
#
# Simple Redis init.d script conceived to work on Linux systems
# as it does use of the /proc filesystem.
### BEGIN INIT INFO
# Provides: redis_6379
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Redis data structure server
# Description: Redis data structure server. See https://redis.io
### END INIT INFO
REDISPORT=RedisPort
EXEC=/usr/local/bin/redis-server
CLIEXEC=/usr/local/bin/redis-cli
PIDFILE=/var/run/redis_${REDISPORT}.pid
CONF="/etc/redis/${REDISPORT}.conf"
case "$1" in
start)
if [ -f $PIDFILE ]
then
echo "$PIDFILE exists, process is already running or crashed"
else
echo "Starting Redis server..."
$EXEC $CONF
fi
;;
stop)
if [ ! -f $PIDFILE ]
then
echo "$PIDFILE does not exist, process is not running"
else
PID=$(cat $PIDFILE)
echo "Stopping ..."
${CLIEXEC} -p ${REDISPORT} shutdown
while [ -x /proc/${PID} ]
do
echo "Waiting for Redis to shutdown ..."
sleep 1
done
echo "Redis stopped"
fi
;;
status)
PID=$(cat $PIDFILE)
if [ ! -x /proc/${PID} ]
then
echo 'Redis is not running'
else
echo "Redis is running ($PID)"
fi
;;
restart)
$0 stop
$0 start
;;
*)
echo "Please use start, stop, restart or status as first argument"
;;
esac

View File

@@ -0,0 +1,201 @@
#!/bin/bash
#########color code#############
RED="31m" # Error message
GREEN="32m" # Success message
YELLOW="33m" # Warning message
BLUE="36m" # Info message
command -v systemctl &>/dev/null
SYSTEMCTL_CMD=$?
command -v service &>/dev/null
SERVICE_CMD=$?
sshConfigFile="/etc/ssh/sshd_config"
### SSH login port to switch to
SSHLoginPort="22333"
## the following should be set to yes
PermitRootLogin="PermitRootLogin"
PasswordAuthentication="PasswordAuthentication"
tcpKeepAlive="TCPKeepAlive"
## the following should be set to no
changeResponseAuthentication="ChallengeResponseAuthentication"
PermitEmptyPasswords="PermitEmptyPasswords"
StrictModes="StrictModes"
###############color echo func#################
colorEcho(){
echo -e "\033[${1}${@:2}\033[0m" 1>& 2
}
check_root(){
if [[ $EUID != 0 ]];then
colorEcho ${RED} "当前非root账号(或没有root权限)无法继续操作请更换root账号!"
colorEcho ${YELLOW} "使用sudo -命令获取临时root权限执行后可能会提示输入root密码"
exit 1
fi
}
back_up_config(){
if [ -a $sshConfigFile.backup ]
then
colorEcho ${BLUE} "sshd的备份文件已存在无需备份。"
else
cp $sshConfigFile $sshConfigFile.backup
colorEcho ${GREEN} "sshd.config文件备份成功"
fi
}
modify_sshd_config_yes(){
numOfElements=$#
while [ $# -gt 0 ]
do
if grep -x "$1 yes" $sshConfigFile
then
shift
elif grep -x "#$1 yes" $sshConfigFile
then
sed -i "s/#$1 yes/$1 yes/g" $sshConfigFile
shift
elif grep -x "$1 no" $sshConfigFile
then
sed -i "s/$1 no/$1 yes/g" $sshConfigFile
shift
else
sed -i "$ a $1 yes" $sshConfigFile
shift
fi
done
}
modify_sshd_config_no(){
numOfElements=$#
while [ $# -gt 0 ]
do
if grep -x "$1 no" $sshConfigFile
then
shift
elif grep -x "#$1 no" $sshConfigFile
then
sed -i "s/#$1 no/$1 no/g" $sshConfigFile
shift
elif grep -x "$1 yes" $sshConfigFile
then
sed -i "s/$1 yes/$1 no/g" $sshConfigFile
shift
else
sed -i "$ a $1 no" $sshConfigFile
shift
fi
done
}
modify_sshd_config(){
declare -a needToChangeYes
declare -a needToChangeNo
needToChangeYes[0]=$tcpKeepAlive
needToChangeYes[1]=$PermitRootLogin
needToChangeYes[2]=$PasswordAuthentication
needToChangeNo[0]=$PermitEmptyPasswords
needToChangeNo[1]=$changeResponseAuthentication
needToChangeNo[2]=$StrictModes
# pass the arrays into the functions as arguments
modify_sshd_config_yes "${needToChangeYes[@]}"
modify_sshd_config_no "${needToChangeNo[@]}"
colorEcho $GREEN "SSHD文件已经修改成功。。。"
}
restartSSHDService(){
check_success(){
if [[ $1 -eq 0 ]]
then
colorEcho ${BLUE} "sshd.service服务已经重启完成"
colorEcho ${GREEN} "sshd文件已经修改成功可以进行root登录请修改root密码~~"
else
colorEcho ${RED} "sshd服务重启失败请检查原因!!!"
colorEcho ${RED} "如果是CentOS大概率是防火墙的问题。"
fi
}
if [[ ${SYSTEMCTL_CMD} -eq 0 ]]
then
systemctl restart sshd.service
check_success $?
elif [[ ${SERVICE_CMD} -eq 0 ]]
then
service sshd restart
check_success $?
else
colorEcho ${RED} "缺少systemctl和service本脚本不支持"
return 23
fi
}
changeSSHLoginPort(){
if grep -xw "Port ${SSHLoginPort}" $sshConfigFile &>/dev/null
then
colorEcho ${BLUE} "当前的ssh登录端口已经为${SSHLoginPort},无需修改!"
else
sed -i "/^#Port 22/a Port ${SSHLoginPort}" $sshConfigFile
if [[ $? -eq 0 ]]
then
colorEcho ${GREEN} "ssh的登陆端口已被修改为${SSHLoginPort},请修改防火墙以开放该端口!!"
fi
fi
}
extendIntervalTime(){
echo "ClientAliveInterval 30" >> /etc/ssh/sshd_config
echo "ClientAliveCountMax 60" >> /etc/ssh/sshd_config
}
modify_firewall(){
echo ""
colorEcho $GREEN "本脚本会默认关闭防火墙和SElinux"
colorEcho $GREEN "本脚本会默认关闭防火墙和SElinux"
colorEcho $GREEN "本脚本会默认关闭防火墙和SElinux"
systemctl stop firewalld
systemctl disable firewalld
systemctl stop ufw
systemctl disable ufw
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# iptables -F
}
main(){
# first make sure we have root privileges
check_root
# back up the sshd config file
back_up_config
# apply the sshd config changes
modify_sshd_config
# change the SSH login port
changeSSHLoginPort
# extend the SSH keep-alive timeout
extendIntervalTime
# stop the firewall services, otherwise the sshd service cannot be restarted
modify_firewall
# restart the sshd service
restartSSHDService
restartSSHDService
}
main

View File

@@ -0,0 +1,201 @@
#!/bin/bash
#########color code#############
RED="31m" # Error message
GREEN="32m" # Success message
YELLOW="33m" # Warning message
BLUE="36m" # Info message
command -v systemctl &>/dev/null
SYSTEMCTL_CMD=$?
command -v service &>/dev/null
SERVICE_CMD=$?
sshConfigFile="/etc/ssh/sshd_config"
### SSH login port to switch to
SSHLoginPort="10000"
## the following should be set to yes
PermitRootLogin="PermitRootLogin"
PasswordAuthentication="PasswordAuthentication"
tcpKeepAlive="TCPKeepAlive"
## the following should be set to no
changeResponseAuthentication="ChallengeResponseAuthentication"
PermitEmptyPasswords="PermitEmptyPasswords"
StrictModes="StrictModes"
###############color echo func#################
colorEcho(){
echo -e "\033[${1}${@:2}\033[0m" 1>& 2
}
check_root(){
if [[ $EUID != 0 ]];then
colorEcho ${RED} "当前非root账号(或没有root权限)无法继续操作请更换root账号!"
colorEcho ${YELLOW} "使用sudo -命令获取临时root权限执行后可能会提示输入root密码"
exit 1
fi
}
back_up_config(){
if [ -a $sshConfigFile.backup ]
then
colorEcho ${BLUE} "sshd的备份文件已存在无需备份。"
else
cp $sshConfigFile $sshConfigFile.backup
colorEcho ${GREEN} "sshd.config文件备份成功"
fi
}
modify_sshd_config_yes(){
numOfElements=$#
while [ $# -gt 0 ]
do
if grep -x "$1 yes" $sshConfigFile
then
shift
elif grep -x "#$1 yes" $sshConfigFile
then
sed -i "s/#$1 yes/$1 yes/g" $sshConfigFile
shift
elif grep -x "$1 no" $sshConfigFile
then
sed -i "s/$1 no/$1 yes/g" $sshConfigFile
shift
else
sed -i "$ a $1 yes" $sshConfigFile
shift
fi
done
}
modify_sshd_config_no(){
numOfElements=$#
while [ $# -gt 0 ]
do
if grep -x "$1 no" $sshConfigFile
then
shift
elif grep -x "#$1 no" $sshConfigFile
then
sed -i "s/#$1 no/$1 no/g" $sshConfigFile
shift
elif grep -x "$1 yes" $sshConfigFile
then
sed -i "s/$1 yes/$1 no/g" $sshConfigFile
shift
else
sed -i "$ a $1 no" $sshConfigFile
shift
fi
done
}
modify_sshd_config(){
declare -a needToChangeYes
declare -a needToChangeNo
needToChangeYes[0]=$tcpKeepAlive
needToChangeYes[1]=$PermitRootLogin
needToChangeYes[2]=$PasswordAuthentication
needToChangeNo[0]=$PermitEmptyPasswords
needToChangeNo[1]=$changeResponseAuthentication
needToChangeNo[2]=$StrictModes
# pass the arrays into the functions as arguments
modify_sshd_config_yes "${needToChangeYes[@]}"
modify_sshd_config_no "${needToChangeNo[@]}"
colorEcho $GREEN "SSHD文件已经修改成功。。。"
}
restartSSHDService(){
check_success(){
if [[ $1 -eq 0 ]]
then
colorEcho ${BLUE} "sshd.service服务已经重启完成"
colorEcho ${GREEN} "sshd文件已经修改成功可以进行root登录请修改root密码~~"
else
colorEcho ${RED} "sshd服务重启失败请检查原因!!!"
colorEcho ${RED} "如果是CentOS大概率是防火墙的问题。"
fi
}
if [[ ${SYSTEMCTL_CMD} -eq 0 ]]
then
systemctl restart sshd.service
check_success $?
elif [[ ${SERVICE_CMD} -eq 0 ]]
then
service sshd restart
check_success $?
else
colorEcho ${RED} "缺少systemctl和service本脚本不支持"
return 23
fi
}
changeSSHLoginPort(){
if grep -xw "Port ${SSHLoginPort}" $sshConfigFile &>/dev/null
then
colorEcho ${BLUE} "当前的ssh登录端口已经为${SSHLoginPort},无需修改!"
else
sed -i "/^#Port 22/a Port ${SSHLoginPort}" $sshConfigFile
if [[ $? -eq 0 ]]
then
colorEcho ${GREEN} "ssh的登陆端口已被修改为${SSHLoginPort},请修改防火墙以开放该端口!!"
fi
fi
}
extendIntervalTime(){
echo "ClientAliveInterval 30" >> /etc/ssh/sshd_config
echo "ClientAliveCountMax 60" >> /etc/ssh/sshd_config
}
modify_firewall(){
echo ""
colorEcho $GREEN "本脚本会默认关闭防火墙和SElinux"
colorEcho $GREEN "本脚本会默认关闭防火墙和SElinux"
colorEcho $GREEN "本脚本会默认关闭防火墙和SElinux"
systemctl stop firewalld
systemctl disable firewalld
systemctl stop ufw
systemctl disable ufw
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# iptables -F
}
main(){
# first make sure we have root privileges
check_root
# back up the sshd config file
back_up_config
# apply the sshd config changes
modify_sshd_config
# change the SSH login port
changeSSHLoginPort
# extend the SSH keep-alive timeout
extendIntervalTime
# stop the firewall services, otherwise the sshd service cannot be restarted
modify_firewall
# restart the sshd service
restartSSHDService
restartSSHDService
}
main

View File

@@ -0,0 +1,115 @@
#!/bin/bash
#########color code#############
RED="31m" # Error message
GREEN="32m" # Success message
YELLOW="33m" # Warning message
BLUE="36m" # Info message
sshConfigFile="/etc/ssh/sshd_config"
## the following should be set to yes
PermitRootLogin="PermitRootLogin"
PasswordAuthentication="PasswordAuthentication"
tcpKeepAlive="TCPKeepAlive"
## the following should be set to no
changeResponseAuthentication="ChallengeResponseAuthentication"
PermitEmptyPasswords="PermitEmptyPasswords"
StrictModes="StrictModes"
###############color echo func#################
colorEcho(){
echo -e "\033[${1}${@:2}\033[0m" 1>& 2
}
check_root(){
if [[ $EUID != 0 ]];then
colorEcho ${RED} "当前非root账号(或没有root权限)无法继续操作请更换root账号!"
colorEcho ${YELLOW} "使用sudo -命令获取临时root权限执行后可能会提示输入root密码"
exit 1
fi
}
back_up_config(){
cp $sshConfigFile $sshConfigFile.backup
}
modify_sshd_config_yes(){
numOfElements=$#
while [ $# -gt 0 ]
do
if grep -x "$1 yes" $sshConfigFile
then
shift
elif grep -x "#$1 yes" $sshConfigFile
then
sed -i "s/#$1 yes/$1 yes/g" $sshConfigFile
shift
elif grep -x "$1 no" $sshConfigFile
then
sed -i "s/$1 no/$1 yes/g" $sshConfigFile
shift
else
sed -i "$ a $1 yes" $sshConfigFile
shift
fi
done
}
modify_sshd_config_no(){
numOfElements=$#
while [ $# -gt 0 ]
do
if grep -x "$1 no" $sshConfigFile
then
shift
elif grep -x "#$1 no" $sshConfigFile
then
sed -i "s/#$1 no/$1 no/g" $sshConfigFile
shift
elif grep -x "$1 yes" $sshConfigFile
then
sed -i "s/$1 yes/$1 no/g" $sshConfigFile
shift
else
sed -i "$ a $1 no" $sshConfigFile
shift
fi
done
}
main(){
# first make sure we have root privileges
check_root
# back up the sshd config file
back_up_config
declare -a needToChangeYes
declare -a needToChangeNo
needToChangeYes[0]=$tcpKeepAlive
needToChangeYes[1]=$PermitRootLogin
needToChangeYes[2]=$PasswordAuthentication
needToChangeNo[0]=$PermitEmptyPasswords
needToChangeNo[1]=$changeResponseAuthentication
# pass the arrays into the functions as arguments
modify_sshd_config_yes "${needToChangeYes[@]}"
modify_sshd_config_no "${needToChangeNo[@]}"
systemctl restart sshd.service
if [ $? -eq 0 ];then
echo "sshd_config updated; root login is now enabled -- remember to change the root password"
else
echo "Failed to restart the sshd service; please investigate"
fi
}
main

View File

@@ -0,0 +1,10 @@
#!/bin/bash
export domain_name=uat
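# NOTE: the dns_ali hook requires the Ali_Key and Ali_Secret environment variables to be
# exported before --issue runs (see the acme.sh dnsapi documentation).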
acme.sh --issue --dns dns_ali --force --dnssleep 2 -k ec-256 -d \*.${domain_name}.uavcmlc.com
acme.sh --install-cert -d \*.${domain_name}.uavcmlc.com --ecc \
--key-file /etc/nginx/conf.d/ssl_key/x.${domain_name}.uavcmlc.key.pem \
--fullchain-file /etc/nginx/conf.d/ssl_key/x.${domain_name}.uavcmlc.cert.pem \
--reloadcmd "systemctl restart nginx --force"

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
namespace=zjjt
all_error_app_list=(cmii-uav-user cmii-uav-oauth cmii-admin-user cmii-admin-gateway cmii-uav-gateway cmii-uav-cloud-live cmii-uav-material-warehouse)
for app in ${all_error_app_list[@]};do
echo "current app is ${app}"
old_replicas=$(kubectl -n ${namespace} get deployment ${app} -o jsonpath='{.spec.replicas}')
kubectl -n ${namespace} scale deployment ${app} --replicas=0
sleep 1
kubectl -n ${namespace} scale deployment ${app} --replicas=${old_replicas}
done

View File

@@ -0,0 +1,16 @@
#!/bin/bash
#ip_list=(192.168.35.11 192.168.35.7 192.168.35.6 192.168.34.249 192.168.35.206 192.168.35.209 192.168.35.208 192.168.35.207 192.168.35.80 192.168.35.71 192.168.35.124 192.168.35.125 192.168.35.82 192.168.34.216 192.168.34.125 192.168.34.233 192.168.34.239 192.168.34.241 192.168.34.84 192.168.34.83 192.168.34.56 192.168.11.228 192.168.35.47 192.168.35.45)
ip_list=(192.168.34.250)
for ip in ${ip_list[@]};do
echo "ip is $ip"
# ssh root@${ip} "echo yes"
ssh root@${ip} "ip addr | grep ${ip} -A 3"
echo ""
ssh root@${ip} "cat /etc/os-release"
echo ""
# ssh root@${ip} "echo \"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBO6IjOcco9eAvA/T7LDsylCgjKJlLrVXY1zxO1/mX/MTzVZGuAhbikFJT2ZN2Up8iED+pJwpcps3LlA1wOjQC3Q= root@ops-04.ecs.io\" >> /root/.ssh/authorized_keys"
done

View File

@@ -0,0 +1,20 @@
#!/bin/bash
namespace=uavcloud-demo
app_name_list=(uav-gateway cmii-uav-notice cmii-uas-gateway cmii-uas-lifecycle uav-platform-uas)
echo ""
echo "current namespace is $namespace"
echo ""
for app in ${app_name_list[@]};do
echo "current app is $app"
pod_name=$(kubectl -n ${namespace} get pods | grep ${app} | awk '{print$1}' | head -n1)
echo "pod name of app is => $pod_name"
kubectl -n ${namespace} describe pod ${pod_name} | grep "Image ID:" | awk '{print $3}'
echo ""
done

View File

@@ -0,0 +1,130 @@
#!/bin/bash
RED="31m" ## 姨妈红
GREEN="32m" ## 水鸭青
YELLOW="33m" ## 鸭屎黄
PURPLE="35m" ## 基佬紫
BLUE="36m" ## 天依蓝
######## 颜色函数方法很精妙 ############
colorEcho() {
echo -e "\033[${1}${@:2}\033[0m" 1>&2
}
check_root() {
if [[ $EUID != 0 ]]; then
colorEcho ${RED} "当前非root账号(或没有root权限)无法继续操作请更换root账号!"
colorEcho ${YELLOW} "使用sudo -命令获取临时root权限执行后可能会提示输入root密码"
exit 1
fi
}
backupOriginAPT(){
echo "正在备份现有的源文件…………"
mv /etc/apt/sources.list /etc/apt/sources.list.backup
}
changeAPTMirrot(){
local MirrorAddress="$1"
if [ -n "${MirrorAddress}" ]
then
colorEcho $BLUE "Switching sources.list to the ( ${MirrorAddress} ) mirror..."
cat >>/etc/apt/sources.list <<EOF
deb $MirrorAddress bionic main restricted universe multiverse
deb $MirrorAddress bionic-security main restricted universe multiverse
deb $MirrorAddress bionic-updates main restricted universe multiverse
deb $MirrorAddress bionic-proposed main restricted universe multiverse
deb $MirrorAddress bionic-backports main restricted universe multiverse
deb-src $MirrorAddress bionic main restricted universe multiverse
deb-src $MirrorAddress bionic-security main restricted universe multiverse
deb-src $MirrorAddress bionic-updates main restricted universe multiverse
deb-src $MirrorAddress bionic-proposed main restricted universe multiverse
deb-src $MirrorAddress bionic-backports main restricted universe multiverse
EOF
fi
}
changeToAliMirror(){
colorEcho $BLUE "开始将源文件替换为阿里云的镜像源……"
cat >>/etc/apt/sources.list <<EOF
deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
EOF
}
changeToTunaMirror(){
colorEcho $BLUE"开始将源文件替换为清华的镜像源……"
cat >>/etc/apt/sources.list <<EOF
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-proposed main restricted universe multiverse
deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-proposed main restricted universe multiverse
EOF
}
changeToUSTCMirror(){
colorEcho $BLUE "开始将源文件替换为中科大的镜像源……"
cat >>/etc/apt/sources.list <<EOF
deb https://mirrors.ustc.edu.cn/ubuntu/ bionic main restricted universe multiverse
deb https://mirrors.ustc.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
deb https://mirrors.ustc.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
deb https://mirrors.ustc.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
deb https://mirrors.ustc.edu.cn/ubuntu/ bionic-proposed main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu/ bionic main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu/ bionic-proposed main restricted universe multiverse
EOF
}
postChange(){
echo ""
colorEcho $GREEN "请查看当前的source.list文件中的内容"
colorEcho $GREEN "----------------------------------------------------------------------------------------"
cat /etc/apt/sources.list
colorEcho $GREEN "----------------------------------------------------------------------------------------"
echo ""
colorEcho $BLUE "开始执行更新程序……"
apt-get update
}
main(){
check_root
backupOriginAPT
## switch to the Aliyun apt mirror
changeToAliMirror
## Tsinghua (TUNA) mirror
#changeToTunaMirror
## USTC mirror
#changeToUSTCMirror
postChange
}
main

View File

@@ -0,0 +1,86 @@
#!/bin/bash
# Change the host name
# linux-init must be run first
OSS_PATH_PREFIX=https://b2.107421.xyz
V2RAY_INSTALL_SCRIPT_PATH=$OSS_PATH_PREFIX/v2ray-install.sh
V2RAY_FILE_OSS_PATH=$OSS_PATH_PREFIX/v2ray-linux-64_v4.32.1.zip
install_v2ray() {
echo "开始下载需要的v2ray文件"
wget $V2RAY_INSTALL_SCRIPT_PATH
wget $V2RAY_FILE_OSS_PATH
echo ""
echo "开始执行V2ray的安装工作"
echo "y
" | bash ./v2ray-install.sh --local v2ray-linux-64_v4.32.1.zip
echo "v2ray安装成功"
}
modify_sock5_v2ray_config() {
echo "开始修改V2ray的配置为socks5的形式"
cat >/usr/local/etc/v2ray/config.json <<EOF
{
"inbounds": [
{
"protocol": "socks",
"port": 28888,
"listen": "0.0.0.0",
"settings": {
"auth": "password",
"accounts": [
{
"user": "zeaslity",
"pass": "lovemm.23"
}
],
"udp": true,
"userLevel": 0
}
},
{
"protocol": "socks",
"port": 58889,
"listen": "0.0.0.0",
"settings": {
"auth": "noauth",
"udp": true,
"userLevel": 0
}
}
],
"dns": {
"network": "udp",
"address": "223.5.5.5",
"port": 53
},
"outbounds": [
{
"protocol": "freedom",
"settings": {}
}
]
}
EOF
echo ""
systemctl restart v2ray
echo "v2ray配置完成 请检查v2ray端口的开放情况"
echo "$(netstat -ntlp | grep 2888)"
}
main() {
install_v2ray
modify_sock5_v2ray_config
}
main
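A quick reachability check for the authenticated SOCKS5 inbound (a sketch; assumes curl is available on the host):

```shell
curl -x socks5h://zeaslity:lovemm.23@127.0.0.1:28888 https://ifconfig.me
```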

View File

@@ -0,0 +1,14 @@
#!/bin/bash
# /snap             - installed Snap packages
# /var/lib/snapd    - snapd's data store, including configuration and state
# /etc/snapd        - snapd configuration files
# /usr/lib/snapd    - snapd binaries and libraries
# /home/<user>/snap - per-user Snap application data
folder_list=(/snap /var/lib/snapd /etc/snapd /usr/lib/snapd /home/wdd /etc/nginx /lib/systemd/system/ /data /usr/local/bin /usr/bin /usr/sbin /var/lib/docker /usr/local/)
for folder in ${folder_list[@]};do
echo "current folder is ${folder}"
rsync -avz root@10.250.0.100:${folder} ${folder}
done

View File

@@ -0,0 +1,67 @@
#! /bin/bash
# Disable swap
swapoff -a
cp -f /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak | grep -v swap >/etc/fstab
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeBefore=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "扩容之前的root目录的容量为${RootVolumeSizeBefore}"
# echo "y
# " | lvremove /dev/mapper/centos-swap
# freepesize=$(vgdisplay centos | grep 'Free PE' | awk '{print $5}')
# lvextend -l+${freepesize} /dev/mapper/centos-root
# ## #自动扩展XFS文件系统到最大的可用大小
# xfs_growfs /dev/mapper/centos-root
# df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}'
# echo "-----------------------------------------------------------------------"
# RootVolumeSizeAfter=$(df -TH | grep -w "/dev/mapper/centos-root" | awk '{print $3}')
# echo "扩容之后的root目录的容量为${RootVolumeSizeAfter}"
# RootVolumeSizeBeforeNum=$(echo $RootVolumeSizeBefore | cut -d "G" -f1)
# RootVolumeSizeAfterNum=$(echo $RootVolumeSizeAfter | cut -d "G" -f1)
# echo "恭喜您的root目录容量增加了+++++++$(( ${RootVolumeSizeAfterNum}-${RootVolumeSizeBeforeNum} ))GB+++++"
echo ""
echo ""
echo ""
echo "-----------------------------------------------------------------------"
VG_NAME=datavg
echo "n
p
t
8e
w
" | fdisk /dev/sdb
partprobe
vgcreate ${VG_NAME} /dev/sdb1
selfpesize=$(vgdisplay ${VG_NAME} | grep 'Total PE' | awk '{print $3}')
lvcreate -l ${selfpesize} -n lvdata ${VG_NAME}
mkfs.xfs /dev/mapper/${VG_NAME}-lvdata
mkdir -p /data
mkdir -p /var/lib/docker
selffstab="/dev/mapper/${VG_NAME}-lvdata /var/lib/docker xfs defaults 0 0"
echo "${selffstab}" >> /etc/fstab
mount -a
echo ""
echo ""
echo ""
df -TH
echo "-----------------------------------------------------------------------"

View File

@@ -0,0 +1,199 @@
#!/bin/bash
echo "n
p
+5G
t
8e
w
" | fdisk /dev/sdc
partprobe
pvcreate /dev/sdc2
vgextend rootvg /dev/sdc2
export selfpesize=$(vgdisplay rootvg | grep 'Free' | awk '{print $5}')
lvextend -l+${selfpesize} /dev/mapper/rootvg-lvusr
xfs_growfs /dev/mapper/rootvg-lvusr
<h3>K8s node expansion</h3>
<h5>1. Set up time sync and the timezone</h5>
```shell
yum makecache fast
yum install ntp -y
ntpdate ntp.ubuntu.com
systemctl start ntpd
systemctl enable ntpd
cp -f /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
```
<h5>2. Set the hostname, SELinux, etc.</h5>
```shell
hostnamectl set-hostname dmz-k8s1-n8
systemctl stop NetworkManager
systemctl disable NetworkManager
systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state
setenforce 0
getenforce
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
cat /etc/selinux/config
swapoff -a
free
sed -i 's/\(.*swap.*\)/#\1/' /etc/fstab
cat /etc/fstab
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
modprobe br_netfilter
lsmod | grep br_netfilter
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
sysctl -p
```
<h5>3. Install docker-ce</h5>
```shell
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast
yum install -y containerd.io
yum install -y docker-ce-cli-19.03.13-3.el7.x86_64
yum install -y docker-ce-19.03.13-3.el7.x86_64
rpm -qa |grep docker
mkdir /etc/docker
cat <<EOF | sudo tee /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
],
"insecure-registries": [
"harbor-qa.sre.cdcyy.cn",
"harbor-manager.sre.cdcyy.cn",
"harbor-prod.sre.cdcyy.cn",
"harbor.sre.cdcyy.cn"
],
"registry-mirrors": ["https://qbbxbykw.mirror.aliyuncs.com","https://docker.mirrors.ustc.edu.cn"]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload
systemctl start docker
systemctl enable docker
## /etc/resolv.conf on these nodes (for reference):
# Generated by NetworkManager
search ecs.io
nameserver 192.168.34.40
nameserver 223.5.5.5
```
<h5>4. Add IPVS support</h5>
```shell
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
yum install -y ipset ipvsadm
cat >/etc/sysctl.d/k8s.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv6.conf.all.forwarding = 1
EOF
## apply the settings
sysctl -p /etc/sysctl.d/k8s.conf
```
<h5>5. Install the kubelet components</h5>
```shell
## variant A: create the repo file by hand (note the exclude= line, used with --disableexcludes below)
## vi /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=0
repo_gpgcheck=0
exclude=kubelet kubeadm kubectl
## variant B: write the repo file directly
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet-1.18.0-0.x86_64 --disableexcludes=kubernetes
yum install -y kubeadm-1.18.0-0.x86_64 --disableexcludes=kubernetes
yum downgrade -y kubectl-1.18.0-0.x86_64 --disableexcludes=kubernetes
rpm -qa |grep kube
systemctl enable --now kubelet
yum install -y kubelet-1.20.4-0.x86_64 --disableexcludes=kubernetes
yum install -y kubeadm-1.20.4-0.x86_64 --disableexcludes=kubernetes
yum downgrade -y kubectl-1.20.4-0.x86_64 --disableexcludes=kubernetes
rpm -qa |grep kube
systemctl enable --now kubelet   # kubelet will keep crash-looping and restarting here; that is expected until the node joins via kubeadm
```
<h5>6. Join the cluster</h5>
```shell
kubeadm token create --print-join-command
kubeadm join 192.168.148.131:6443 --token 1btqa0.r7xinh9gq81qcftx --discovery-token-ca-cert-hash sha256:938924402f317621bdafc937184db37643335f1065db9ba6d5846f021e224ea0
```
<h5>7. Label the node and add a taint</h5>
```shell
kubectl label nodes dmz-k8s1-n8 prolab=xczx
kubectl taint nodes dmz-k8s1-n8 project=countryup:NoSchedule
```
```shell
kubeadm token create --print-join-command
yum install -y glusterfs glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel
```
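After joining, it may help to confirm the node state and the label/taint from step 7 (a sketch):

```shell
kubectl get nodes -o wide
kubectl describe node dmz-k8s1-n8 | grep -i -A2 taints
kubectl get node dmz-k8s1-n8 --show-labels
```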