延庆+山东项目
This commit is contained in:
@@ -8,4 +8,9 @@ https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/harbor-offline-i
|
||||
https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/docker-compose-linux-x86_64-v2.18.0
|
||||
|
||||
|
||||
mv /root/octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
|
||||
chmod +x /usr/local/bin/octopus-agent
|
||||
|
||||
printf 'help\n' | octopus-agent --mode=bastion
|
||||
|
||||
file_list=(docker-amd64-20.10.15.tgz harbor-offline-installer-v2.9.0 docker-compose-linux-x86_64-v2.18.0)
|
||||
@@ -7,7 +7,7 @@ host_list=(10.129.80.217 10.129.80.245 10.129.80.222 10.129.80.223)
|
||||
|
||||
for ip in "${host_list[@]}";do
|
||||
echo "current ip is $ip"
|
||||
ssh root@${ip} "curl 172.24.152.72"
|
||||
ssh root@${server} "curl 172.24.152.72"
|
||||
done
|
||||
|
||||
|
||||
@@ -15,27 +15,27 @@ disk
|
||||
|
||||
10.129.80.245
|
||||
|
||||
mv /root/wdd/octopus-agent_linux_amd64_2024-09-23-17-08-44 /usr/local/bin/octopus-agent
|
||||
mv /root/wdd/octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
|
||||
chmod +x /usr/local/bin/octopus-agent
|
||||
|
||||
|
||||
# ssh root@${ip} "mkdir /root/wdd"
|
||||
# scp /usr/local/bin/octopus-agent root@${ip}:/usr/local/bin/octopus-agent
|
||||
# scp /root/wdd/docker-amd64-20.10.15.tgz root@${ip}:/root/wdd/
|
||||
# scp /root/wdd/nfs_client_22.04.4_amd64.tar.gz root@${ip}:/root/wdd/
|
||||
# scp /root/wdd/nfs_server_22.04.4_amd64.tar.gz root@${ip}:/root/wdd/
|
||||
# scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${ip}:/root/wdd/
|
||||
ssh root@${server} "mkdir /root/wdd"
|
||||
scp /usr/local/bin/octopus-agent root@${server}:/usr/local/bin/octopus-agent
|
||||
scp /root/wdd/docker-amd64-20.10.15.tgz root@${server}:/root/wdd/docker-amd64-20.10.15.tgz
|
||||
scp /root/wdd/nfs_client_22.04.4_amd64.tar.gz root@${server}:/root/wdd/
|
||||
scp /root/wdd/nfs_server_22.04.4_amd64.tar.gz root@${server}:/root/wdd/
|
||||
scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${server}:/root/wdd/
|
||||
|
||||
# ssh root@${ip} "chmod +x /usr/local/bin/octopus-agent"
|
||||
# ssh root@${ip} "printf 'firewall\n' | octopus-agent --mode=bastion"
|
||||
# ssh root@${ip} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
|
||||
# ssh root@${ip} "printf 'swap\n' | octopus-agent --mode=bastion"
|
||||
# ssh root@${ip} "printf 'selinux\n' | octopus-agent --mode=bastion"
|
||||
# ssh root@${ip} "printf 'docker\n' | octopus-agent --mode=bastion"
|
||||
# ssh root@${ip} "printf 'dockercompose\n' | octopus-agent --mode=bastion"
|
||||
ssh root@${server} "chmod +x /usr/local/bin/octopus-agent"
|
||||
ssh root@${server} "printf 'firewall\n' | octopus-agent --mode=bastion"
|
||||
ssh root@${server} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
|
||||
ssh root@${server} "printf 'swap\n' | octopus-agent --mode=bastion"
|
||||
ssh root@${server} "printf 'selinux\n' | octopus-agent --mode=bastion"
|
||||
ssh root@${server} "printf 'docker\n' | octopus-agent --mode=bastion"
|
||||
ssh root@${server} "printf 'dockercompose\n' | octopus-agent --mode=bastion"
|
||||
|
||||
scp /etc/docker/daemon.json root@${ip}:/etc/docker/
|
||||
scp /etc/docker/daemon.json root@${server}:/etc/docker/
|
||||
|
||||
ssh root@${ip} "systemctl restart docker && sleep 3 && docker info"
|
||||
ssh root@${server} "systemctl restart docker && sleep 3 && docker info"
|
||||
|
||||
sed -i '/^$/d' ~/.ssh/*
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
namespace=xmyd
|
||||
namespace=bjyd
|
||||
|
||||
install_yq() {
|
||||
wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/yq_linux_amd64 -O /usr/local/bin/yq
|
||||
@@ -43,7 +43,7 @@ backup_all_stateful_sets() {
|
||||
echo ""
|
||||
}
|
||||
|
||||
#install_yq
|
||||
install_yq
|
||||
backup_all_deployment
|
||||
backup_all_service
|
||||
backup_all_stateful_sets
|
||||
|
||||
@@ -5,7 +5,7 @@ wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/jq-linux-am
|
||||
chmod +x /usr/local/bin/jq
|
||||
|
||||
|
||||
export name_space=bjyd
|
||||
export name_space=xmyd
|
||||
|
||||
kubectl delete pods -n $name_space --field-selector status.phase!=Running --force
|
||||
|
||||
@@ -13,3 +13,4 @@ kubectl get pods -n $name_space -o json | jq -r '.items[] | select(.status.conta
|
||||
|
||||
|
||||
kubectl -n ${name_space} delete pod helm-nacos-0 --force
|
||||
|
||||
24
998-常用脚本/故障恢复脚本/删除重启次数过多的Pod.sh
Normal file
24
998-常用脚本/故障恢复脚本/删除重启次数过多的Pod.sh
Normal file
@@ -0,0 +1,24 @@
|
||||
#!/bin/bash
# Clean up pods that restart too often.
# Part 1: force-delete high-restart pods in one fixed namespace.
# Part 2: scale high-restart Deployments to 0 replicas across a list of
# namespaces.


# Target namespace for the one-off pod deletion below.
name_space=xmyd
# Column 4 of `kubectl get pods` output is RESTARTS; pods with more than 7
# restarts are force-deleted. The header row also passes the awk comparison
# ("RESTARTS" > 7 is a string compare), hence the `grep -v NAME` filter.
kubectl get pods --namespace="${name_space}" --sort-by='.status.containerStatuses[].restartCount' | awk '$4 > 7 {print $1} ' | grep -v NAME | xargs -I {} kubectl delete -n "${name_space}" pod {} --force


#all_cmii_name_space=(uavcloud-test uavcloud-feature uavcloud-uat uavcloud-dev uavcloud-devflight uavcloud-devoperation)
all_cmii_name_space=(uavcloud-test uavcloud-feature uavcloud-dev uavcloud-devflight uavcloud-devoperation)

echo ""
# NOTE(review): the loop reuses $name_space from part 1 as its loop
# variable, so the value assigned above is clobbered once the loop runs.
for name_space in "${all_cmii_name_space[@]}"; do
echo "[NAMESPACE] - start to deal with namespace [$name_space]"
# Skip namespaces that do not exist on this cluster.
if ! kubectl get ns "$name_space"; then
echo "[NAMESPACE] - namespace of [$name_space] not exists !"
echo ""
continue
fi
echo ""

# Pods restarted more than 30 times: strip the ReplicaSet suffix
# ("-<9-10 hex chars>-<5 chars>") from the pod name to recover the
# Deployment name, then scale that Deployment down to zero replicas.
kubectl get pods --namespace="${name_space}" --sort-by='.status.containerStatuses[].restartCount' | awk '$4 > 30 {print $1} ' | sed 's/-[a-z0-9]\{9,10\}-[a-z0-9]\{5\}$//' | xargs -I {} kubectl scale -n "${name_space}" --replicas=0 deployment {}
echo ""

done
|
||||
1033
998-常用脚本/服务器性能/yabs.sh
Normal file
1033
998-常用脚本/服务器性能/yabs.sh
Normal file
File diff suppressed because it is too large
Load Diff
66
998-常用脚本/服务器性能/磁盘.sh
Normal file
66
998-常用脚本/服务器性能/磁盘.sh
Normal file
@@ -0,0 +1,66 @@
|
||||
#!/bin/bash
# Simple disk throughput benchmark: one large sequential file plus a
# small-file loop, timed with date(1) and computed with bc(1).
#
# Fixes over the original:
#  - LARGE_FILE_SIZE=3G / SMALL_FILE_SIZE=10M were declared but never
#    used (dd hardcoded 1024x1M and 1x1M, and the inline comments even
#    claimed "1GB"/"1MB"); sizes are now single-sourced in MB below.
#  - The small-file speed divided 1024 by the elapsed time although only
#    3 MB were transferred, overstating the result ~340x; the speed is
#    now computed from the actual number of megabytes written/read.
set -u

# Directory whose underlying filesystem is measured.
TEST_DIR="/var/lib/docker/"

# All sizes in MB so the same numbers drive both dd and the speed math.
LARGE_FILE_MB=1024      # large sequential test file: 1 GiB
SMALL_FILE_MB=1         # each small file: 1 MiB
SMALL_FILE_COUNT=3      # number of small files

LARGE_FILE="${TEST_DIR}/large_test_file"

# 创建测试目录,如果不存在
mkdir -p "$TEST_DIR"

echo "开始大文件读写测试..."

# Write the large file; conv=fdatasync makes dd flush to the device so
# the timing covers real disk writes, not just the page cache.
start_time=$(date +%s.%N)
dd if=/dev/zero of="$LARGE_FILE" bs=1M count="$LARGE_FILE_MB" conv=fdatasync
end_time=$(date +%s.%N)
write_time=$(echo "$end_time - $start_time" | bc)
write_speed=$(echo "scale=2; $LARGE_FILE_MB / $write_time" | bc)

echo "大文件写入速度: ${write_speed} MB/s"

# Sequential read of the same file.
# NOTE(review): without dropping the page cache first
# (sync; echo 3 > /proc/sys/vm/drop_caches) this mostly measures RAM,
# not the disk — confirm whether that is acceptable.
start_time=$(date +%s.%N)
dd if="$LARGE_FILE" of=/dev/null bs=1M
end_time=$(date +%s.%N)
read_time=$(echo "$end_time - $start_time" | bc)
read_speed=$(echo "scale=2; $LARGE_FILE_MB / $read_time" | bc)

echo "大文件读取速度: ${read_speed} MB/s"

# 清理大文件
rm -f "$LARGE_FILE"

echo "开始小文件读写测试..."

# Total megabytes moved by the small-file phase; the speed formula must
# use this, not a hardcoded 1024.
total_small_mb=$(( SMALL_FILE_COUNT * SMALL_FILE_MB ))

# 写入小文件并测量时间
start_time=$(date +%s.%N)
for i in $(seq 1 "$SMALL_FILE_COUNT"); do
  dd if=/dev/zero of="${TEST_DIR}/small_test_file_$i" bs=1M count="$SMALL_FILE_MB" conv=fdatasync
done
end_time=$(date +%s.%N)
write_time=$(echo "$end_time - $start_time" | bc)
write_speed=$(echo "scale=2; $total_small_mb / $write_time" | bc)

echo "小文件写入速度: ${write_speed} MB/s"

# 读取小文件并测量时间
start_time=$(date +%s.%N)
for i in $(seq 1 "$SMALL_FILE_COUNT"); do
  dd if="${TEST_DIR}/small_test_file_$i" of=/dev/null bs=1M
done
end_time=$(date +%s.%N)
read_time=$(echo "$end_time - $start_time" | bc)
read_speed=$(echo "scale=2; $total_small_mb / $read_time" | bc)

echo "小文件读取速度: ${read_speed} MB/s"

# 清理小文件
rm -f "${TEST_DIR}/small_test_file_"*

echo "测试完成!"
|
||||
67
998-常用脚本/服务器性能/磁盘性能测试.py
Normal file
67
998-常用脚本/服务器性能/磁盘性能测试.py
Normal file
@@ -0,0 +1,67 @@
|
||||
import os
import time

# Size of each chunk streamed to/from disk. Keeps memory bounded even for
# multi-GB test files: the original write_large_file materialised the whole
# file with a single os.urandom(size_mb * 1024 * 1024) call — 10 GiB of RAM
# with main()'s defaults — and read_large_file loaded the entire file back
# into memory with one f.read().
_CHUNK_SIZE = 4 * 1024 * 1024  # 4 MiB


def write_large_file(file_path, size_mb):
    """写入大文件 — write `size_mb` megabytes of random data to `file_path`,
    streamed in bounded chunks instead of one giant in-memory buffer."""
    remaining = size_mb * 1024 * 1024
    with open(file_path, 'wb') as f:
        while remaining > 0:
            n = min(_CHUNK_SIZE, remaining)
            f.write(os.urandom(n))
            remaining -= n


def read_large_file(file_path):
    """读取大文件 — stream `file_path` chunk by chunk, discarding the data."""
    with open(file_path, 'rb') as f:
        while f.read(_CHUNK_SIZE):
            pass


def write_small_files(dir_path, num_files, file_size):
    """写入小文件 — create `num_files` files of `file_size` random bytes each,
    named small_file_<i>.txt inside `dir_path`."""
    for i in range(num_files):
        file_path = os.path.join(dir_path, f'small_file_{i}.txt')
        with open(file_path, 'wb') as f:
            f.write(os.urandom(file_size))


def read_small_files(dir_path, num_files):
    """读取小文件 — read back the files produced by write_small_files."""
    for i in range(num_files):
        file_path = os.path.join(dir_path, f'small_file_{i}.txt')
        with open(file_path, 'rb') as f:
            f.read()


def measure_io(test_type, *args):
    """测量IO性能 — run one test and return the elapsed wall time in seconds.

    test_type: 'large_write' | 'large_read' | 'small_write' | 'small_read';
    *args are forwarded unchanged to the matching helper. An unknown
    test_type times a no-op and returns ~0 (same as the original).
    """
    # perf_counter is monotonic; time.time() (used originally) can jump
    # backwards/forwards if the system clock is adjusted mid-measurement.
    start_time = time.perf_counter()
    if test_type == 'large_write':
        write_large_file(*args)
    elif test_type == 'large_read':
        read_large_file(*args)
    elif test_type == 'small_write':
        write_small_files(*args)
    elif test_type == 'small_read':
        read_small_files(*args)
    end_time = time.perf_counter()
    return end_time - start_time


def main():
    dir_path = '/var/lib/docker/'  # 修改为你的测试目录
    large_file_size_mb = 10240  # 大文件大小(MB)
    small_file_size = 1024  # 小文件大小(字节)
    num_small_files = 100  # 小文件数量
    large_file_path = os.path.join(dir_path, 'large_file.bin')

    # 大文件测试
    print("开始大文件写入测试...")
    large_write_time = measure_io('large_write', large_file_path, large_file_size_mb)
    print(f"大文件写入时间: {large_write_time:.2f}秒")

    print("开始大文件读取测试...")
    large_read_time = measure_io('large_read', large_file_path)
    print(f"大文件读取时间: {large_read_time:.2f}秒")

    # 小文件测试
    print("开始小文件写入测试...")
    small_write_time = measure_io('small_write', dir_path, num_small_files, small_file_size)
    print(f"小文件写入时间: {small_write_time:.2f}秒")

    print("开始小文件读取测试...")
    small_read_time = measure_io('small_read', dir_path, num_small_files)
    print(f"小文件读取时间: {small_read_time:.2f}秒")

    # Best-effort cleanup: the original left ~10 GB of test files on disk.
    paths = [large_file_path]
    paths += [os.path.join(dir_path, f'small_file_{i}.txt') for i in range(num_small_files)]
    for path in paths:
        try:
            os.remove(path)
        except OSError:
            pass  # already gone or not removable — cleanup is best-effort


if __name__ == '__main__':
    main()
|
||||
@@ -14,7 +14,7 @@ server {
|
||||
proxy_buffering off;
|
||||
proxy_buffer_size 4k;
|
||||
proxy_buffers 4 12k;
|
||||
proxy_set_header Host fake-domain.jxejpt.io;
|
||||
proxy_set_header Host fake-domain.sdejpt.io;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
|
||||
@@ -2,7 +2,7 @@ version: '3'
|
||||
|
||||
services:
|
||||
cmii-nginx:
|
||||
image: 10.20.1.135:8033/cmii/nginx:1.21.3
|
||||
image: 134.80.124.7:8033/cmii/nginx:1.21.3
|
||||
volumes:
|
||||
- /etc/nginx/conf.d:/etc/nginx/conf.d
|
||||
- /etc/nginx/nginx.conf:/etc/nginx/nginx.conf
|
||||
|
||||
55
998-常用脚本/部署脚本/z_执行apply命令.sh
Normal file
55
998-常用脚本/部署脚本/z_执行apply命令.sh
Normal file
@@ -0,0 +1,55 @@
|
||||
#!/bin/bash
# Scratchpad of kubectl apply/delete command pairs used during manual
# deployment, roughly in dependency order (dashboard -> nfs -> pvc ->
# middleware -> configmap/ingress -> frontend/backend). Not intended to
# run top-to-bottom — copy the line you need.

kubectl apply -f k8s-dashboard.yaml

# Print the dashboard admin-user login token.
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

kubectl apply -f k8s-nfs.yaml
kubectl delete -f k8s-nfs.yaml

kubectl -n kube-system describe pod $(kubectl -n kube-system get pods | grep nfs-client-provisioner | awk '{print$1}')

kubectl apply -f k8s-nfs-test.yaml
kubectl delete -f k8s-nfs-test.yaml

cd /var/lib/docker/nfs_data

kubectl apply -f k8s-pvc.yaml
kubectl delete -f k8s-pvc.yaml

kubectl apply -f k8s-mongo.yaml
kubectl delete -f k8s-mongo.yaml

kubectl apply -f k8s-emqx.yaml
kubectl delete -f k8s-emqx.yaml

kubectl apply -f k8s-rabbitmq.yaml
# fixed: was "k8s-rabbitmq.yam" (missing trailing 'l'), which would fail
kubectl delete -f k8s-rabbitmq.yaml

kubectl apply -f k8s-redis.yaml
# fixed: was "k8s-redis.yamll" (doubled trailing 'l'), which would fail
kubectl delete -f k8s-redis.yaml

kubectl apply -f k8s-mysql.yaml
kubectl delete -f k8s-mysql.yaml

# ---- (separator; the original bare "----" is not a valid command)

kubectl apply -f k8s-nacos.yaml
kubectl delete -f k8s-nacos.yaml

# --- (separator; the original bare "---" is not a valid command)

kubectl apply -f k8s-configmap.yaml
kubectl delete -f k8s-configmap.yaml

kubectl apply -f k8s-ingress.yaml
kubectl delete -f k8s-ingress.yaml

kubectl apply -f k8s-frontend.yaml
kubectl delete -f k8s-frontend.yaml

kubectl apply -f k8s-backend.yaml
kubectl delete -f k8s-backend.yaml


# List all pod names in kube-system on one line.
kubectl -n kube-system get pods -o jsonpath='{.items[*].metadata.name}'
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
minio_inner_ip_host=10.129.80.223:9000
|
||||
minio_inner_ip_host=134.80.124.15:9000
|
||||
|
||||
download_ts2mp4_file(){
|
||||
echo ""
|
||||
|
||||
@@ -8,7 +8,7 @@ env:
|
||||
value: "eth0"
|
||||
|
||||
# 更加保险
|
||||
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=enp4s3
|
||||
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth0
|
||||
|
||||
|
||||
# 删除所有的calico pod
|
||||
|
||||
Reference in New Issue
Block a user