Routine changes

This commit is contained in:
zeaslity
2024-11-13 09:51:57 +08:00
parent dcbefdead9
commit 8c34f9018c
15 changed files with 1263 additions and 5 deletions

View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Run this on every node
hostnamectl set-hostname service-   # complete the hostname per node (e.g. service-1), matching /etc/hosts below
sed -i "/search/ a nameserver 223.5.5.5" /etc/resolv.conf
echo "AllowTcpForwarding yes" >> /etc/ssh/sshd_config
systemctl restart sshd
cat >> /etc/hosts << EOF
134.80.124.6 master-1
134.80.124.7 master-2
134.80.124.8 master-3
134.80.124.9 worker-1
134.80.124.10 worker-2
134.80.124.11 worker-3
134.80.124.12 worker-4
134.80.124.13 worker-5
134.80.124.14 worker-6
134.80.124.15 service-1
134.80.124.16 service-2
134.80.124.17 service-3
134.80.124.18 service-4
134.80.124.19 database-1
134.80.124.20 database-2
134.80.124.21 nfs-1
134.80.124.22 nfs-2
EOF
ping -c 3 worker-1   # bounded ping so the script does not hang
yum clean all && yum makecache
# Pa_r8m#Ij0Io(b

View File

@@ -0,0 +1,16 @@
#!/bin/bash
edit_ssh(){
cp /etc/ssh/sshd_config /etc/ssh/sshd_config_back
sed -i "s/PermitRootLogin no/PermitRootLogin yes/g" /etc/ssh/sshd_config
sed -i "s/AllowAgentForwarding no/AllowAgentForwarding yes/g" /etc/ssh/sshd_config
systemctl restart sshd
echo "-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZWQyNTUxOQAAACDk8R4KXGgDa5H2r8HrqW1klShoSISV20sLiXZPZPfeLwAAAJCIan+LiGp/iwAAAAtzc2gtZWQyNTUxOQAAACDk8R4KXGgDa5H2r8HrqW1klShoSISV20sLiXZPZPfeLwAAAEDhnul+q0TNTgrO9kfmGsFhtn/rGRIrmhFostjem/QlZuTxHgpcaANrkfavweupbWSVKGhIhJXbSwuJdk9k994vAAAADHdkZEBjbWlpLmNvbQE=
-----END OPENSSH PRIVATE KEY-----">/root/.ssh/id_ed25519
echo "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOTxHgpcaANrkfavweupbWSVKGhIhJXbSwuJdk9k994v wdd@cmii.com">/root/.ssh/id_ed25519.pub
chmod 600 /root/.ssh/id_ed25519
echo "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOTxHgpcaANrkfavweupbWSVKGhIhJXbSwuJdk9k994v wdd@cmii.com">/root/.ssh/authorized_keys
}
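
# Usage sketch (added; the file name edit_ssh.sh is an assumption): source this script
# on each node and call the function so every host trusts the same root key, e.g.
#   source ./edit_ssh.sh && edit_ssh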

View File

@@ -0,0 +1,167 @@
#!/bin/bash
all_image_list_txt="all-cmii-image-list.txt" # update for the target version
gzip_image_list_txt="all-gzip-image-list.txt" # usually does not need changing
oss_prefix_url="https://oss.demo.uavcmlc.com/cmlc-installation"
local_gzip_path="/home/aiox/"
DockerRegisterDomain="134.80.124.7:8033" # adjust to the actual registry address
HarborAdminPass='V2ryStr@ngPss' # must match the password set in the first script
print_green() {
echo -e "\033[32m${1}\033[0m"
echo ""
}
print_red() {
echo -e "\033[31m${1}\033[0m"
echo ""
}
Download_Load_Tag_Upload() {
print_green "[DLTU] - start !"
while [[ $# -gt 0 ]]; do
case "$1" in
rke)
# print_green "download rke "
local_gzip_path="$local_gzip_path/rke"
mkdir -p ${local_gzip_path}
oss_prefix_url="$oss_prefix_url/rke/"
dltu
shift # past argument
;;
middle)
local_gzip_path="$local_gzip_path/middle"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/middle/"
dltu
shift # past argument
;;
cmii)
local_gzip_path="$local_gzip_path/cmii"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/ahejpt/"
dltu
shift # past argument
;;
*)
# unknown option
print_red "bad arguments"
;;
esac
done
}
dltu() {
print_green "download all image name list and gzip file list!"
cd $local_gzip_path || exit
rm -f "$all_image_list_txt"
rm -f "$gzip_image_list_txt"
wget "$oss_prefix_url$all_image_list_txt"
wget "$oss_prefix_url$gzip_image_list_txt"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
echo ""
while IFS= read -r i; do
[ -z "${i}" ] && continue
echo "download gzip file =>: $oss_prefix_url${i}"
if wget "$oss_prefix_url${i}" >/dev/null 2>&1; then
echo "Gzip file download success : ${i}"
image_full_name=$(docker load -i ${i} | head -n1 |awk -F': ' '{print $2}')
app_name=$(echo "$image_full_name" | sed 's|.*/||g')
echo "extract short name is $app_name"
if echo $image_full_name | grep -q "rancher"
then
print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
docker push $DockerRegisterDomain/rancher/$app_name
else
print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
docker push $DockerRegisterDomain/cmii/$app_name
fi
else
print_red "Gzip file download FAILED : ${i}"
fi
echo "-------------------------------------------------"
done <"${gzip_image_list_txt}"
}
Load_Tag_Upload(){
print_green "[LTU] - start to load image from offline !"
while [[ $# -gt 0 ]]; do
case "$1" in
rke)
# print_green "download rke "
local_gzip_path="$local_gzip_path/rke"
mkdir -p ${local_gzip_path}
oss_prefix_url="$oss_prefix_url/rke/"
ltu
shift # past argument
;;
middle)
local_gzip_path="$local_gzip_path/middle"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/middle/"
ltu
shift # past argument
;;
cmii)
local_gzip_path="$local_gzip_path/cmii"
mkdir -p $local_gzip_path
oss_prefix_url="$oss_prefix_url/ehejpt/"
ltu
shift # past argument
;;
*)
# unknown option
print_red "bad arguments"
;;
esac
done
}
ltu(){
all_file_list=$(find $local_gzip_path -type f -name "*.tar.gz")
for file in $all_file_list; do
echo "offline gzip file is => : $file"
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
image_full_name=$(docker load -i ${file} | head -n1 |awk -F': ' '{print $2}')
app_name=$(echo "$image_full_name" | sed 's|.*/||g')
echo "extract short name is $app_name"
if echo $image_full_name | grep -q "rancher"
then
print_green "tag image to => $DockerRegisterDomain/rancher/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/rancher/$app_name
docker push $DockerRegisterDomain/rancher/$app_name
else
print_green "tag image to => $DockerRegisterDomain/cmii/$app_name"
docker tag ${image_full_name} $DockerRegisterDomain/cmii/$app_name
docker push $DockerRegisterDomain/cmii/$app_name
fi
done
}
test(){
app_name=$(echo "nginx:latest" | sed 's|.*/||g')
echo "extract short name is $app_name"
}
# test
#Download_Load_Tag_Upload "cmii"
Load_Tag_Upload "rke"
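
# Usage sketch (added; the file name sync-images.sh is an assumption):
#   bash sync-images.sh      # offline mode: load local *.tar.gz files, tag and push (as configured above)
# For the online mode, comment the Load_Tag_Upload line and uncomment Download_Load_Tag_Upload,
# which first downloads the image lists and gzip files from $oss_prefix_url before loading and pushing.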

View File

@@ -0,0 +1,647 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: helm-mysql
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
annotations: {}
secrets:
- name: helm-mysql
---
apiVersion: v1
kind: Secret
metadata:
name: helm-mysql
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
type: Opaque
data:
mysql-root-password: "UXpmWFFoZDNiUQ=="
mysql-password: "S0F0cm5PckFKNw=="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
my.cnf: |-
[mysqld]
port=3306
basedir=/opt/bitnami/mysql
datadir=/bitnami/mysql/data
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
socket=/opt/bitnami/mysql/tmp/mysql.sock
log-error=/bitnami/mysql/data/error.log
general_log_file = /bitnami/mysql/data/general.log
slow_query_log_file = /bitnami/mysql/data/slow.log
innodb_data_file_path = ibdata1:512M:autoextend
innodb_buffer_pool_size = 512M
innodb_buffer_pool_instances = 2
innodb_log_file_size = 512M
innodb_log_files_in_group = 4
log-bin = /bitnami/mysql/data/mysql-bin
max_binlog_size=1G
transaction_isolation = REPEATABLE-READ
default_storage_engine = innodb
character-set-server = utf8mb4
collation-server=utf8mb4_bin
binlog_format = ROW
binlog_rows_query_log_events=on
binlog_cache_size=4M
binlog_expire_logs_seconds = 1296000
max_binlog_cache_size=2G
gtid_mode = on
enforce_gtid_consistency = 1
sync_binlog = 1
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
log_slave_updates=1
relay_log_recovery = 1
relay-log-purge = 1
default_time_zone = '+08:00'
lower_case_table_names=1
log_bin_trust_function_creators=1
group_concat_max_len=67108864
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_flush_sync = 0
innodb_flush_neighbors = 0
innodb_write_io_threads = 8
innodb_read_io_threads = 8
innodb_purge_threads = 4
innodb_page_cleaners = 4
innodb_open_files = 65535
innodb_max_dirty_pages_pct = 50
innodb_lru_scan_depth = 4000
innodb_checksum_algorithm = crc32
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = 1
innodb_print_all_deadlocks = 1
innodb_file_per_table = 1
innodb_online_alter_log_max_size = 4G
innodb_stats_on_metadata = 0
innodb_thread_concurrency = 0
innodb_sync_spin_loops = 100
innodb_spin_wait_delay = 30
lock_wait_timeout = 3600
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes =1
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = 1
log_slow_slave_statements = 1
default_authentication_plugin=mysql_native_password
skip-name-resolve=1
explicit_defaults_for_timestamp=1
plugin_dir=/opt/bitnami/mysql/plugin
max_allowed_packet=128M
max_connections = 2000
max_connect_errors = 1000000
table_definition_cache=2000
table_open_cache_instances=64
tablespace_definition_cache=1024
thread_cache_size=256
interactive_timeout = 600
wait_timeout = 600
tmpdir=/opt/bitnami/mysql/tmp
max_allowed_packet=32M
bind-address=0.0.0.0
performance_schema = 1
performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'
innodb_monitor_enable=ALL
[mysql]
no-auto-rehash
[mysqldump]
quick
max_allowed_packet = 32M
[client]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mysql/plugin
[manager]
port=3306
socket=/opt/bitnami/mysql/tmp/mysql.sock
pid-file=/opt/bitnami/mysql/tmp/mysqld.pid
---
apiVersion: v1
kind: ConfigMap
metadata:
name: helm-mysql-init-scripts
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/component: primary
data:
create_users_grants_core.sql: |-
create user zyly@'%' identified by 'Cmii@451315';
grant select on *.* to zyly@'%';
create user zyly_qc@'%' identified by 'Uh)E_owCyb16';
grant all on *.* to zyly_qc@'%';
create user k8s_admin@'%' identified by 'fP#UaH6qQ3)8';
grant all on *.* to k8s_admin@'%';
create user audit_dba@'%' identified by 'PjCzqiBmJaTpgkoYXynH';
grant all on *.* to audit_dba@'%';
create user db_backup@'%' identified by 'RU5Pu(4FGdT9';
GRANT SELECT, RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT, EVENT on *.* to db_backup@'%';
create user monitor@'%' identified by 'PL3#nGtrWbf-';
grant REPLICATION CLIENT on *.* to monitor@'%';
flush privileges;
---
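# Note (added): the init scripts above are mounted at /docker-entrypoint-initdb.d by the
# master StatefulSet below; the bitnami/mysql image runs them only on the very first start,
# while the data directory is still empty.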
kind: Service
apiVersion: v1
metadata:
name: cmii-mysql
namespace: sdejpt
labels:
app.kubernetes.io/component: primary
app.kubernetes.io/managed-by: octopus
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.app: mysql
cmii.type: middleware
octopus.control: mysql-db-wdd
spec:
ports:
- name: mysql
protocol: TCP
port: 13306
targetPort: mysql
selector:
app.kubernetes.io/component: primary
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.app: mysql
cmii.type: middleware
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-master-headless
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-master
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
annotations: {}
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33306
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
---
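# Note (added): the master StatefulSet below is pinned to the node labelled mysql-deploy=master
# and keeps its data on a hostPath (/var/lib/docker/mysql-pv/sdejpt/), so the data stays on that
# single node; the helm-mysql-master NodePort service above exposes it on 33306 (the replica
# gets 33307 further down).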
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql-master
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/configuration: 6b60fa0f3a846a6ada8effdc4f823cf8003d42a8c8f630fe8b1b66d3454082dd
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: primary
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: mysql-deploy
operator: In
values:
- "master"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 134.80.124.7:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 134.80.124.7:8033/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_DATABASE
value: "cmii"
- name: MYSQL_REPLICATION_MODE
value: "master"
- name: MYSQL_REPLICATION_USER
value: "k8s_admin"
- name: MYSQL_REPLICATION_PASSWORD
value: 'fP#UaH6qQ3)8'
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
- name: custom-init-scripts
mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/sdejpt/
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-replication-headless
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: replication
annotations: {}
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: mysql
port: 3306
targetPort: mysql
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: replication
---
apiVersion: v1
kind: Service
metadata:
name: helm-mysql-replication
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: replication
annotations: {}
spec:
type: NodePort
ports:
- name: mysql
port: 3306
protocol: TCP
targetPort: mysql
nodePort: 33307
selector:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: replication
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: helm-mysql-replication
namespace: sdejpt
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: replication
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mysql-db
app.kubernetes.io/release: sdejpt
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: replication
serviceName: helm-mysql
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: mysql-db
octopus.control: mysql-db-wdd
app.kubernetes.io/release: sdejpt
app.kubernetes.io/managed-by: octopus
cmii.type: middleware
cmii.app: mysql
app.kubernetes.io/component: replication
spec:
imagePullSecrets:
- name: harborsecret
serviceAccountName: helm-mysql
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: mysql-deploy
operator: In
values:
- "replication"
securityContext:
fsGroup: 1001
initContainers:
- name: change-volume-permissions
image: 134.80.124.7:8033/cmii/bitnami-shell:11-debian-11-r136
imagePullPolicy: "Always"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
securityContext:
runAsUser: 0
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
containers:
- name: mysql
image: 134.80.124.7:8033/cmii/mysql:8.1.0-debian-11-r42
imagePullPolicy: "IfNotPresent"
securityContext:
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MYSQL_MASTER_HOST
value: helm-mysql-master
- name: MYSQL_DATABASE
value: "cmii"
- name: MYSQL_REPLICATION_MODE
value: "slave"
- name: MYSQL_MASTER_ROOT_USER
value: "root"
- name: MYSQL_MASTER_PORT_NUMBER
value: "3306"
- name: MYSQL_MASTER_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: helm-mysql
key: mysql-root-password
- name: MYSQL_REPLICATION_USER
value: "k8s_admin"
- name: MYSQL_REPLICATION_PASSWORD
value: 'fP#UaH6qQ3)8'
ports:
- name: mysql
containerPort: 3306
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
readinessProbe:
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
startupProbe:
failureThreshold: 60
initialDelaySeconds: 120
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
exec:
command:
- /bin/bash
- -ec
- |
password_aux="${MYSQL_ROOT_PASSWORD:-}"
if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then
password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE")
fi
mysqladmin status -uroot -p"${password_aux}"
resources:
limits: {}
requests: {}
volumeMounts:
- name: mysql-data
mountPath: /bitnami/mysql
# - name: custom-init-scripts
# mountPath: /docker-entrypoint-initdb.d
- name: config
mountPath: /opt/bitnami/mysql/conf/my.cnf
subPath: my.cnf
volumes:
- name: config
configMap:
name: helm-mysql
- name: custom-init-scripts
configMap:
name: helm-mysql-init-scripts
- name: mysql-data
hostPath:
path: /var/lib/docker/mysql-pv/sdejpt/
---
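# Apply sketch (added; the manifest file name is an assumption):
#   kubectl apply -f mysql-sdejpt.yaml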

View File

@@ -0,0 +1,274 @@
nodes:
- address: 134.80.124.6
user: root
role:
- controlplane
- etcd
- worker
internal_address: 134.80.124.6
labels:
ingress-deploy: true
- address: 134.80.124.7
user: root
role:
- controlplane
- etcd
- worker
internal_address: 134.80.124.7
labels:
ingress-deploy: true
- address: 134.80.124.8
user: root
role:
- controlplane
- etcd
- worker
internal_address: 134.80.124.8
labels:
ingress-deploy: true
- address: 134.80.124.9
user: root
role:
- worker
internal_address: 134.80.124.9
labels:
uavcloud.env: sdejpt
- address: 134.80.124.10
user: root
role:
- worker
internal_address: 134.80.124.10
labels:
uavcloud.env: sdejpt
- address: 134.80.124.11
user: root
role:
- worker
internal_address: 134.80.124.11
labels:
uavcloud.env: sdejpt
- address: 134.80.124.12
user: root
role:
- worker
internal_address: 134.80.124.12
labels:
uavcloud.env: sdejpt
- address: 134.80.124.13
user: root
role:
- worker
internal_address: 134.80.124.13
labels:
uavcloud.env: sdejpt
- address: 134.80.124.14
user: root
role:
- worker
internal_address: 134.80.124.14
labels:
uavcloud.env: sdejpt
- address: 134.80.124.19
user: root
role:
- worker
internal_address: 134.80.124.19
labels:
mysql-deploy: master
- address: 134.80.124.20
user: root
role:
- worker
internal_address: 134.80.124.20
labels:
mysql-deploy: replication
authentication:
strategy: x509
sans:
- "134.80.124.6"
- "134.80.124.7"
- "134.80.124.8"
private_registries:
- url: 134.80.124.7:8033 # private image registry address
user: admin
password: "V2ryStr@ngPss"
is_default: true
##############################################################################
# Defaults to false; when set to true, RKE does not raise an error if it detects an unsupported Docker version
ignore_docker_version: true
# Set the name of the Kubernetes cluster
cluster_name: rke-cluster
kubernetes_version: v1.20.4-rancher1-1
ssh_key_path: /root/.ssh/id_ed25519
#ssh_key_path: /root/.ssh/id_rsa
# Enable running cri-dockerd
# Up to Kubernetes 1.23, kubelet contained code called dockershim
# to support Docker runtime. The replacement is called cri-dockerd
# and should be enabled if you want to keep using Docker as your
# container runtime
# Only available to enable in Kubernetes 1.21 and higher
enable_cri_dockerd: true
services:
etcd:
backup_config:
enabled: false
interval_hours: 72
retention: 3
safe_timestamp: false
timeout: 300
creation: 12h
extra_args:
election-timeout: 5000
heartbeat-interval: 500
gid: 0
retention: 72h
snapshot: false
uid: 0
kube-api:
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-controller
service_cluster_ip_range: 172.24.0.0/16
# Expose a different port range for NodePort services
service_node_port_range: 30000-40000
always_pull_images: true
pod_security_policy: false
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Enable audit log to stdout
audit-log-path: "-"
# Increase number of delete workers
delete-collection-workers: 3
# Set the level of log output to warning-level
v: 1
kube-controller:
# CIDR pool used to assign IP addresses to pods in the cluster
cluster_cidr: 172.28.0.0/16
# IP range for any services created on Kubernetes
# This must match the service_cluster_ip_range in kube-api
service_cluster_ip_range: 172.24.0.0/16
# Add additional arguments to the kubernetes API server
# This WILL OVERRIDE any existing defaults
extra_args:
# Set the level of log output to debug-level
v: 1
# Enable RotateKubeletServerCertificate feature gate
feature-gates: RotateKubeletServerCertificate=true
# Enable TLS Certificates management
# https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem"
cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem"
kubelet:
# Base domain for the cluster
cluster_domain: cluster.local
# IP address for the DNS service endpoint
cluster_dns_server: 172.24.0.10
# Fail if swap is on
fail_swap_on: false
# Set max pods to 122 instead of the default 110
extra_binds:
- "/data/minio-pv:/hostStorage" # 不要修改 为minio的pv添加
extra_args:
max-pods: 122
# Optionally define additional volume binds to a service
scheduler:
extra_args:
# Set the level of log output to warning-level
v: 0
kubeproxy:
extra_args:
# Set the level of log output to warning-level
v: 1
authorization:
mode: rbac
addon_job_timeout: 30
# Specify network plugin-in (canal, calico, flannel, weave, or none)
network:
options:
flannel_backend_type: vxlan
flannel_iface: ens34
flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+
plugin: calico
# Specify DNS provider (coredns or kube-dns)
dns:
provider: coredns
nodelocal: {}
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 20%
maxSurge: 15%
linear_autoscaler_params:
cores_per_replica: 0.34
nodes_per_replica: 4
prevent_single_point_failure: true
min: 2
max: 3
# Specify monitoring provider (metrics-server)
monitoring:
provider: metrics-server
# Available as of v1.1.0
update_strategy:
strategy: RollingUpdate
rollingUpdate:
maxUnavailable: 8
ingress:
provider: nginx
default_backend: true
http_port: 0
https_port: 0
extra_envs:
- name: TZ
value: Asia/Shanghai
node_selector:
ingress-deploy: true
options:
use-forwarded-headers: "true"
access-log-path: /var/log/nginx/access.log
client-body-timeout: '6000'
compute-full-forwarded-for: 'true'
enable-underscores-in-headers: 'true'
log-format-escape-json: 'true'
log-format-upstream: >-
{ "msec": "$msec", "connection": "$connection", "connection_requests":
"$connection_requests", "pid": "$pid", "request_id": "$request_id",
"request_length": "$request_length", "remote_addr": "$remote_addr",
"remote_user": "$remote_user", "remote_port": "$remote_port",
"http_x_forwarded_for": "$http_x_forwarded_for", "time_local":
"$time_local", "time_iso8601": "$time_iso8601", "request": "$request",
"request_uri": "$request_uri", "args": "$args", "status": "$status",
"body_bytes_sent": "$body_bytes_sent", "bytes_sent": "$bytes_sent",
"http_referer": "$http_referer", "http_user_agent": "$http_user_agent",
"http_host": "$http_host", "server_name": "$server_name", "request_time":
"$request_time", "upstream": "$upstream_addr", "upstream_connect_time":
"$upstream_connect_time", "upstream_header_time": "$upstream_header_time",
"upstream_response_time": "$upstream_response_time",
"upstream_response_length": "$upstream_response_length",
"upstream_cache_status": "$upstream_cache_status", "ssl_protocol":
"$ssl_protocol", "ssl_cipher": "$ssl_cipher", "scheme": "$scheme",
"request_method": "$request_method", "server_protocol": "$server_protocol",
"pipe": "$pipe", "gzip_ratio": "$gzip_ratio", "http_cf_ray": "$http_cf_ray",
"geoip_country_code": "$geoip_country_code" }
proxy-body-size: 5120m
proxy-read-timeout: '6000'
proxy-send-timeout: '6000'
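# To apply this configuration (sketch, assumes the rke CLI is installed on the bastion host):
#   rke up --config ./cluster.yml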

View File

@@ -0,0 +1,52 @@
#!/bin/bash
# earlier host_list variants, kept for reference; only the final assignment below is used
# host_list=(10.129.80.217)
# host_list=(134.80.124.6 134.80.124.7 134.80.124.8 134.80.124.9 134.80.124.10 134.80.124.11 134.80.124.12 134.80.124.13 134.80.124.14 134.80.124.15 134.80.124.16 134.80.124.17 134.80.124.18 134.80.124.19 134.80.124.20 134.80.124.21 134.80.124.22)
# host_list=(10.129.80.217 10.129.80.245 10.129.80.222 10.129.80.223)
# host_list=(134.80.124.6 134.80.124.7 134.80.124.8 134.80.124.9 134.80.124.10 134.80.124.11 134.80.124.12 134.80.124.13 134.80.124.14 134.80.124.15 134.80.124.16 134.80.124.17 134.80.124.18 134.80.124.19 134.80.124.20 134.80.124.21 134.80.124.22)
host_list=(134.80.124.7 134.80.124.8 134.80.124.9 134.80.124.10 134.80.124.11 134.80.124.12 134.80.124.13 134.80.124.14 134.80.124.15 134.80.124.16 134.80.124.17 134.80.124.18 134.80.124.19 134.80.124.20)
for ip in "${host_list[@]}";do
echo "current ip is $ip"
ssh root@${ip} "echo yes"
echo ""
ssh root@${ip} "curl -s 172.24.38.204"
echo ""
done
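# The remote steps below were collected while provisioning; reuse them inside the loop above
# (or set ip to one host manually) before running them, since ${ip} is only defined inside the loop.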
mv /home/aiox/octopus-agent_linux_amd64 /usr/local/bin/octopus-agent
chmod +x /usr/local/bin/octopus-agent
ssh root@${ip} "mkdir /root/wdd"
scp octopus-agent root@${ip}:/root/wdd/
scp /root/wdd/docker-amd64-20.10.15.tgz root@${ip}:/root/wdd/
scp /root/wdd/nfs_client_22.04.4_amd64.tar.gz root@${ip}:/root/wdd/
scp /root/wdd/nfs_server_22.04.4_amd64.tar.gz root@${ip}:/root/wdd/
scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${ip}:/root/wdd/
ssh root@${ip} "chmod +x /usr/local/bin/octopus-agent"
ssh root@${ip} "printf 'firewall\n' | octopus-agent --mode=bastion"
ssh root@${ip} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
ssh root@${ip} "printf 'swap\n' | octopus-agent --mode=bastion"
ssh root@${ip} "printf 'selinux\n' | octopus-agent --mode=bastion"
ssh root@${ip} "printf 'docker\n' | octopus-agent --mode=bastion"
ssh root@${ip} "printf 'dockercompose\n' | octopus-agent --mode=bastion"
printf 'dockerconfig\n' | octopus-agent --mode=bastion
ssh root@${ip} "mkdir /etc/docker"
scp /etc/docker/daemon.json root@${ip}:/etc/docker/
ssh root@${ip} "systemctl restart docker && sleep 3 && docker info"
sleep 3
sed -i '/^$/d' ~/.ssh/*
printf 'rke\n' | octopus-agent --mode=bastion
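# verification sketch (added): confirm docker is reachable on every host after the steps above
# for ip in "${host_list[@]}"; do ssh root@${ip} "docker version --format '{{.Server.Version}}'"; done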

View File

@@ -20,13 +20,13 @@ chmod +x /usr/local/bin/octopus-agent
# ssh root@${ip} "mkdir /root/wdd"
# scp octopus-agent root@${ip}:/root/wdd/
# scp /usr/local/bin/octopus-agent root@${ip}:/usr/local/bin/octopus-agent
# scp /root/wdd/docker-amd64-20.10.15.tgz root@${ip}:/root/wdd/
# scp /root/wdd/nfs_client_22.04.4_amd64.tar.gz root@${ip}:/root/wdd/
# scp /root/wdd/nfs_server_22.04.4_amd64.tar.gz root@${ip}:/root/wdd/
# scp /root/wdd/docker-compose-linux-x86_64-v2.18.0 root@${ip}:/root/wdd/
# ssh root@${ip} "chmod +x octopus-agent"
# ssh root@${ip} "chmod +x /usr/local/bin/octopus-agent"
# ssh root@${ip} "printf 'firewall\n' | octopus-agent --mode=bastion"
# ssh root@${ip} "printf 'sysconfig\n' | octopus-agent --mode=bastion"
# ssh root@${ip} "printf 'swap\n' | octopus-agent --mode=bastion"

View File

@@ -5,7 +5,7 @@ wget https://oss.demo.uavcmlc.com/cmlc-installation/downloadfile/amd/jq-linux-am
chmod +x /usr/local/bin/jq
export name_space=xmyd
export name_space=bjyd
kubectl delete pods -n $name_space --field-selector status.phase!=Running --force

View File

@@ -1,6 +1,6 @@
export harbor_host=10.129.80.218:8033
export harbor_host=134.80.124.7:8033
curl -X POST -u "admin:V2ryStr@ngPss" -H "authorization: Basic YWRtaW46VjJyeVN0ckBuZ1Bzcw==" -H "Content-Type: application/json" -d '{"project_name":"cmii","registry_id":null,"metadata":{"public":"true"},"storage_limit":-1}' http://$harbor_host/api/v2.0/projects

View File

@@ -51,6 +51,7 @@ clean_rke_cluster() {
rmmod ipip
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -t raw -F
ip6tables -F && ip6tables -t nat -F && ip6tables -t mangle -F && ip6tables -t raw -F
}

View File

@@ -8,7 +8,7 @@ env:
value: "eth0"
# to be extra safe, also set it directly with kubectl
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth0
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=enp4s3
# delete all calico pods

View File

@@ -138,6 +138,8 @@ ltu(){
echo "offline gzip file is => : $file"
image_full_name=$(docker load -i ${file} | head -n1 |awk -F': ' '{print $2}')
docker login -u admin -p ${HarborAdminPass} ${DockerRegisterDomain}
app_name=$(echo "$image_full_name" | sed 's|.*/||g')
echo "extract short name is $app_name"

View File

@@ -0,0 +1,56 @@
services:
mysql-master:
image: 134.80.124.7:8033/cmii/mysql:8.1.0-debian-11-r42
ports:
- '3306:3306'
volumes:
- '/var/lib/docker/mysql_data:/bitnami/mysql/data'
environment:
- MYSQL_REPLICATION_MODE=master
- MYSQL_REPLICATION_USER=repl_user
- MYSQL_USER=my_user
- MYSQL_DATABASE=cmii
# ALLOW_EMPTY_PASSWORD is recommended only for development.
- ALLOW_EMPTY_PASSWORD=no
- MYSQL_ROOT_USER=root
- MYSQL_ROOT_PASSWORD=QzfXQhd3bQ
- MYSQL_MASTER_HOST=134.80.124.19
- MYSQL_MASTER_PORT_NUMBER=3306
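# note (added): the bitnami/mysql image also expects MYSQL_REPLICATION_PASSWORD when a
# replication user is set; it is not filled in here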
healthcheck:
# list-form healthcheck must start with CMD; "$$" keeps compose itself from interpolating ${...}
test:
- CMD
- /bin/bash
- -ec
- |
  password_aux="$${MYSQL_ROOT_PASSWORD:-}"
  if [[ -f "$${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then password_aux=$$(cat "$$MYSQL_ROOT_PASSWORD_FILE"); fi
  mysqladmin status -uroot -p"$${password_aux}"
interval: 15s
timeout: 5s
retries: 6
mysql-slave:
image: docker.io/bitnami/mysql:8.4
ports:
- '3306'
depends_on:
- mysql-master
environment:
- MYSQL_REPLICATION_MODE=slave
- MYSQL_REPLICATION_USER=repl_user
- MYSQL_USER=my_user
- MYSQL_DATABASE=my_database
- MYSQL_MASTER_HOST=mysql-master
- MYSQL_MASTER_PORT_NUMBER=3306
- MYSQL_MASTER_ROOT_PASSWORD=my_root_password
# ALLOW_EMPTY_PASSWORD is recommended only for development.
- ALLOW_EMPTY_PASSWORD=yes
# In case of missing binary files on master, use `true` to reset those binary files. Creating a previous backup is recommended.
- MYSQL_REPLICATION_SLAVE_DUMP=false
healthcheck:
test: ['CMD', '/opt/bitnami/scripts/mysql/healthcheck.sh']
interval: 15s
timeout: 5s
retries: 6
volumes:
mysql_master_data:
driver: local
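# usage sketch (added; the compose file name is an assumption):
#   docker compose -f mysql-replication.yml up -d
#   docker compose -f mysql-replication.yml exec mysql-master mysql -uroot -p'QzfXQhd3bQ' -e 'show binary logs;'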