新增firefly引用部署
优化clash verge的DNS问题-极致优化
This commit is contained in:
@@ -1,264 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Gitea 远程备份脚本
|
||||
# Author: System Administrator
|
||||
# Version: 1.0.0
|
||||
# License: MIT
|
||||
#
|
||||
# 功能描述:通过SSH远程执行Gitea备份操作,并将备份文件同步到本地
|
||||
# 依赖要求:ssh, rsync, docker, date, grep, awk 等基础工具
|
||||
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
################################################################################
|
||||
# 全局常量定义区
|
||||
################################################################################
|
||||
|
||||
readonly REMOTE_PORT="22333"
|
||||
readonly REMOTE_HOST="t0"
|
||||
readonly SCRIPT_DIR="/root/wdd/backup"
|
||||
readonly REMOTE_GITEA_CONTAINER="gitea-gitea-1"
|
||||
readonly REMOTE_GITEA_CONFIG="/bitnami/gitea/custom/conf/app.ini"
|
||||
readonly REMOTE_BACKUP_SOURCE="/data/gitea/gitea_data/data/tmp/gitea-dump-*.zip"
|
||||
readonly LOCAL_BACKUP_TARGET="/data/t0_150_230_198_103/gitea/"
|
||||
|
||||
# > 日志配置
|
||||
readonly LOG_DIR="${SCRIPT_DIR}/logs"
|
||||
readonly LOG_FILE="${LOG_DIR}/gitea_backup_$(date +%Y%m%d).log"
|
||||
|
||||
# 日志级别常量
|
||||
readonly LOG_LEVEL_DEBUG=0
|
||||
readonly LOG_LEVEL_INFO=1
|
||||
readonly LOG_LEVEL_WARN=2
|
||||
readonly LOG_LEVEL_ERROR=3
|
||||
|
||||
# 当前日志级别(默认INFO)
|
||||
CURRENT_LOG_LEVEL=${LOG_LEVEL_INFO}
|
||||
|
||||
################################################################################
|
||||
# 函数声明区
|
||||
################################################################################
|
||||
|
||||
# 输出格式化日志信息(同时输出到控制台和日志文件)
|
||||
# @param level string 日志级别(DEBUG/INFO/WARN/ERROR)
|
||||
# @param message string 日志消息内容
|
||||
# @return void
|
||||
# @require CURRENT_LOG_LEVEL, LOG_FILE
|
||||
# Print a formatted log line to the console and append it to the log file.
# DEBUG/INFO go to stdout, WARN/ERROR (and unknown levels) to stderr.
# Messages below CURRENT_LOG_LEVEL are suppressed; an unknown level is
# treated as highest severity so call-site typos remain visible.
# @param level   string  DEBUG/INFO/WARN/ERROR
# @param message string  log message text
# @return void
# @require CURRENT_LOG_LEVEL, LOG_FILE, LOG_LEVEL_* constants
log_message() {
    local level="$1"
    local message="$2"
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')

    # Map the textual level to numeric severity and output stream (1=stdout,
    # 2=stderr).  This replaces four near-identical case arms.
    local severity stream
    case "${level}" in
        "DEBUG") severity=${LOG_LEVEL_DEBUG}; stream=1 ;;
        "INFO")  severity=${LOG_LEVEL_INFO};  stream=1 ;;
        "WARN")  severity=${LOG_LEVEL_WARN};  stream=2 ;;
        "ERROR") severity=${LOG_LEVEL_ERROR}; stream=2 ;;
        *)       level="UNKNOWN"; severity=${LOG_LEVEL_ERROR}; stream=2 ;;
    esac

    # Suppress messages below the configured threshold.
    if [ "${CURRENT_LOG_LEVEL}" -gt "${severity}" ]; then
        return 0
    fi

    local log_entry="[${level}][${timestamp}] ${message}"
    if [ "${stream}" -eq 2 ]; then
        echo "${log_entry}" >&2
    else
        echo "${log_entry}"
    fi
    echo "${log_entry}" >> "${LOG_FILE}"
}
|
||||
|
||||
###
|
||||
# 执行远程SSH命令
|
||||
# @param command string 需要执行的远程命令
|
||||
# @return int 命令执行退出码
|
||||
# @require REMOTE_HOST, REMOTE_PORT
|
||||
# Run a single command on the remote host over SSH and log failures.
# @param command string  the command line to execute remotely
# @return int  the remote command's exit code (0 on success)
# @require REMOTE_HOST, REMOTE_PORT, log_message
execute_remote_command() {
    local command="$1"
    local exit_code=0

    log_message "DEBUG" "执行远程命令: ${command}"

    # The `|| exit_code=$?` guard captures the failure status without
    # tripping `set -e`, which would otherwise abort the whole script
    # before the error could be logged when this function is called
    # outside of an `if !` condition.
    ssh -p "${REMOTE_PORT}" "${REMOTE_HOST}" "${command}" || exit_code=$?

    if [ ${exit_code} -ne 0 ]; then
        log_message "ERROR" "远程命令执行失败,退出码: ${exit_code}"
        return ${exit_code}
    fi

    return 0
}
|
||||
|
||||
###
|
||||
# 执行Gitea备份操作
|
||||
# @return int 操作执行状态码
|
||||
# @require REMOTE_GITEA_CONTAINER, REMOTE_GITEA_CONFIG
|
||||
# Trigger a Gitea dump inside the remote container.
# @return int  0 on success, 1 when the remote dump command fails
# @require REMOTE_GITEA_CONTAINER, REMOTE_GITEA_CONFIG, execute_remote_command
perform_gitea_backup() {
    local dump_cmd
    dump_cmd="docker exec -i ${REMOTE_GITEA_CONTAINER} /opt/bitnami/gitea/bin/gitea dump -c ${REMOTE_GITEA_CONFIG}"

    log_message "INFO" "开始执行Gitea备份..."

    # Run the dump remotely; report and bail out on failure.
    if execute_remote_command "${dump_cmd}"; then
        log_message "INFO" "Gitea备份命令执行成功"
        return 0
    fi

    log_message "ERROR" "Gitea备份命令执行失败"
    return 1
}
|
||||
|
||||
###
|
||||
# 重命名备份文件(添加时间戳)
|
||||
# @return int 操作执行状态码
|
||||
# @require REMOTE_GITEA_CONTAINER
|
||||
# Add a timestamp to the freshly created dump and move it into the
# container's data/tmp directory.  The `\$(date …)` is escaped so the
# timestamp is evaluated by the remote container's shell, not locally.
# @return int  0 on success, 1 when the remote mv fails
# @require REMOTE_GITEA_CONTAINER, execute_remote_command
rename_backup_file() {
    local mv_cmd
    mv_cmd="docker exec -i ${REMOTE_GITEA_CONTAINER} /bin/sh -c \"mv /opt/bitnami/gitea/gitea-dump-*.zip /opt/bitnami/gitea/data/tmp/gitea-dump-\$(date +%Y%m%d-%H%M%S).zip\""

    log_message "INFO" "开始重命名备份文件..."

    if execute_remote_command "${mv_cmd}"; then
        log_message "INFO" "备份文件重命名成功"
        return 0
    fi

    log_message "ERROR" "备份文件重命名失败"
    return 1
}
|
||||
|
||||
###
|
||||
# 同步备份文件到本地
|
||||
# @return int 操作执行状态码
|
||||
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_BACKUP_SOURCE, LOCAL_BACKUP_TARGET
|
||||
# Pull the remote dump archives into the local backup directory via rsync.
# @return int  0 on success, 1 on mkdir failure, rsync's exit code otherwise
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_BACKUP_SOURCE, LOCAL_BACKUP_TARGET
sync_backup_to_local() {
    log_message "INFO" "开始同步备份文件到本地..."

    # Ensure the local target directory exists; the original ignored a
    # failing mkdir and would have failed later inside rsync instead.
    if [ ! -d "${LOCAL_BACKUP_TARGET}" ]; then
        if ! mkdir -p "${LOCAL_BACKUP_TARGET}"; then
            log_message "ERROR" "无法创建本地目录: ${LOCAL_BACKUP_TARGET}"
            return 1
        fi
        log_message "DEBUG" "创建本地目录: ${LOCAL_BACKUP_TARGET}"
    fi

    # `|| exit_code=$?` keeps `set -e` from aborting the script before the
    # failure can be logged.  REMOTE_BACKUP_SOURCE contains a glob that is
    # expanded by the remote shell, as before.
    local exit_code=0
    rsync -avz -e "ssh -p ${REMOTE_PORT}" \
        "${REMOTE_HOST}:${REMOTE_BACKUP_SOURCE}" \
        "${LOCAL_BACKUP_TARGET}" || exit_code=$?

    if [ ${exit_code} -ne 0 ]; then
        log_message "ERROR" "rsync同步失败,退出码: ${exit_code}"
        return ${exit_code}
    fi

    log_message "INFO" "备份文件同步成功"
    return 0
}
|
||||
|
||||
###
|
||||
# 清理远程备份文件
|
||||
# @return int 操作执行状态码
|
||||
# @require REMOTE_BACKUP_SOURCE
|
||||
# Delete the temporary dump archives left on the remote host.  The glob in
# REMOTE_BACKUP_SOURCE is expanded by the remote shell.
# @return int  0 on success, 1 on failure
# @require REMOTE_BACKUP_SOURCE, execute_remote_command
cleanup_remote_backup() {
    log_message "INFO" "开始清理远程备份文件..."

    if execute_remote_command "rm -f ${REMOTE_BACKUP_SOURCE}"; then
        log_message "INFO" "远程备份文件清理成功"
        return 0
    fi

    log_message "ERROR" "远程备份文件清理失败"
    return 1
}
|
||||
|
||||
###
|
||||
# 主执行函数 - 协调整个备份流程
|
||||
# @return int 脚本执行最终状态码
|
||||
# Orchestrate the full backup pipeline.  Every step runs even if an earlier
# one failed (so cleanup still happens); the final status reflects all steps.
# @return int  0 when every step succeeded, 1 otherwise
main() {
    local overall_success=true

    # FIX: the log directory is never created anywhere in this script;
    # without it, every `>> ${LOG_FILE}` append in log_message would fail
    # and abort the script under `set -e`.
    mkdir -p "${LOG_DIR}" || { echo "无法创建日志目录: ${LOG_DIR}" >&2; return 1; }

    log_message "INFO" "=== Gitea备份流程开始 ==="

    # Switch to the working directory.
    cd "${SCRIPT_DIR}" || {
        log_message "ERROR" "无法切换到工作目录: ${SCRIPT_DIR}"
        return 1
    }

    # Run each step; remember any failure but keep going.
    perform_gitea_backup || overall_success=false
    rename_backup_file || overall_success=false
    sync_backup_to_local || overall_success=false
    cleanup_remote_backup || overall_success=false

    # Summarize.
    if [ "${overall_success}" = true ]; then
        log_message "INFO" "=== Gitea备份流程完成 ==="
        return 0
    else
        log_message "ERROR" "=== Gitea备份流程部分失败 ==="
        return 1
    fi
}
|
||||
|
||||
################################################################################
|
||||
# 异常处理设置
|
||||
################################################################################
|
||||
|
||||
# 设置trap捕获信号
|
||||
trap 'log_message "ERROR" "脚本被中断"; exit 1' INT TERM
|
||||
|
||||
################################################################################
|
||||
# 主执行流程
|
||||
################################################################################
|
||||
|
||||
# 函数调用关系:
|
||||
# main -> perform_gitea_backup -> execute_remote_command
|
||||
# -> rename_backup_file -> execute_remote_command
|
||||
# -> sync_backup_to_local
|
||||
# -> cleanup_remote_backup -> execute_remote_command
|
||||
|
||||
# 执行主函数
|
||||
if main; then
|
||||
log_message "INFO" "脚本执行成功"
|
||||
exit 0
|
||||
else
|
||||
log_message "ERROR" "脚本执行失败"
|
||||
exit 1
|
||||
fi
|
||||
@@ -1,361 +0,0 @@
|
||||
#!/bin/bash
|
||||
# =============================================================================
|
||||
# nextcloud备份脚本
|
||||
# 功能:远程Nextcloud维护模式切换、数据库备份、文件同步及清理
|
||||
# 版本:1.0.0
|
||||
# 作者:Shell脚本工程师
|
||||
# 许可证:MIT License
|
||||
# 依赖:ssh, rsync, docker (远程主机), mariadb-client (远程主机)
|
||||
# =============================================================================
|
||||
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
# > 全局常量定义
|
||||
readonly SCRIPT_NAME="$(basename "$0")"
|
||||
readonly SCRIPT_DIR="/root/wdd/backup"
|
||||
readonly LOCK_FILE="/root/wdd/backup/${SCRIPT_NAME}.lock"
|
||||
|
||||
# > 远程主机配置
|
||||
readonly REMOTE_HOST="s5"
|
||||
readonly REMOTE_PORT="22333"
|
||||
readonly REMOTE_USER="root"
|
||||
readonly REMOTE_NEXTCLOUD_DIR="/data/nextcloud"
|
||||
readonly REMOTE_DB_CONTAINER="nextcloud-db"
|
||||
readonly REMOTE_WEB_CONTAINER="nextcloud_web"
|
||||
|
||||
# > 数据库配置
|
||||
readonly DB_NAME="nextcloud"
|
||||
readonly DB_USER="nextcloud"
|
||||
readonly DB_PASSWORD="boge14@Level5"
|
||||
|
||||
# > 本地配置
|
||||
readonly LOCAL_BACKUP_DIR="/data/s5_146-56-159-175/nextcloud"
|
||||
|
||||
# > 日志配置
|
||||
readonly LOG_DIR="${SCRIPT_DIR}/logs"
|
||||
readonly LOG_FILE="${LOG_DIR}/nextcloud_backup_$(date +%Y%m%d).log"
|
||||
|
||||
# > 颜色输出定义
|
||||
readonly RED='\033[0;31m'
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly BLUE='\033[0;34m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
# =============================================================================
|
||||
# 日志函数集
|
||||
# =============================================================================
|
||||
|
||||
###
|
||||
# 初始化日志系统
|
||||
# @require 无
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Prepare the logging directory and the daily log file.
# @require LOG_DIR, LOG_FILE
# @return 0 success | 1 when directory or file cannot be created
###
init_log_system() {
    if ! mkdir -p "${LOG_DIR}"; then
        return 1
    fi
    if ! touch "${LOG_FILE}"; then
        return 1
    fi
    return 0
}
|
||||
|
||||
###
|
||||
# 记录日志消息
|
||||
# @param level string 日志级别(DEBUG/INFO/WARN/ERROR)
|
||||
# @param message string 日志消息
|
||||
# @require LOG_FILE
|
||||
# @return 0 成功
|
||||
###
|
||||
###
# Write one colourised log line to the console and (via tee) to LOG_FILE.
# WARN/ERROR lines are routed to stderr; unknown levels are logged plain.
# @param level   DEBUG/INFO/WARN/ERROR
# @param message log text
# @require LOG_FILE plus the colour constants (RED/GREEN/YELLOW/BLUE/NC)
# @return 0 always
###
log_message() {
    local level="$1"
    local message="$2"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')

    if [[ "${level}" == "DEBUG" ]]; then
        echo -e "${BLUE}[DEBUG]${NC} ${ts} - ${message}" | tee -a "${LOG_FILE}"
    elif [[ "${level}" == "INFO" ]]; then
        echo -e "${GREEN}[INFO]${NC} ${ts} - ${message}" | tee -a "${LOG_FILE}"
    elif [[ "${level}" == "WARN" ]]; then
        echo -e "${YELLOW}[WARN]${NC} ${ts} - ${message}" | tee -a "${LOG_FILE}" >&2
    elif [[ "${level}" == "ERROR" ]]; then
        echo -e "${RED}[ERROR]${NC} ${ts} - ${message}" | tee -a "${LOG_FILE}" >&2
    else
        echo "${ts} - ${message}" | tee -a "${LOG_FILE}"
    fi
    return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# 工具函数集
|
||||
# =============================================================================
|
||||
|
||||
###
|
||||
# 检查命令是否存在
|
||||
# @param command_name string 命令名称
|
||||
# @require 无
|
||||
# @return 0 存在 | 1 不存在
|
||||
###
|
||||
###
# Verify that a required external command is available on PATH.
# @param command_name  name of the binary to look up
# @return 0 if found | 1 otherwise (an ERROR line is logged)
###
check_command() {
    local cmd="$1"
    if command -v "${cmd}" >/dev/null 2>&1; then
        return 0
    fi
    log_message "ERROR" "命令不存在: ${cmd}"
    return 1
}
|
||||
|
||||
###
|
||||
# 执行远程SSH命令
|
||||
# @param command string 要执行的命令
|
||||
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER
|
||||
# @return 远程命令的退出码
|
||||
###
|
||||
###
# Run a single command on the remote host over SSH.
# @param command  the command line to execute remotely
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER
# @return the remote command's exit status
###
execute_remote_command() {
    local remote_cmd="$1"
    # The ssh status is the function's status; no explicit `return $?` needed.
    ssh -p "${REMOTE_PORT}" "${REMOTE_USER}@${REMOTE_HOST}" "${remote_cmd}"
}
|
||||
|
||||
###
|
||||
# 创建锁文件防止并发执行
|
||||
# @require LOCK_FILE
|
||||
# @return 0 成功获取锁 | 1 锁已存在
|
||||
###
|
||||
###
# Acquire an exclusive run lock, failing if another instance holds it.
# FIX: the original did a `[ -e ]` check followed by a separate write,
# which is a TOCTOU race; noclobber makes check-and-create one atomic step.
# @require LOCK_FILE, release_lock
# @return 0 when the lock was acquired | 1 when it is already held
###
acquire_lock() {
    if ! (set -o noclobber; echo "$$" > "${LOCK_FILE}") 2>/dev/null; then
        log_message "ERROR" "备份任务正在运行或异常退出,请检查锁文件: ${LOCK_FILE}"
        return 1
    fi
    # Release the lock on any exit path.
    trap 'release_lock' EXIT
    return 0
}
|
||||
|
||||
###
|
||||
# 释放锁文件
|
||||
# @require LOCK_FILE
|
||||
# @return 0 成功
|
||||
###
|
||||
###
# Remove the lock file if it exists.
# @require LOCK_FILE
# @return 0 always
###
release_lock() {
    if [ -e "${LOCK_FILE}" ]; then
        rm -f "${LOCK_FILE}"
    fi
    return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# Nextcloud核心备份函数
|
||||
# =============================================================================
|
||||
|
||||
###
|
||||
# 启用Nextcloud维护模式
|
||||
# @require execute_remote_command, REMOTE_WEB_CONTAINER
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Switch Nextcloud into maintenance mode via occ inside the web container.
# @require execute_remote_command, REMOTE_WEB_CONTAINER
# @return 0 success | 1 failure
###
enable_maintenance_mode() {
    log_message "INFO" "启用Nextcloud维护模式..."

    if execute_remote_command "docker exec -u www-data ${REMOTE_WEB_CONTAINER} php occ maintenance:mode --on"; then
        log_message "INFO" "维护模式已启用"
        return 0
    fi

    log_message "ERROR" "启用维护模式失败"
    return 1
}
|
||||
|
||||
###
|
||||
# 禁用Nextcloud维护模式
|
||||
# @require execute_remote_command, REMOTE_WEB_CONTAINER
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Take Nextcloud out of maintenance mode via occ inside the web container.
# @require execute_remote_command, REMOTE_WEB_CONTAINER
# @return 0 success | 1 failure
###
disable_maintenance_mode() {
    log_message "INFO" "禁用Nextcloud维护模式..."

    if execute_remote_command "docker exec -u www-data ${REMOTE_WEB_CONTAINER} php occ maintenance:mode --off"; then
        log_message "INFO" "维护模式已禁用"
        return 0
    fi

    log_message "ERROR" "禁用维护模式失败"
    return 1
}
|
||||
|
||||
###
|
||||
# 远程执行MariaDB数据库备份
|
||||
# @require execute_remote_command, REMOTE_DB_CONTAINER, DB_NAME, DB_USER, DB_PASSWORD, REMOTE_NEXTCLOUD_DIR
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Dump the Nextcloud MariaDB database on the remote host.
# The dump runs inside the DB container with mariadb-dump
# (--single-transaction takes a consistent snapshot for transactional
# tables) and is redirected by the remote shell to a timestamped .sql
# file under REMOTE_NEXTCLOUD_DIR.
# NOTE(review): DB_PASSWORD is interpolated into the remote command line
# and is therefore visible in `ps` output on the remote host — consider
# a --defaults-extra-file instead.
# @require execute_remote_command, REMOTE_DB_CONTAINER, DB_NAME, DB_USER, DB_PASSWORD, REMOTE_NEXTCLOUD_DIR
# @return 0 success | >0 failure
###
backup_database() {
    log_message "INFO" "开始数据库备份..."

    local backup_file="${REMOTE_NEXTCLOUD_DIR}/nextcloud-db_backup_$(date +%Y%m%d-%H%M%S).sql"
    local backup_cmd="docker exec ${REMOTE_DB_CONTAINER} mariadb-dump --single-transaction -h localhost -u ${DB_USER} -p'${DB_PASSWORD}' ${DB_NAME} > ${backup_file}"

    if ! execute_remote_command "${backup_cmd}"; then
        log_message "ERROR" "数据库备份失败"
        return 1
    fi

    # Verify on the remote side that the dump file was actually created;
    # the helper's stdout ("exists"/"missing") is compared locally.
    local verify_cmd="[ -f \"${backup_file}\" ] && echo \"exists\" || echo \"missing\""
    if [ "$(execute_remote_command "${verify_cmd}")" != "exists" ]; then
        log_message "ERROR" "数据库备份文件创建失败"
        return 1
    fi

    log_message "INFO" "数据库备份完成: ${backup_file}"
    return 0
}
|
||||
|
||||
###
|
||||
# 使用rsync同步Nextcloud文件到本地
|
||||
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER, REMOTE_NEXTCLOUD_DIR, LOCAL_BACKUP_DIR
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Mirror the remote Nextcloud data directory into the local staging dir.
# FIX: the original built an rsync command as a string and ran it through
# `eval`, re-parsing the whole line; calling rsync directly with quoted
# arguments removes the quoting/injection pitfalls with identical effect.
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER, REMOTE_NEXTCLOUD_DIR, LOCAL_BACKUP_DIR
# @return 0 success | >0 failure
###
sync_nextcloud_files() {
    log_message "INFO" "开始同步Nextcloud文件到本地..."

    # Create the local staging directory.
    if ! mkdir -p "${LOCAL_BACKUP_DIR}"; then
        log_message "ERROR" "创建本地暂存目录失败: ${LOCAL_BACKUP_DIR}"
        return 1
    fi

    # Direct rsync invocation — no eval, no string building.
    if ! rsync -avz --progress -e "ssh -p ${REMOTE_PORT}" \
        "${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_NEXTCLOUD_DIR}/" \
        "${LOCAL_BACKUP_DIR}/"; then
        log_message "ERROR" "Nextcloud文件同步失败"
        return 1
    fi

    log_message "INFO" "Nextcloud文件同步完成"
    return 0
}
|
||||
|
||||
|
||||
###
|
||||
# 远程删除数据库备份文件
|
||||
# @require execute_remote_command, REMOTE_NEXTCLOUD_DIR
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Delete the database dump files left on the remote host (the glob is
# expanded by the remote shell).
# @require execute_remote_command, REMOTE_NEXTCLOUD_DIR
# @return 0 success | 1 failure
###
remote_cleanup_backup() {
    log_message "INFO" "清理远程数据库备份文件..."

    if execute_remote_command "rm -f ${REMOTE_NEXTCLOUD_DIR}/nextcloud-db_backup_*.sql"; then
        log_message "INFO" "远程清理完成"
        return 0
    fi

    log_message "ERROR" "远程清理失败"
    return 1
}
|
||||
|
||||
###
|
||||
# 清理本地暂存目录
|
||||
# @require LOCAL_BACKUP_DIR
|
||||
# @return 0 成功
|
||||
###
|
||||
###
# Remove the local staging directory.
# FIX: guard the recursive delete with ${VAR:?} so an unset/empty variable
# aborts instead of expanding to "" (a bare `rm -rf ""` is harmless but an
# empty prefix in path-building variants is how /-wipes happen); `--`
# protects against a value starting with a dash.
# @require LOCAL_BACKUP_DIR
# @return 0 always
###
local_cleanup() {
    log_message "INFO" "清理本地暂存目录..."
    if [ -d "${LOCAL_BACKUP_DIR}" ]; then
        rm -rf -- "${LOCAL_BACKUP_DIR:?}"
    fi
    return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# 主执行流程
|
||||
# =============================================================================
|
||||
|
||||
###
|
||||
# 主备份流程
|
||||
# @require 所有上述函数
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Run the full Nextcloud backup pipeline in order, aborting on the first
# failing step and always attempting to lift maintenance mode afterwards.
# @require all helper functions above
# @return 0 success | >0 failure
###
main_backup_process() {
    log_message "INFO" "=== 开始Nextcloud备份任务 ==="

    # Fail fast if a required local tool is missing.
    local dep
    for dep in ssh rsync; do
        check_command "${dep}" || return 1
    done

    # Pipeline steps, executed in order.
    local pipeline=(
        enable_maintenance_mode
        backup_database
        sync_nextcloud_files
        remote_cleanup_backup
        disable_maintenance_mode
        # local_cleanup
    )

    local step
    for step in "${pipeline[@]}"; do
        if ! "${step}"; then
            log_message "ERROR" "备份任务失败,正在尝试恢复..."
            # Best effort: never leave the instance stuck in maintenance mode.
            disable_maintenance_mode || true
            return 1
        fi
    done

    log_message "INFO" "=== Nextcloud备份任务完成 ==="
    return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# 脚本入口点
|
||||
# =============================================================================
|
||||
|
||||
# > 设置错误处理
|
||||
trap 'log_message "ERROR" "脚本异常退出"; disable_maintenance_mode || true; release_lock; exit 1' ERR
|
||||
|
||||
# > 主执行块
|
||||
# Entry point: take the run lock, set up logging, run the backup, then
# release the lock.  Exits the process with 0/1.
main() {
    acquire_lock || exit 1

    if ! init_log_system; then
        log_message "ERROR" "日志系统初始化失败"
        exit 1
    fi

    if ! main_backup_process; then
        log_message "ERROR" "备份任务执行失败"
        exit 1
    fi

    release_lock
    exit 0
}
|
||||
|
||||
# > 脚本执行入口
|
||||
main "$@"
|
||||
|
||||
# =============================================================================
|
||||
# 函数调用关系图
|
||||
# =============================================================================
|
||||
# main
|
||||
# ├── acquire_lock
|
||||
# ├── init_log_system
|
||||
# └── main_backup_process
|
||||
# ├── check_command (多次调用)
|
||||
# ├── enable_maintenance_mode
|
||||
# │ └── execute_remote_command
|
||||
# ├── backup_database
|
||||
# │ └── execute_remote_command
|
||||
# ├── sync_nextcloud_files
|
||||
# ├── move_to_backup_dir
|
||||
# ├── remote_cleanup_backup
|
||||
# │ └── execute_remote_command
|
||||
# ├── disable_maintenance_mode
|
||||
# │ └── execute_remote_command
|
||||
# └── local_cleanup
|
||||
|
||||
@@ -1,342 +0,0 @@
|
||||
#!/bin/bash
|
||||
# =============================================================================
|
||||
# b-vault-warden备份脚本
|
||||
# 功能:远程执行Vaultwarden备份、同步备份文件、加密压缩及清理
|
||||
# 版本:1.0.0
|
||||
# 作者:Shell脚本工程师
|
||||
# 许可证:MIT License
|
||||
# 依赖:ssh, rsync, 7zip, docker (远程主机)
|
||||
# =============================================================================
|
||||
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
# > 全局常量定义
|
||||
readonly SCRIPT_NAME="$(basename "$0")"
|
||||
readonly SCRIPT_DIR="/root/wdd/backup"
|
||||
readonly LOCK_FILE="/root/wdd/backup/${SCRIPT_NAME}.lock"
|
||||
|
||||
# > 配置参数(可根据需要调整为环境变量)
|
||||
readonly REMOTE_HOST="s5"
|
||||
readonly REMOTE_PORT="22333"
|
||||
readonly REMOTE_USER="root"
|
||||
readonly REMOTE_BACKUP_CMD="docker exec vault-warden /vaultwarden backup"
|
||||
readonly REMOTE_DATA_DIR="/data/vault-warden/persist-data"
|
||||
readonly LOCAL_BACKUP_DIR="/data/s5_146-56-159-175/vault_warden"
|
||||
readonly BACKUP_PATTERNS=(
|
||||
"config.json"
|
||||
"rsa_key*"
|
||||
"attachments"
|
||||
"icon_cache"
|
||||
"sends"
|
||||
"db_*.sqlite3"
|
||||
)
|
||||
readonly ENCRYPTION_PASSWORD="SuperWdd.123" # > 请在实际使用时修改
|
||||
|
||||
# > 日志配置
|
||||
readonly LOG_DIR="${SCRIPT_DIR}/logs"
|
||||
readonly LOG_FILE="${LOG_DIR}/vault_warden_backup_$(date +%Y%m%d).log"
|
||||
|
||||
# > 颜色输出定义
|
||||
readonly RED='\033[0;31m'
|
||||
readonly GREEN='\033[0;32m'
|
||||
readonly YELLOW='\033[1;33m'
|
||||
readonly BLUE='\033[0;34m'
|
||||
readonly NC='\033[0m'
|
||||
|
||||
# =============================================================================
|
||||
# 日志函数集
|
||||
# =============================================================================
|
||||
|
||||
###
|
||||
# 初始化日志系统
|
||||
# @require 无
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Prepare the logging directory and the daily log file.
# @require LOG_DIR, LOG_FILE
# @return 0 success | 1 when directory or file cannot be created
###
init_log_system() {
    if ! mkdir -p "${LOG_DIR}"; then
        return 1
    fi
    if ! touch "${LOG_FILE}"; then
        return 1
    fi
    return 0
}
|
||||
|
||||
###
|
||||
# 记录日志消息
|
||||
# @param level string 日志级别(DEBUG/INFO/WARN/ERROR)
|
||||
# @param message string 日志消息
|
||||
# @require LOG_FILE
|
||||
# @return 0 成功
|
||||
###
|
||||
###
# Write one colourised log line to the console and (via tee) to LOG_FILE.
# WARN/ERROR lines are routed to stderr; unknown levels are logged plain.
# @param level   DEBUG/INFO/WARN/ERROR
# @param message log text
# @require LOG_FILE plus the colour constants (RED/GREEN/YELLOW/BLUE/NC)
# @return 0 always
###
log_message() {
    local level="$1"
    local message="$2"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')

    if [[ "${level}" == "DEBUG" ]]; then
        echo -e "${BLUE}[DEBUG]${NC} ${ts} - ${message}" | tee -a "${LOG_FILE}"
    elif [[ "${level}" == "INFO" ]]; then
        echo -e "${GREEN}[INFO]${NC} ${ts} - ${message}" | tee -a "${LOG_FILE}"
    elif [[ "${level}" == "WARN" ]]; then
        echo -e "${YELLOW}[WARN]${NC} ${ts} - ${message}" | tee -a "${LOG_FILE}" >&2
    elif [[ "${level}" == "ERROR" ]]; then
        echo -e "${RED}[ERROR]${NC} ${ts} - ${message}" | tee -a "${LOG_FILE}" >&2
    else
        echo "${ts} - ${message}" | tee -a "${LOG_FILE}"
    fi
    return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# 工具函数集
|
||||
# =============================================================================
|
||||
|
||||
###
|
||||
# 检查命令是否存在
|
||||
# @param command_name string 命令名称
|
||||
# @require 无
|
||||
# @return 0 存在 | 1 不存在
|
||||
###
|
||||
###
# Verify that a required external command is available on PATH.
# @param command_name  name of the binary to look up
# @return 0 if found | 1 otherwise (an ERROR line is logged)
###
check_command() {
    local cmd="$1"
    if command -v "${cmd}" >/dev/null 2>&1; then
        return 0
    fi
    log_message "ERROR" "命令不存在: ${cmd}"
    return 1
}
|
||||
|
||||
###
|
||||
# 执行远程SSH命令
|
||||
# @param command string 要执行的命令
|
||||
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER
|
||||
# @return 远程命令的退出码
|
||||
###
|
||||
###
# Run a single command on the remote host over SSH.
# @param command  the command line to execute remotely
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER
# @return the remote command's exit status
###
execute_remote_command() {
    local remote_cmd="$1"
    # The ssh status is the function's status; no explicit `return $?` needed.
    ssh -p "${REMOTE_PORT}" "${REMOTE_USER}@${REMOTE_HOST}" "${remote_cmd}"
}
|
||||
|
||||
###
|
||||
# 创建锁文件防止并发执行
|
||||
# @require LOCK_FILE
|
||||
# @return 0 成功获取锁 | 1 锁已存在
|
||||
###
|
||||
###
# Acquire an exclusive run lock, failing if another instance holds it.
# FIX: the original did a `[ -e ]` check followed by a separate write,
# which is a TOCTOU race; noclobber makes check-and-create one atomic step.
# @require LOCK_FILE, release_lock
# @return 0 when the lock was acquired | 1 when it is already held
###
acquire_lock() {
    if ! (set -o noclobber; echo "$$" > "${LOCK_FILE}") 2>/dev/null; then
        log_message "ERROR" "备份任务正在运行或异常退出,请检查锁文件: ${LOCK_FILE}"
        return 1
    fi
    # Release the lock on any exit path.
    trap 'release_lock' EXIT
    return 0
}
|
||||
|
||||
###
|
||||
# 释放锁文件
|
||||
# @require LOCK_FILE
|
||||
# @return 0 成功
|
||||
###
|
||||
###
# Remove the lock file if it exists.
# @require LOCK_FILE
# @return 0 always
###
release_lock() {
    if [ -e "${LOCK_FILE}" ]; then
        rm -f "${LOCK_FILE}"
    fi
    return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# 核心备份函数
|
||||
# =============================================================================
|
||||
|
||||
###
|
||||
# 远程执行Vaultwarden备份命令
|
||||
# @require execute_remote_command, REMOTE_BACKUP_CMD
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Kick off the Vaultwarden-native backup command on the remote host.
# @require execute_remote_command, REMOTE_BACKUP_CMD
# @return 0 success | 1 failure
###
remote_execute_backup() {
    log_message "INFO" "开始在远程主机执行Vaultwarden备份..."

    if execute_remote_command "${REMOTE_BACKUP_CMD}"; then
        log_message "INFO" "远程备份命令执行成功"
        return 0
    fi

    log_message "ERROR" "远程备份命令执行失败"
    return 1
}
|
||||
|
||||
###
|
||||
# 使用rsync同步备份文件到本地
|
||||
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER, REMOTE_DATA_DIR, LOCAL_BACKUP_DIR, BACKUP_PATTERNS
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Pull the configured Vaultwarden data patterns from the remote host into
# the local staging directory.
# FIX: the original built the rsync command as a string and ran it through
# `eval`; sources are now collected in an argv array and rsync is called
# directly, avoiding a second round of shell parsing.  The glob patterns
# are still expanded by the remote shell (rsync passes remote paths
# through), as before.
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER, REMOTE_DATA_DIR, LOCAL_BACKUP_DIR, BACKUP_PATTERNS
# @return 0 success | >0 failure
###
sync_backup_files() {
    log_message "INFO" "开始同步备份文件到本地..."

    # Create the local staging directory.
    if ! mkdir -p "${LOCAL_BACKUP_DIR}"; then
        log_message "ERROR" "创建本地暂存目录失败: ${LOCAL_BACKUP_DIR}"
        return 1
    fi

    # Build one remote source argument per configured pattern.
    local sources=()
    local pattern
    for pattern in "${BACKUP_PATTERNS[@]}"; do
        sources+=("${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_DATA_DIR}/${pattern}")
    done

    if ! rsync -avz --progress -e "ssh -p ${REMOTE_PORT}" \
        "${sources[@]}" "${LOCAL_BACKUP_DIR}/"; then
        log_message "ERROR" "文件同步失败"
        return 1
    fi

    log_message "INFO" "文件同步完成"
    return 0
}
|
||||
|
||||
###
|
||||
# 使用7zip加密压缩备份文件
|
||||
# @require LOCAL_BACKUP_DIR, LOCAL_BACKUP_DIR, ENCRYPTION_PASSWORD
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Encrypt the staging directory into a timestamped, header-encrypted 7z
# archive stored alongside the data.
# FIX: previous runs leave their *.7z archives in the same directory; the
# original archived `.` without exclusions, so every new archive swallowed
# all older archives and backups grew without bound.  `-xr!*.7z` excludes
# them (including the archive currently being written).
# @require check_command, LOCAL_BACKUP_DIR, ENCRYPTION_PASSWORD
# @return 0 success | >0 failure
###
encrypt_and_compress() {
    log_message "INFO" "开始加密压缩备份文件..."

    # 7z is an extra dependency beyond ssh/rsync; verify it here too.
    if ! check_command "7z"; then
        log_message "ERROR" "7zip命令不存在,请安装p7zip-full包"
        return 1
    fi

    if ! mkdir -p "${LOCAL_BACKUP_DIR}"; then
        log_message "ERROR" "创建备份目录失败: ${LOCAL_BACKUP_DIR}"
        return 1
    fi

    local backup_file="${LOCAL_BACKUP_DIR}/vaultwarden-backup-$(date +%Y%m%d-%H%M%S).7z"

    # -mhe=on also encrypts the file names inside the archive.
    if ! (cd "${LOCAL_BACKUP_DIR}" && 7z a -p"${ENCRYPTION_PASSWORD}" -mhe=on "-xr!*.7z" "${backup_file}" . >/dev/null); then
        log_message "ERROR" "加密压缩失败"
        return 1
    fi

    log_message "INFO" "加密压缩完成: ${backup_file}"
    return 0
}
|
||||
|
||||
###
|
||||
# 远程删除备份数据库文件
|
||||
# @require execute_remote_command, REMOTE_DATA_DIR
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Delete the sqlite backup files left on the remote host (the glob is
# expanded by the remote shell).
# @require execute_remote_command, REMOTE_DATA_DIR
# @return 0 success | 1 failure
###
remote_cleanup_backup() {
    log_message "INFO" "开始清理远程备份文件..."

    if execute_remote_command "rm -rf ${REMOTE_DATA_DIR}/db_*.sqlite3"; then
        log_message "INFO" "远程清理完成"
        return 0
    fi

    log_message "ERROR" "远程清理失败"
    return 1
}
|
||||
|
||||
###
|
||||
# 清理本地暂存目录
|
||||
# @require LOCAL_BACKUP_DIR
|
||||
# @return 0 成功
|
||||
###
|
||||
###
# Remove the local staging directory.
# FIX: guard the recursive delete with ${VAR:?} so an unset/empty variable
# aborts instead of silently expanding to nothing; `--` protects against a
# value starting with a dash.
# @require LOCAL_BACKUP_DIR
# @return 0 always
###
local_cleanup() {
    log_message "INFO" "清理本地暂存目录..."
    if [ -d "${LOCAL_BACKUP_DIR}" ]; then
        rm -rf -- "${LOCAL_BACKUP_DIR:?}"
    fi
    return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# 主执行流程
|
||||
# =============================================================================
|
||||
|
||||
###
|
||||
# 主备份流程
|
||||
# @require 所有上述函数
|
||||
# @return 0 成功 | >0 失败
|
||||
###
|
||||
###
# Run the Vaultwarden backup pipeline step by step.
# FIX: the failure message printed ${#steps[@]} — the TOTAL number of
# steps — instead of identifying the failing step; it now reports the
# failing step's name.
# @require all helper functions above
# @return 0 success | >0 failure
###
main_backup_process() {
    log_message "INFO" "=== 开始Vaultwarden备份任务 ==="

    # Fail fast if a required local tool is missing.
    local dep
    for dep in ssh rsync 7z; do
        check_command "${dep}" || return 1
    done

    # Pipeline steps, executed in order.
    local steps=(
        remote_execute_backup
        sync_backup_files
        encrypt_and_compress
        remote_cleanup_backup
        # local_cleanup
    )

    local step
    for step in "${steps[@]}"; do
        if ! "${step}"; then
            log_message "ERROR" "备份任务在步骤 ${step} 失败"
            return 1
        fi
    done

    log_message "INFO" "=== Vaultwarden备份任务完成 ==="
    return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# 脚本入口点
|
||||
# =============================================================================
|
||||
|
||||
# > 设置错误处理
|
||||
trap 'log_message "ERROR" "脚本异常退出"; release_lock; exit 1' ERR
|
||||
|
||||
# > 主执行块
|
||||
# Entry point: take the run lock, set up logging, run the backup, then
# release the lock.  Exits the process with 0/1.
main() {
    acquire_lock || exit 1

    if ! init_log_system; then
        log_message "ERROR" "日志系统初始化失败"
        exit 1
    fi

    if ! main_backup_process; then
        log_message "ERROR" "备份任务执行失败"
        exit 1
    fi

    release_lock
    exit 0
}
|
||||
|
||||
# > 脚本执行入口
|
||||
main "$@"
|
||||
|
||||
# =============================================================================
|
||||
# 函数调用关系图
|
||||
# =============================================================================
|
||||
# main
|
||||
# ├── acquire_lock
|
||||
# ├── init_log_system
|
||||
# └── main_backup_process
|
||||
# ├── check_command (多次调用)
|
||||
# ├── remote_execute_backup
|
||||
# │ └── execute_remote_command
|
||||
# ├── sync_backup_files
|
||||
# ├── encrypt_and_compress
|
||||
# │ └── check_command
|
||||
# ├── remote_cleanup_backup
|
||||
# │ └── execute_remote_command
|
||||
# └── local_cleanup
|
||||
127
0-部署应用/CloudCone-备份中心/firefly-iii-backup.sh
Normal file
127
0-部署应用/CloudCone-备份中心/firefly-iii-backup.sh
Normal file
@@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env bash
|
||||
# =============================================================================
|
||||
# Meta : Firefly III 备份执行脚本
|
||||
# Version : 1.0.0
|
||||
# Author : Bash Shell Senior Development Engineer
|
||||
# License : MIT
|
||||
# Description : 自动化执行 Firefly III 远程备份、同步、加密、上传及清理任务。
|
||||
#
|
||||
# 本脚本基于公司备份中心的通用框架,使用 common.sh 中定义的日志、远程执行、
|
||||
# 加密与 rclone 函数实现。它假定 Firefly III 运行在名为 p3 的远程服务器
|
||||
# 上,并且容器使用 MariaDB 作为数据库。由于 p3 的磁盘 I/O 性能较差,
|
||||
# Firefly III 使用挂载在 /mnt/ramdisk 的 RAM 磁盘保存数据库和上传目录。
|
||||
# 为保证数据持久性,本脚本会定期将数据拉取到备份中心并上传到异地存储。
|
||||
# =============================================================================
|
||||
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# 引入公共库
|
||||
# -----------------------------------------------------------------------------
|
||||
source "$(dirname "$0")/common.sh" || { echo "FATAL: common.sh not found." >&2; exit 1; }
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# 配置区
|
||||
# -----------------------------------------------------------------------------
|
||||
readonly APP_NAME="FireflyIII"
|
||||
readonly REMOTE_USER="root"
|
||||
readonly REMOTE_HOST="p3"
|
||||
# 远程主机的 SSH 端口继承自 common.sh 中的 REMOTE_SSH_PORT
|
||||
readonly MAX_ENCRYPTED_REPLICAS=3 # 可根据需求调整远程保留的最大加密副本数
|
||||
|
||||
# MariaDB 容器名称及数据库凭据。建议通过环境变量传入,避免明文密码。
|
||||
readonly REMOTE_DB_CONTAINER="firefly_iii_db"
|
||||
# 从环境变量中读取数据库名称和凭据,默认值仅供示例使用。
|
||||
readonly DB_USER="${FIREFLY_DB_USER:-firefly}" # 数据库用户名
|
||||
readonly DB_PASSWORD="${FIREFLY_DB_PASSWORD:-ChangeThisPassword}" # 数据库密码
|
||||
readonly DB_NAME="${FIREFLY_DB_NAME:-firefly}" # 数据库名
|
||||
|
||||
# 远程路径:FireflyIII 在 p3 上的 RAM 磁盘挂载目录。
|
||||
readonly REMOTE_UPLOAD_DIR="/mnt/ramdisk/firefly_iii_upload"
|
||||
readonly REMOTE_DB_RAMDIR="/mnt/ramdisk/firefly_iii_db"
|
||||
|
||||
# 本地备份目录。根据备份中心目录结构自定义,用于暂存同步文件。
|
||||
readonly LOCAL_BACKUP_DIR="/data/p3_firefly_iii"
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# 主执行流程
|
||||
# -----------------------------------------------------------------------------
|
||||
# Entry point: dump the Firefly III database on the remote host, pull the
# data to the backup centre, encrypt it, ship it off-site with rclone and
# clean up.  The helpers (log_message, execute_remote_command,
# encrypt_with_7zip, rclone_copy, rclone_control_replicas,
# cleanup_local_encrypted_files) and the REMOTE_SSH_PORT / SCRIPT_RUN_DIR /
# RCLONE_REMOTE_REPO variables come from common.sh — NOTE(review): their
# exact contracts are not visible in this file; confirm against common.sh.
main() {
    trap 'log_message "ERROR" "${APP_NAME} 的备份任务出现错误! 终止"' ERR
    log_message "INFO" "====== 开始 ${APP_NAME} 备份任务 ======"

    # -------------------------------------------------------------------------
    # Step 1: dump the database inside the remote container.  The dump is
    # redirected straight onto the RAM disk because p3's disk I/O is slow.
    # NOTE(review): DB_PASSWORD appears on the remote command line and is
    # visible in `ps` output there — consider a credentials file instead.
    # -------------------------------------------------------------------------
    log_message "INFO" "[Step 1/7] 在远程 p3 上导出 Firefly III 数据库..."
    local dump_filename="firefly-db_backup_$(date +%Y%m%d-%H%M%S).sql"
    local remote_dump_path="${REMOTE_DB_RAMDIR}/${dump_filename}"
    local dump_cmd="docker exec ${REMOTE_DB_CONTAINER} mariadb-dump --single-transaction -h localhost -u ${DB_USER} -p'${DB_PASSWORD}' ${DB_NAME} > ${remote_dump_path}"
    execute_remote_command "${REMOTE_USER}" "${REMOTE_HOST}" "${dump_cmd}"

    # -------------------------------------------------------------------------
    # Step 2: rsync the upload directory and the fresh dump to the local
    # staging area; --delete keeps the upload mirror exact.
    # -------------------------------------------------------------------------
    log_message "INFO" "[Step 2/7] rsync 复制远程数据到备份中心..."
    mkdir -p "${LOCAL_BACKUP_DIR}/upload"
    # Sync the upload directory.
    rsync -avz --delete --progress -e "ssh -p ${REMOTE_SSH_PORT}" \
        "${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_UPLOAD_DIR}/" \
        "${LOCAL_BACKUP_DIR}/upload/"
    # Sync the database dump.
    rsync -avz --progress -e "ssh -p ${REMOTE_SSH_PORT}" \
        "${REMOTE_USER}@${REMOTE_HOST}:${remote_dump_path}" \
        "${LOCAL_BACKUP_DIR}/"

    # Optional: add rsync commands here to also back up .env and
    # docker-compose.yml from the compose directory, e.g.:
    # rsync -avz "${REMOTE_USER}@${REMOTE_HOST}:/path/to/compose/.env" "${LOCAL_BACKUP_DIR}/"

    # -------------------------------------------------------------------------
    # Step 3: delete the temporary .sql dump on p3 to free RAM-disk space.
    # -------------------------------------------------------------------------
    log_message "INFO" "[Step 3/7] 清理远程数据库转储文件..."
    execute_remote_command "${REMOTE_USER}" "${REMOTE_HOST}" "rm -f ${remote_dump_path}"

    # -------------------------------------------------------------------------
    # Step 4: encrypt-compress the whole staging directory with 7-zip; the
    # archive name carries a timestamp.
    # -------------------------------------------------------------------------
    log_message "INFO" "[Step 4/7] 使用 7zip 加密本地备份目录..."
    local archive_file="${SCRIPT_RUN_DIR}/${APP_NAME}-backup-$(date +%Y%m%d-%H%M%S).7z"
    encrypt_with_7zip "${LOCAL_BACKUP_DIR}" "${archive_file}"

    # -------------------------------------------------------------------------
    # Step 5: upload the encrypted archive to cold storage via rclone.
    # -------------------------------------------------------------------------
    log_message "INFO" "[Step 5/7] 上传加密压缩包至冷存储 => ${RCLONE_REMOTE_REPO}..."
    rclone_copy "${archive_file}" "${RCLONE_REMOTE_REPO}"

    # -------------------------------------------------------------------------
    # Step 6: prune old remote replicas down to the configured maximum.
    # -------------------------------------------------------------------------
    log_message "INFO" "[Step 6/7] 控制远程备份副本数量为 ${MAX_ENCRYPTED_REPLICAS}..."
    rclone_control_replicas "${RCLONE_REMOTE_REPO}" "${APP_NAME}-backup-" "${MAX_ENCRYPTED_REPLICAS}"

    # -------------------------------------------------------------------------
    # Step 7: remove the local encrypted archive(s) to save disk space; the
    # rsync'd staging directory is deliberately kept.
    # -------------------------------------------------------------------------
    log_message "INFO" "[Step 7/7] 清理本地加密压缩包..."
    cleanup_local_encrypted_files "${SCRIPT_RUN_DIR}"
    # Optional: rm -rf "${LOCAL_BACKUP_DIR}"

    log_message "INFO" "====== ${APP_NAME} 备份任务已全部完成! ======"
}
|
||||
|
||||
main "$@"
|
||||
BIN
0-部署应用/CloudCone-备份中心/备份中心文档说明-2026年1月6日.docx
Normal file
BIN
0-部署应用/CloudCone-备份中心/备份中心文档说明-2026年1月6日.docx
Normal file
Binary file not shown.
@@ -0,0 +1,4 @@
|
||||
MYSQL_RANDOM_ROOT_PASSWORD=yes
|
||||
MYSQL_DATABASE=firefly
|
||||
MYSQL_USER=firefly
|
||||
MYSQL_PASSWORD=Te&he8d@qVuhp&ao # 与 .env 中 DB_PASSWORD 相同
|
||||
@@ -0,0 +1,23 @@
|
||||
APP_ENV=production
|
||||
APP_DEBUG=false
|
||||
APP_KEY=siQZ6t2tc3zkj3gl2yM0XEt9qOr5q7Zh # 必须恰好 32 位
|
||||
|
||||
# 数据库配置(与 .db.env 保持一致)
|
||||
DB_CONNECTION=mysql
|
||||
DB_HOST=db # Docker 内部主机名
|
||||
DB_PORT=3306
|
||||
DB_DATABASE=firefly
|
||||
DB_USERNAME=firefly
|
||||
DB_PASSWORD=Te&he8d@qVuhp&ao # 修改为强密码
|
||||
|
||||
# 中文国际化与时区设置
|
||||
DEFAULT_LANGUAGE=zh_CN
|
||||
DEFAULT_LOCALE=zh_CN
|
||||
TZ=Asia/Shanghai
|
||||
|
||||
# 网络配置
|
||||
APP_URL=http://129.146.65.80:21180
|
||||
TRUSTED_PROXIES=*
|
||||
|
||||
# 定时任务设置(可选,用于自动化财务计算)
|
||||
STATIC_CRON_TOKEN=eXkbwbyzlNcZAn8yi15AlHb0Jm2EGlTf # 必须恰好 32 位
|
||||
@@ -0,0 +1,52 @@
|
||||
version: '3'
|
||||
services:
|
||||
app:
|
||||
image: fireflyiii/core:latest
|
||||
container_name: firefly_iii_core
|
||||
restart: always
|
||||
ports:
|
||||
- "1800:8080"
|
||||
env_file: .env
|
||||
hostname: app
|
||||
networks:
|
||||
- firefly_iii
|
||||
depends_on:
|
||||
- db
|
||||
volumes:
|
||||
# 将上传目录映射到 RAM 磁盘
|
||||
- type: bind
|
||||
source: /mnt/ramdisk/firefly_iii_upload
|
||||
target: /var/www/html/storage/upload
|
||||
|
||||
db:
|
||||
image: mariadb:lts
|
||||
container_name: firefly_iii_db
|
||||
hostname: db
|
||||
restart: always
|
||||
env_file: .db.env
|
||||
networks:
|
||||
- firefly_iii
|
||||
volumes:
|
||||
# 将数据库目录映射到 RAM 磁盘
|
||||
- type: bind
|
||||
source: /mnt/ramdisk/firefly_iii_db
|
||||
target: /var/lib/mysql
|
||||
|
||||
cron:
|
||||
image: alpine
|
||||
container_name: firefly_iii_cron
|
||||
restart: always
|
||||
env_file: .env
|
||||
networks:
|
||||
- firefly_iii
|
||||
depends_on:
|
||||
- app
|
||||
command: >
|
||||
sh -c "apk add tzdata && \
|
||||
(ln -s /usr/share/zoneinfo/$$TZ /etc/localtime || true) && \
|
||||
echo '0 3 * * * wget -qO- http://app:8080/api/v1/cron/$$STATIC_CRON_TOKEN;echo' | crontab - && \
|
||||
crond -f -L /dev/stdout"
|
||||
|
||||
networks:
|
||||
firefly_iii:
|
||||
driver: bridge
|
||||
302
0-部署应用/Phoneix-arm64-01/个人财务-fireflyIII-2026年1月5日/部署说明文档.md
Normal file
302
0-部署应用/Phoneix-arm64-01/个人财务-fireflyIII-2026年1月5日/部署说明文档.md
Normal file
@@ -0,0 +1,302 @@
|
||||
## Firefly III 项目深度分析报告
|
||||
|
||||
### 项目概述
|
||||
|
||||
Firefly III 是一个功能完整的开源自托管个人财务管理系统。该项目采用 PHP + Laravel 框架开发,提供企业级的财务管理能力,同时完全避免数据上传至云端。作为一个 19.5k+ Star 的成熟开源项目,Firefly III 已经过长期的生产环境验证。
|
||||
|
||||
### 核心功能与架构
|
||||
|
||||
Firefly III 的架构围绕"复式记账法"设计,涵盖以下核心模块:
|
||||
|
||||
**账户管理体系** 支持六种账户类型:资产账户、共享资产账户(家庭账户)、储蓄账户、信用卡账户、债务账户(贷款/抵押贷款)。每个账户支持自定义名称、初始余额、币种配置,可精细化管理多币种交易。
|
||||
|
||||
**交易记录系统** 提供三种交易模式:收入(外部资金转入本账户)、支出(本账户资金转出)、转账(账户间资金移动)。这种三分法确保财务数据分析的准确性,避免转账被误计为收支。
|
||||
|
||||
**预算与分类框架** 集成预算管理、分类管理和标签系统。用户可为不同消费类别设定预算上限,系统以进度条形式实时展示支出占比,超出部分用红色警示。
|
||||
|
||||
### 财务报表能力 - 核心竞争力
|
||||
|
||||
Firefly III 提供六种原生报表类型,每种报表都支持灵活的参数定制:
|
||||
|
||||
| 报表类型 | 核心功能 | 应用场景 |
|
||||
|---------|--------|--------|
|
||||
| 默认财务报表 | 账户余额汇总、收支总览、预算概览 | 日常财务全景了解 |
|
||||
| 审计报表 | 逐笔交易前后余额追踪、错误排查 | 账务对账、数据验证 |
|
||||
| 支出/收入报表 | 对标相同名称的收支账户 | 税收统计、双向交易分析 |
|
||||
| 预算报表 | 预算执行情况、支出趋势线 | 预算实施评估 |
|
||||
| 分类报表 | 按消费类别统计收支分布 | 消费结构分析 |
|
||||
| 标签报表 | 按自定义标签聚合交易数据 | 灵活维度的支出追踪 |
|
||||
|
||||
**报表时间维度灵活性** 是 Firefly III 的独特优势:
|
||||
- 支持 1 天到 20 年的任意时间范围
|
||||
- 内置魔术词简化日期输入:`currentMonthStart`(本月开始)、`previousMonthEnd`(上月末)、`currentYearStart`(本年开始)、`previousYearStart`(去年开始)
|
||||
- 支持财政年度对比(若用户设置了财政年度参数)
|
||||
|
||||
**数据聚合与对比** 能力包括:
|
||||
- 多账户聚合:支持按 `allAssetAccounts` 包含所有资产账户
|
||||
- 多维度过滤:同时按预算、分类、标签进行交叉分析
|
||||
- URL 参数化:报表链接可直接参数化为书签或自动化工具
|
||||
|
||||
### 自托管部署方案
|
||||
|
||||
#### Docker Compose 部署(推荐)
|
||||
|
||||
Firefly III 官方提供完整的 Docker Compose 配置,三容器架构:
|
||||
|
||||
**容器组成**:
|
||||
1. **应用容器** (`fireflyiii/core:latest`):PHP-FPM + Laravel 应用层
|
||||
2. **数据库容器** (`mariadb:lts`):数据持久化层
|
||||
3. **定时任务容器** (`alpine`):cron 服务器,负责定时触发财务计算任务
|
||||
|
||||
**部署步骤**:
|
||||
|
||||
第一步:下载配置文件
|
||||
```bash
|
||||
# 从官方仓库获取 docker-compose.yml
|
||||
wget -O docker-compose.yml \
|
||||
https://raw.githubusercontent.com/firefly-iii/docker/main/docker-compose.yml
|
||||
|
||||
# 下载 .env 模板(Firefly III 环境变量)
|
||||
wget -O .env \
|
||||
https://raw.githubusercontent.com/firefly-iii/firefly-iii/main/.env.example
|
||||
|
||||
# 下载 .db.env 模板(数据库环境变量)
|
||||
wget -O .db.env \
|
||||
https://raw.githubusercontent.com/firefly-iii/docker/main/.db.env.example
|
||||
```
|
||||
|
||||
第二步:配置环境变量(.env 文件关键参数)
|
||||
|
||||
```env
|
||||
# 应用设置
|
||||
APP_ENV=production
|
||||
APP_DEBUG=false
|
||||
APP_KEY=XxXxXxXxXxXxXxXxXxXxXxXxXxXxXx # 必须恰好 32 位
|
||||
|
||||
# 数据库配置(与 .db.env 保持一致)
|
||||
DB_CONNECTION=mysql
|
||||
DB_HOST=db # Docker 内部主机名
|
||||
DB_PORT=3306
|
||||
DB_DATABASE=firefly
|
||||
DB_USERNAME=firefly
|
||||
DB_PASSWORD=your_secure_password_here # 修改为强密码
|
||||
|
||||
# 中文国际化与时区设置
|
||||
DEFAULT_LANGUAGE=zh_CN
|
||||
DEFAULT_LOCALE=zh_CN
|
||||
TZ=Asia/Shanghai
|
||||
|
||||
# 网络配置
|
||||
APP_URL=http://your-server-ip:port
|
||||
TRUSTED_PROXIES=*
|
||||
|
||||
# 定时任务设置(可选,用于自动化财务计算)
|
||||
STATIC_CRON_TOKEN=YyYyYyYyYyYyYyYyYyYyYyYyYyYyYy # 必须恰好 32 位
|
||||
```
|
||||
|
||||
第三步:配置数据库环境变量(.db.env 文件)
|
||||
|
||||
```env
|
||||
MYSQL_RANDOM_ROOT_PASSWORD=yes
|
||||
MYSQL_DATABASE=firefly
|
||||
MYSQL_USER=firefly
|
||||
MYSQL_PASSWORD=your_secure_password_here # 与 .env 中 DB_PASSWORD 相同
|
||||
```
|
||||
|
||||
第四步:启动服务
|
||||
```bash
|
||||
# 拉取最新镜像并以后台模式启动
|
||||
docker compose -f docker-compose.yml up -d --pull=always
|
||||
|
||||
# 查看启动日志确认初始化进度
|
||||
docker compose -f docker-compose.yml logs -f
|
||||
|
||||
# 首次启动时,等待数据库迁移完成(通常 1-2 分钟)
|
||||
# 日志中看到 "Thank you for installing Firefly III" 表示部署成功
|
||||
```
|
||||
|
||||
第五步:初始化与访问
|
||||
```
|
||||
访问地址:http://your-server-ip:port
|
||||
首次登录:需注册管理员账户(密码需 ≥16 位)
|
||||
语言设置:注册后在偏好设置中选择"中文简体"
|
||||
```
|
||||
|
||||
#### 替代部署方案
|
||||
|
||||
**单容器 Docker 模式** :适用于已有独立数据库的场景
|
||||
```bash
|
||||
docker run -d \
|
||||
-v firefly_iii_upload:/var/www/html/storage/upload \
|
||||
-p 80:8080 \
|
||||
-e APP_KEY=CHANGEME_32_CHARS \
|
||||
-e DB_HOST=your-db-host \
|
||||
-e DB_DATABASE=firefly \
|
||||
-e DB_USERNAME=firefly \
|
||||
-e DB_PASSWORD=password \
|
||||
fireflyiii/core:latest
|
||||
```
|
||||
|
||||
**NAS 原生部署** :极空间、群晖等支持:
|
||||
- 极空间:App Store 中搜索 Firefly III,一键部署
|
||||
- 群晖:Docker 套件 + docker-compose 部署
|
||||
|
||||
**一键部署平台支持**:
|
||||
- Tipi(个人服务器操作系统)
|
||||
- Umbrel(比特币/应用节点系统)
|
||||
- Cloudron(自托管应用平台)
|
||||
- Yunohost(去中心化互联网项目)
|
||||
- Lando(本地开发环境)
|
||||
|
||||
### 使用场景适配度评估
|
||||
|
||||
针对用户提出的五个使用场景逐一分析:
|
||||
|
||||
#### 场景 1:日常记录支出 ✅ 完全满足
|
||||
|
||||
**实现方式**:通过"创建交易"快速录入
|
||||
- 交易表单:支出金额 → 关联账户(微信、支付宝等) → 选择分类(食物、交通等) → 添加标签(按需)
|
||||
- 快速录入:仪表盘右上角 `+` 按钮,点击即录
|
||||
- 定期交易自动化:对于周期性支出(如订阅、话费),设置"定期交易",到期自动记账
|
||||
|
||||
**体验优化**:支持拆分账单(分期付款)、交易规则自动分类、多账户转账追踪
|
||||
|
||||
#### 场景 2:记录每月收入情况 ✅ 完全满足
|
||||
|
||||
**实现方式**:通过"收入"交易类型记录
|
||||
- 月度收入统计:分类报表可按月汇总所有收入来源
|
||||
- 收入来源追踪:每笔收入关联账户(如"工资账户"、"兼职收入"),便于分析多元收入结构
|
||||
- 收入趋势:分类报表可显示收入的月度变化趋势线
|
||||
|
||||
**数据验证**:审计报表可逐笔验证每月收入的账户影响
|
||||
|
||||
#### 场景 3:月度间收支情况对比展示 ✅ 完全满足
|
||||
|
||||
**实现方式**:利用报表的"日期范围"参数
|
||||
```
|
||||
对比方式一:默认财务报表
|
||||
- 生成本月报表:/reports/default/all/currentMonthStart/currentMonthEnd
|
||||
- 生成上月报表:/reports/default/all/previousMonthStart/previousMonthEnd
|
||||
- 用户可在浏览器中并行对比两份报表
|
||||
|
||||
对比方式二:分类报表(更细粒度)
|
||||
- 本月食物支出 vs 上月食物支出
|
||||
- URL 示例:/reports/category/1,2,3/5/currentMonthStart/currentMonthEnd
|
||||
```
|
||||
|
||||
**可视化呈现**:分类报表包含:
|
||||
- 柱状图:月度支出金额对比
|
||||
- 表格:各分类的月度明细
|
||||
- 趋势线:支出变化的发展方向
|
||||
|
||||
**实际场景示例** :
|
||||
- 预算报表实时展示进度条,超支部分变红,清晰呈现"本月用了多少、还剩多少"
|
||||
- 用户可快速对标上月预算执行情况
|
||||
|
||||
#### 场景 4:年份间收支情况对比展示 ✅ 完全满足
|
||||
|
||||
**实现方式**:利用年度魔术词参数
|
||||
```
|
||||
年度对比:
|
||||
- 本年数据:/reports/default/all/currentYearStart/currentYearEnd
|
||||
- 去年数据:/reports/default/all/previousYearStart/previousYearEnd
|
||||
- 财政年度对比:/reports/default/all/currentFiscalYearStart/currentFiscalYearEnd
|
||||
(需用户在偏好设置中设定财政年度起点,如 4 月 1 日)
|
||||
```
|
||||
|
||||
**分析维度**:
|
||||
- **全年收支总览**:默认报表按年份聚合所有账户的收入、支出、净储蓄
|
||||
- **按分类的年度对比**:分类报表可按年份比较各支出大类(如食物、交通)的同比增长
|
||||
- **预算执行评估**:预算报表可评估全年预算执行情况、累计偏差
|
||||
|
||||
**高级对比功能**:
|
||||
- 支持跨年度的自定义时间范围(如 2023-06-01 至 2024-05-31),用于分析"上一个财年"
|
||||
- 支持同时查看多个年份的分类数据(通过多次生成报表后手动对比或导出 Excel)
|
||||
|
||||
#### 场景 5:每月支出数据分析展示 ✅ 完全满足
|
||||
|
||||
**实现方式**:综合应用多种报表维度
|
||||
|
||||
| 分析维度 | 对应报表 | 输出内容 |
|
||||
|---------|--------|--------|
|
||||
| **消费结构分析** | 分类报表 | 各支出类别的占比、金额、占比变化 |
|
||||
| **消费热点识别** | 标签报表 | 按标签聚合(如"在线购物""外出聚餐") |
|
||||
| **商家消费频率** | 分类报表 按支出账户 | 在同一商家的累计消费金额、消费频率、平均金额 |
|
||||
| **预算执行分析** | 预算报表 | 预算使用率、超支原因、与目标的偏差 |
|
||||
| **支出趋势预测** | 分类报表 包含趋势线 | 支出的环比变化、是否存在上升/下降趋势 |
|
||||
| **账户流向分析** | 默认报表 + 审计报表 | 每个账户的月度收支平衡情况 |
|
||||
|
||||
**可视化输出** :
|
||||
- **图表展示**:柱状图(各分类支出对比)、饼图(支出结构占比)、折线图(支出趋势)
|
||||
- **表格明细**:可按金额、名称、反向排序,便于快速定位异常消费
|
||||
- **数据导出**:报表支持导出为 Excel/CSV,用于深度分析或共享
|
||||
|
||||
**实际分析场景示例** :
|
||||
```
|
||||
餐饮消费分析示例:
|
||||
1. 创建分类:"基本生活" → 子分类:"餐饮"
|
||||
2. 创建标签:"早餐""午餐""晚餐""外卖"
|
||||
3. 每笔餐饮支出同时关联"基本生活 - 餐饮" + 对应标签
|
||||
4. 分类报表:查看本月"餐饮"总支出 vs 预算
|
||||
5. 标签报表:看"外卖"支出占比、"早餐"消费频率
|
||||
6. 趋势:对比上月,餐饮支出是否增加
|
||||
7. 商家分析:按支出账户(餐厅名),找出最常消费的地点
|
||||
```
|
||||
|
||||
### Firefly III 的技术优势与局限
|
||||
|
||||
#### 优势
|
||||
|
||||
1. **数据隐私性** :完全自托管,无任何云端数据同步,满足隐私敏感用户的需求
|
||||
2. **API 完整性** :REST JSON API 覆盖几乎所有功能,支持二次开发与自动化集成
|
||||
3. **多币种支持** :原生支持任意币种,包含汇率转换功能
|
||||
4. **复式记账规范** :采用企业财务标准,确保数据一致性和审计可追溯性
|
||||
5. **扩展性** :生态成熟,包括第三方客户端(Abacus)、数据导入工具等
|
||||
|
||||
#### 当前局限
|
||||
|
||||
1. **定期交易汇率转换** :定期交易无法自动完成多币种汇率转换(如美金订阅自动扣款)
|
||||
2. **自动数据导入** :银行数据自动导入功能需要单独的 Data Importer 工具配置,初期有学习成本
|
||||
3. **移动应用缺失** :官方未提供原生移动应用,主要依赖响应式网页(但体验可接受)
|
||||
4. **报表二次修改** :生成的报表无法直接在 UI 中修改,需要通过 URL 参数调整
|
||||
|
||||
### 生产部署建议
|
||||
|
||||
#### 硬件需求
|
||||
|
||||
- **CPU**:1 核心(单用户)或 2 核心(家庭多用户)
|
||||
- **内存**:512 MB 基础 + 数据库缓存 200-500 MB(推荐 2 GB)
|
||||
- **存储**:100 GB 初期足够,视交易量增长后扩容
|
||||
- **网络**:仅需本地 Docker 网络,无需额外带宽
|
||||
|
||||
#### 备份与恢复策略
|
||||
|
||||
```bash
|
||||
# 数据库定期备份(每周一次)
|
||||
docker exec firefly_iii_db mysqldump -u firefly -p firefly > backup_$(date +%Y%m%d).sql
|
||||
|
||||
# 文件上传备份
|
||||
tar -czf firefly_uploads_$(date +%Y%m%d).tar.gz /path/to/firefly_iii_upload/
|
||||
|
||||
# 恢复数据库
|
||||
docker exec firefly_iii_db mysql -u firefly -p firefly < backup_20250105.sql
|
||||
```
|
||||
|
||||
#### 安全加固
|
||||
|
||||
1. **反向代理**:配置 Nginx/Caddy 提供 HTTPS,隐藏 Docker 端口
|
||||
2. **访问控制**:启用 2FA(双因素认证),生成长期 API Token 替代密码
|
||||
3. **定期更新**:`docker compose pull && docker compose up -d --build`
|
||||
|
||||
### 总体评估
|
||||
|
||||
| 评估维度 | 评分 | 说明 |
|
||||
|---------|------|------|
|
||||
| 功能完整性 | ⭐⭐⭐⭐⭐ | 覆盖所有个人财务管理场景,报表体系完善 |
|
||||
| 易用性 | ⭐⭐⭐⭐ | UI 清晰直观,中文支持完整,初期有学习曲线 |
|
||||
| 部署难度 | ⭐⭐⭐⭐⭐ | Docker Compose 一键部署,无需专业运维 |
|
||||
| 数据隐私 | ⭐⭐⭐⭐⭐ | 完全自托管,数据完全本地化 |
|
||||
| 社区活跃度 | ⭐⭐⭐⭐ | 19.5k+ Stars,更新频繁,讨论活跃 |
|
||||
| 性能表现 | ⭐⭐⭐⭐ | 单用户、小家庭无压力,百万级交易需优化 |
|
||||
|
||||
**结论**:Firefly III 是当前开源个人财务管理领域的最佳选择之一,特别适合注重数据隐私、需要复杂财务分析的用户。五个使用场景均被**完全满足**,相关功能已经成熟且经过生产验证。建议采用 Docker Compose 部署方案,可在任何支持 Docker 的环境(云服务器、NAS、家庭 PC)中快速启动。
|
||||
49
0-部署应用/Tokyo-amd64-01/gemini-proxy/docker-compose.yml
Normal file
49
0-部署应用/Tokyo-amd64-01/gemini-proxy/docker-compose.yml
Normal file
@@ -0,0 +1,49 @@
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
# gemini-proxy部署在Oracle-Tokyo-AMD64-01上面,数据库使用Oracle-Tokyo-ARM64-01的PostgreSQL
|
||||
gemini-balance:
|
||||
image: ghcr.io/snailyp/gemini-balance:latest
|
||||
container_name: gemini-balance
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "21800:8000"
|
||||
env_file:
|
||||
- .env
|
||||
# depends_on:
|
||||
# mysql:
|
||||
# condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "python -c \"import requests; exit(0) if requests.get('http://localhost:8000/health').status_code == 200 else exit(1)\""]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
# start_period: 10s
|
||||
# mysql:
|
||||
# image: mysql:8
|
||||
# container_name: gemini-balance-mysql
|
||||
# restart: unless-stopped
|
||||
# environment:
|
||||
# MYSQL_ROOT_PASSWORD: V2ryStr@Pass
|
||||
# MYSQL_DATABASE: ${MYSQL_DATABASE}
|
||||
# MYSQL_USER: ${MYSQL_USER}
|
||||
# MYSQL_PASSWORD: ${MYSQL_PASSWORD}
|
||||
# # ports:
|
||||
# # - "3306:3306"
|
||||
# volumes:
|
||||
# - mysql_data:/var/lib/mysql
|
||||
# healthcheck:
|
||||
# test: ["CMD", "mysqladmin", "ping", "-h", "127.0.0.1"]
|
||||
# interval: 10s # 每隔10秒检查一次
|
||||
# timeout: 5s # 每次检查的超时时间为5秒
|
||||
# retries: 3 # 重试3次失败后标记为 unhealthy
|
||||
# start_period: 30s # 容器启动后等待30秒再开始第一次健康检查
|
||||
# adminer:
|
||||
# image: adminer:latest
|
||||
# container_name: gemini-balance-adminer
|
||||
# restart: unless-stopped
|
||||
# ports:
|
||||
# - "8080:8080"
|
||||
# depends_on:
|
||||
# mysql:
|
||||
# condition: service_healthy
|
||||
107
0-部署应用/Tokyo-amd64-01/gemini-proxy/env.local
Normal file
107
0-部署应用/Tokyo-amd64-01/gemini-proxy/env.local
Normal file
@@ -0,0 +1,107 @@
|
||||
# 数据库配置
|
||||
DATABASE_TYPE=sqlite
|
||||
SQLITE_DATABASE=gemini-proxy
|
||||
# gemini-proxy部署在Oracle-Osaka-AMD64-01上面
|
||||
# MYSQL_HOST=10.0.0.193
|
||||
#MYSQL_SOCKET=/run/mysqld/mysqld.sock
|
||||
# MYSQL_PORT=5432
|
||||
# MYSQL_USER=bn_gitea
|
||||
# MYSQL_PASSWORD=Superwdd.12
|
||||
# MYSQL_DATABASE=gitea_db
|
||||
API_KEYS=["AIzaSyAZM_mPPWcFzf8c3TEFgxSBTwQQeu4k3wg","AIzaSyBv2JN5aY_OKDI5e1aVEf6uDQli65X9NZM","AIzaSyA52JsP4WtAAjHRI3WjyVsL4UJUlPg9IkE","AIzaSyDWb85Lv7qxz9XK0u3DKX35rY8OhN73nVM","AIzaSyCXZWTFRpx_5P_Yg7mIFj5atJsHyJl-yKw"]
|
||||
ALLOWED_TOKENS=["cs-sk-0c962c6a-80ac-46ed-8a22-d3770b1f868d"]
|
||||
AUTH_TOKEN=cs-sk-0c962c6a-80ac-46ed-8a22-d3770b1f868d
|
||||
|
||||
|
||||
# For Vertex AI Platform API Keys
|
||||
VERTEX_API_KEYS=["AQ.Abxxxxxxxxxxxxxxxxxxx"]
|
||||
# For Vertex AI Platform Express API Base URL
|
||||
VERTEX_EXPRESS_BASE_URL=https://aiplatform.googleapis.com/v1beta1/publishers/google
|
||||
TEST_MODEL=gemini-2.5-flash-lite
|
||||
THINKING_MODELS=["gemini-2.5-flash","gemini-2.5-pro"]
|
||||
THINKING_BUDGET_MAP={"gemini-2.5-flash": -1}
|
||||
IMAGE_MODELS=["gemini-2.0-flash-exp", "gemini-2.5-flash-image-preview"]
|
||||
SEARCH_MODELS=["gemini-2.5-flash","gemini-2.5-pro"]
|
||||
FILTERED_MODELS=["gemini-1.0-pro-vision-latest", "gemini-pro-vision", "chat-bison-001", "text-bison-001", "embedding-gecko-001"]
|
||||
# 是否启用网址上下文,默认启用
|
||||
URL_CONTEXT_ENABLED=false
|
||||
URL_CONTEXT_MODELS=["gemini-2.5-pro","gemini-2.5-flash","gemini-2.5-flash-lite","gemini-2.0-flash","gemini-2.0-flash-live-001"]
|
||||
TOOLS_CODE_EXECUTION_ENABLED=false
|
||||
SHOW_SEARCH_LINK=true
|
||||
SHOW_THINKING_PROCESS=true
|
||||
BASE_URL=https://generativelanguage.googleapis.com/v1beta
|
||||
MAX_FAILURES=5
|
||||
MAX_RETRIES=5
|
||||
CHECK_INTERVAL_HOURS=1
|
||||
TIMEZONE=Asia/Shanghai
|
||||
# 请求超时时间(秒)
|
||||
TIME_OUT=300
|
||||
# 代理服务器配置 (支持 http 和 socks5)
|
||||
# 示例: PROXIES=["http://user:pass@host:port", "socks5://host:port"]
|
||||
PROXIES=[]
|
||||
# 对同一个API_KEY使用代理列表中固定的IP策略
|
||||
PROXIES_USE_CONSISTENCY_HASH_BY_API_KEY=true
|
||||
|
||||
#########################image_generate 相关配置###########################
|
||||
# PAID_KEY=AIzaSyxxxxxxxxxxxxxxxxxxx
|
||||
# CREATE_IMAGE_MODEL=imagen-3.0-generate-002
|
||||
# UPLOAD_PROVIDER=smms
|
||||
# SMMS_SECRET_TOKEN=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
# PICGO_API_KEY=xxxx
|
||||
# PICGO_API_URL=https://www.picgo.net/api/1/upload
|
||||
# CLOUDFLARE_IMGBED_URL=https://xxxxxxx.pages.dev/upload
|
||||
# CLOUDFLARE_IMGBED_AUTH_CODE=xxxxxxxxx
|
||||
# CLOUDFLARE_IMGBED_UPLOAD_FOLDER=
|
||||
# 阿里云OSS配置
|
||||
# OSS_ENDPOINT=oss-cn-shanghai.aliyuncs.com
|
||||
# OSS_ENDPOINT_INNER=oss-cn-shanghai-internal.aliyuncs.com
|
||||
# OSS_ACCESS_KEY=LTAI5txxxxxxxxxxxxxxxx
|
||||
# OSS_ACCESS_KEY_SECRET=yXxxxxxxxxxxxxxxxxxxxxx
|
||||
# OSS_BUCKET_NAME=your-bucket-name
|
||||
# OSS_REGION=cn-shanghai
|
||||
##########################################################################
|
||||
#########################stream_optimizer 相关配置########################
|
||||
STREAM_OPTIMIZER_ENABLED=false
|
||||
STREAM_MIN_DELAY=0.016
|
||||
STREAM_MAX_DELAY=0.024
|
||||
STREAM_SHORT_TEXT_THRESHOLD=10
|
||||
STREAM_LONG_TEXT_THRESHOLD=50
|
||||
STREAM_CHUNK_SIZE=5
|
||||
##########################################################################
|
||||
######################### 日志配置 #######################################
|
||||
# 日志级别 (debug, info, warning, error, critical),默认为 info
|
||||
LOG_LEVEL=info
|
||||
# 是否记录错误日志的请求体(可能包含敏感信息),默认 false
|
||||
ERROR_LOG_RECORD_REQUEST_BODY=false
|
||||
# 是否开启自动删除错误日志
|
||||
AUTO_DELETE_ERROR_LOGS_ENABLED=true
|
||||
# 自动删除多少天前的错误日志 (1, 7, 30)
|
||||
AUTO_DELETE_ERROR_LOGS_DAYS=7
|
||||
# 是否开启自动删除请求日志
|
||||
AUTO_DELETE_REQUEST_LOGS_ENABLED=false
|
||||
# 自动删除多少天前的请求日志 (1, 7, 30)
|
||||
AUTO_DELETE_REQUEST_LOGS_DAYS=30
|
||||
##########################################################################
|
||||
|
||||
# 假流式配置 (Fake Streaming Configuration)
|
||||
# 是否启用假流式输出
|
||||
FAKE_STREAM_ENABLED=True
|
||||
# 假流式发送空数据的间隔时间(秒)
|
||||
FAKE_STREAM_EMPTY_DATA_INTERVAL_SECONDS=5
|
||||
|
||||
# 安全设置 (JSON 字符串格式)
|
||||
# 注意:这里的示例值可能需要根据实际模型支持情况调整
|
||||
SAFETY_SETTINGS=[{"category": "HARM_CATEGORY_HARASSMENT", "threshold": "OFF"}, {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "OFF"}, {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "OFF"}, {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "OFF"}, {"category": "HARM_CATEGORY_CIVIC_INTEGRITY", "threshold": "BLOCK_NONE"}]
|
||||
URL_NORMALIZATION_ENABLED=false
|
||||
# tts配置
|
||||
TTS_MODEL=gemini-2.5-flash-preview-tts
|
||||
TTS_VOICE_NAME=Zephyr
|
||||
TTS_SPEED=normal
|
||||
#########################Files API 相关配置########################
|
||||
# 是否启用文件过期自动清理
|
||||
FILES_CLEANUP_ENABLED=true
|
||||
# 文件过期清理间隔(小时)
|
||||
FILES_CLEANUP_INTERVAL_HOURS=1
|
||||
# 是否启用用户文件隔离(每个用户只能看到自己上传的文件)
|
||||
FILES_USER_ISOLATION_ENABLED=true
|
||||
##########################################################################
|
||||
@@ -9,6 +9,8 @@ services:
|
||||
- POSTGRESQL_USERNAME=bn_gitea
|
||||
- POSTGRESQL_PASSWORD=Superwdd.12
|
||||
# ALLOW_EMPTY_PASSWORD is recommended only for development.
|
||||
ports:
|
||||
- '5432:5432'
|
||||
gitea:
|
||||
image: docker.io/bitnami/gitea:1.24.5-debian-12-r0
|
||||
volumes:
|
||||
|
||||
Reference in New Issue
Block a user