Compare commits

...

13 Commits

Author SHA1 Message Date
zeaslity
837bdb4a91 Completed migration of the Bitsflow family cloud 2025-12-08 08:56:48 +08:00
zeaslity
dcc8afffba Completed migration of the Bitsflow family cloud 2025-12-08 08:56:23 +08:00
zeaslity
9d93a1ee6e Finished setting up the CloudCone backup server 2025-09-03 14:14:19 +08:00
zeaslity
b5e802ebc3 Added a large amount of content 2025-09-01 16:52:17 +08:00
zeaslity
49803fa5ac New clash rules - polished 2025-08-24 19:14:26 +08:00
zeaslity
b9be57adfc New clash rules; added CN2 GIA nodes 2025-08-22 18:00:42 +08:00
zeaslity
4313a200c0 Hong Kong node - added forwarding 2025-08-21 14:45:37 +08:00
zeaslity
1dc9314773 Added Japan nodes 2025-08-20 17:00:46 +08:00
zeaslity
d6025287a0 Large batch of updates 2025-07-10 16:49:54 +08:00
zeaslity
ee93d8dc8c Large batch of updates 2025-03-07 17:14:52 +08:00
zeaslity
21ff6a711d Large batch of changes 2025-02-18 09:49:51 +08:00
zeaslity
7db1e0f565 Large batch of updates 2025-01-20 16:38:08 +08:00
zeaslity
ed9e0947e6 1 2024-11-28 20:17:04 +08:00
152 changed files with 17438 additions and 6415 deletions

View File

@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="dataSourceStorageLocal" created-in="IU-242.23726.103">
<component name="dataSourceStorageLocal" created-in="IU-252.28238.7">
<data-source name="腾讯云-成都" uuid="79c9466f-d8a3-418a-b54a-f6e314306a0c">
<database-info product="MySQL" version="8.0.27" jdbc-version="4.2" driver-name="MySQL Connector/J" driver-version="mysql-connector-java-8.0.25 (Revision: 08be9e9b4cba6aa115f9b27b215887af40b159e0)" dbms="MYSQL" exact-version="8.0.27" exact-driver-version="8.0">
<extra-name-characters>#@</extra-name-characters>

.idea/modules.xml generated
View File

@@ -2,7 +2,7 @@
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/Shell.iml" filepath="$PROJECT_DIR$/.idea/Shell.iml" />
<module fileurl="file://$PROJECT_DIR$/.idea/shell-script.iml" filepath="$PROJECT_DIR$/.idea/shell-script.iml" />
</modules>
</component>
</project>

View File

@@ -0,0 +1,271 @@
#!/usr/bin/env bash
# =============================================================================
# Meta : Shared function and variable library
# Version : 1.0.0
# Author : Bash Shell Senior Development Engineer
# License : MIT
# Description : Provides standardized logging, remote execution, encryption and cloud-storage replica management for the backup script suite.
# =============================================================================
#------------------------------------------------------------------------------
# Shell strict mode
# -e: exit immediately when a command fails
# -u: treat unset variables as errors
# -o pipefail: a pipeline fails if any command in it fails
#------------------------------------------------------------------------------
set -euo pipefail
IFS=$'\n\t'
#------------------------------------------------------------------------------
# Global constants
#------------------------------------------------------------------------------
# > Base paths
readonly SCRIPT_RUN_DIR="/root/wdd/backup"
readonly LOG_DIR="/root/wdd/backup/logs"
# > Common configuration
readonly REMOTE_SSH_PORT="22333"
readonly ENCRYPTION_PASSWORD_7ZIP="SuperWdd.CCC.123" # !!! Be sure to change this to a strong password !!!
readonly RCLONE_REMOTE_REPO="gd-zeaslity:CloneCone-BackUp" # rclone remote name and path
# > Log level constants
readonly LOG_LEVEL_DEBUG=0
readonly LOG_LEVEL_INFO=1
readonly LOG_LEVEL_WARN=2
readonly LOG_LEVEL_ERROR=3
# > Default log level (may be overridden by the calling script)
CURRENT_LOG_LEVEL=${LOG_LEVEL_INFO}
# > Color definitions
readonly C_RED='\033[0;31m'
readonly C_GREEN='\033[0;32m'
readonly C_YELLOW='\033[1;33m'
readonly C_BLUE='\033[0;34m'
readonly C_NC='\033[0m'
#------------------------------------------------------------------------------
# Dependency checks
#------------------------------------------------------------------------------
if ! command -v 7z &> /dev/null || ! command -v rclone &> /dev/null || ! command -v ssh &> /dev/null; then
echo -e "${C_RED}[ERROR] Essential commands (7z, rclone, ssh) are not installed. Aborting.${C_NC}" >&2
exit 1
fi
# =============================================================================
# Function definitions
# =============================================================================
###
# Description: write a standardized, leveled log entry
# @param level <string> log level (DEBUG/INFO/WARN/ERROR)
# @param message <string> the message to log
# @return <0> success
# @require LOG_DIR, CURRENT_LOG_LEVEL
###
log_message() {
local level="$1"
local message="$2"
local log_level_value
local log_file
log_file="${LOG_DIR}/backup_$(date +%Y%m%d).log"
mkdir -p "${LOG_DIR}"
case "${level}" in
"DEBUG") log_level_value=${LOG_LEVEL_DEBUG} ;;
"INFO") log_level_value=${LOG_LEVEL_INFO} ;;
"WARN") log_level_value=${LOG_LEVEL_WARN} ;;
"ERROR") log_level_value=${LOG_LEVEL_ERROR} ;;
*) log_level_value=${LOG_LEVEL_INFO} ;;
esac
if [[ ${CURRENT_LOG_LEVEL} -le ${log_level_value} ]]; then
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local color_prefix="${C_GREEN}"
case "${level}" in
"DEBUG") color_prefix="${C_BLUE}" ;;
"INFO") color_prefix="${C_GREEN}" ;;
"WARN") color_prefix="${C_YELLOW}" ;;
"ERROR") color_prefix="${C_RED}" ;;
esac
# > Format the log entry
local log_entry
log_entry=$(printf "[%-5s] %s: %s" "${level}" "${timestamp}" "${message}")
# > Print to stdout/stderr
echo -e "${color_prefix}${log_entry}${C_NC}"
# > Persist INFO and above to the log file
if [[ ${log_level_value} -ge ${LOG_LEVEL_INFO} ]]; then
echo "${log_entry}" >> "${log_file}"
fi
fi
return 0
}
###
# Description: safely execute a command on a remote host over SSH
# @param remote_user <string> remote username
# @param remote_host <string> remote hostname or IP address
# @param remote_command <string> command to execute
# @param ssh_port <string> SSH port (optional, defaults to 22333)
# @return <exit_code> exit code of the remote command
# @require REMOTE_SSH_PORT, ssh client
###
execute_remote_command() {
local remote_user="$1"
local remote_host="$2"
local remote_command="$3"
local ssh_port=${4:-${REMOTE_SSH_PORT}}
log_message "DEBUG" "Executing on [${remote_user}@${remote_host}:${ssh_port}]: ${remote_command}"
# > Capture the exit code with `|| ...` so a remote failure is logged instead
# > of silently aborting the script under `set -e`
local exit_code=0
ssh -p "${ssh_port}" "${remote_user}@${remote_host}" "${remote_command}" || exit_code=$?
if [[ ${exit_code} -ne 0 ]]; then
log_message "ERROR" "Remote command failed with exit code ${exit_code}."
return ${exit_code}
fi
log_message "DEBUG" "Remote command executed successfully."
return 0
}
###
# Description: encrypt and compress a directory with 7zip
# @param source_directory <string> source directory to compress
# @param archive_path <string> full path of the resulting encrypted archive
# @return <0> success | >0 failure
# @require ENCRYPTION_PASSWORD_7ZIP, 7z command
###
encrypt_with_7zip() {
local source_directory="$1"
local archive_path="$2"
if [[ ! -d "${source_directory}" ]]; then
log_message "ERROR" "Source directory for encryption does not exist: ${source_directory}"
return 1
fi
log_message "INFO" "Encrypting '${source_directory}' to '${archive_path}'..."
# > -mhe=on: encrypt archive headers so the file list is not leaked
# > -p: supply the password
local exit_code=0
7z a -mhe=on -p"${ENCRYPTION_PASSWORD_7ZIP}" "${archive_path}" "${source_directory}"/* || exit_code=$?
if [[ ${exit_code} -ne 0 ]]; then
log_message "ERROR" "7zip encryption failed with exit code ${exit_code}."
return ${exit_code}
fi
log_message "INFO" "Encryption completed successfully."
return 0
}
###
# Description: copy a local file to the rclone remote
# @param source_file <string> local source file path
# @param remote_destination <string> rclone remote destination (e.g., "google-drive:backup/app1/")
# @return <0> success | >0 failure
# @require rclone command
###
rclone_copy() {
local source_file="$1"
local remote_destination="$2"
if [[ ! -f "${source_file}" ]]; then
log_message "ERROR" "Source file for rclone copy does not exist: ${source_file}"
return 1
fi
log_message "INFO" "Copying '${source_file}' to remote '${remote_destination}'..."
local exit_code=0
rclone copy -P "${source_file}" "${remote_destination}" || exit_code=$?
if [[ ${exit_code} -ne 0 ]]; then
log_message "ERROR" "rclone copy failed with exit code ${exit_code}."
return ${exit_code}
fi
log_message "INFO" "rclone copy completed successfully."
return 0
}
###
# Description: cap the number of replicas in the rclone remote by deleting the oldest ones
# @param remote_path <string> directory path inside the remote repository
# @param file_prefix <string> filename prefix whose replica count is managed
# @param max_replicas <integer> maximum number of replicas to keep
# @return <0> success | >0 failure
# @require rclone command
###
rclone_control_replicas() {
local remote_path="$1"
local file_prefix="$2"
local max_replicas="$3"
log_message "INFO" "Checking replicas for '${file_prefix}*' in '${remote_path}'. Max allowed: ${max_replicas}."
# > List remote files with their modification times
local remote_files
remote_files=$(rclone lsf --format "tp" "${remote_path}" | grep "${file_prefix}" || true)
if [[ -z "${remote_files}" ]]; then
log_message "INFO" "No remote files found with prefix '${file_prefix}'. Nothing to do."
return 0
fi
local file_count
file_count=$(echo "${remote_files}" | wc -l)
if [[ ${file_count} -le ${max_replicas} ]]; then
log_message "INFO" "Current replica count (${file_count}) is within the limit (${max_replicas})."
return 0
fi
local files_to_delete_count
files_to_delete_count=$((file_count - max_replicas))
log_message "WARN" "Exceeding replica limit. Need to delete ${files_to_delete_count} oldest file(s)."
# > Sort chronologically (the ISO timestamp leads each "time;path" line from
# > `lsf --format "tp"`) and extract the path field of the oldest entries
local files_to_delete
files_to_delete=$(echo "${remote_files}" | sort | head -n "${files_to_delete_count}" | awk -F';' '{print $2}')
for file in ${files_to_delete}; do
log_message "INFO" "Deleting oldest replica: ${file}"
if ! rclone deletefile "${remote_path}/${file}"; then
log_message "ERROR" "Failed to delete remote file: ${file}"
# > Keep trying to delete the remaining files instead of failing immediately
fi
done
log_message "INFO" "Replica control process finished."
return 0
}
###
# Description: remove all encrypted .7z archives from a directory
# @param target_directory <string> directory to clean up
# @return <0> success
# @require find command
###
cleanup_local_encrypted_files() {
local target_directory="$1"
log_message "INFO" "Cleaning up local encrypted files (*.7z) in '${target_directory}'..."
find "${target_directory}" -maxdepth 1 -type f -name "*.7z" -delete
log_message "INFO" "Local cleanup finished."
return 0
}
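# Usage sketch (assumed layout): a backup script sources this library and then
# composes the helpers; the "app" names below are placeholders, e.g.
#   source /root/wdd/backup/common.sh
#   archive="${SCRIPT_RUN_DIR}/app-backup-$(date +%Y%m%d).7z"
#   encrypt_with_7zip "/data/app" "${archive}"
#   rclone_copy "${archive}" "${RCLONE_REMOTE_REPO}"
#   rclone_control_replicas "${RCLONE_REMOTE_REPO}" "app-backup-" 3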

View File

@@ -0,0 +1,264 @@
#!/usr/bin/env bash
#
# Gitea remote backup script
# Author: System Administrator
# Version: 1.0.0
# License: MIT
#
# Description: run the Gitea backup remotely over SSH and sync the backup file to the local host
# Dependencies: ssh, rsync, docker, date, grep, awk and other base tools
set -euo pipefail
IFS=$'\n\t'
################################################################################
# Global constants
################################################################################
readonly REMOTE_PORT="22333"
readonly REMOTE_HOST="t0"
readonly SCRIPT_DIR="/root/wdd/backup"
readonly REMOTE_GITEA_CONTAINER="gitea-gitea-1"
readonly REMOTE_GITEA_CONFIG="/bitnami/gitea/custom/conf/app.ini"
readonly REMOTE_BACKUP_SOURCE="/data/gitea/gitea_data/data/tmp/gitea-dump-*.zip"
readonly LOCAL_BACKUP_TARGET="/data/t0_150_230_198_103/gitea/"
# > Log configuration
readonly LOG_DIR="${SCRIPT_DIR}/logs"
readonly LOG_FILE="${LOG_DIR}/gitea_backup_$(date +%Y%m%d).log"
# Log level constants
readonly LOG_LEVEL_DEBUG=0
readonly LOG_LEVEL_INFO=1
readonly LOG_LEVEL_WARN=2
readonly LOG_LEVEL_ERROR=3
# Current log level (default: INFO)
CURRENT_LOG_LEVEL=${LOG_LEVEL_INFO}
################################################################################
# Function declarations
################################################################################
# Write a formatted log message (to both console and log file)
# @param level string log level: DEBUG/INFO/WARN/ERROR
# @param message string log message content
# @return void
# @require CURRENT_LOG_LEVEL, LOG_FILE
log_message() {
local level="$1"
local message="$2"
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local log_entry=""
case "$level" in
"DEBUG")
if [ "${CURRENT_LOG_LEVEL}" -le ${LOG_LEVEL_DEBUG} ]; then
log_entry="[DEBUG][${timestamp}] ${message}"
echo "${log_entry}"
echo "${log_entry}" >> "${LOG_FILE}"
fi
;;
"INFO")
if [ "${CURRENT_LOG_LEVEL}" -le ${LOG_LEVEL_INFO} ]; then
log_entry="[INFO][${timestamp}] ${message}"
echo "${log_entry}"
echo "${log_entry}" >> "${LOG_FILE}"
fi
;;
"WARN")
if [ "${CURRENT_LOG_LEVEL}" -le ${LOG_LEVEL_WARN} ]; then
log_entry="[WARN][${timestamp}] ${message}"
echo "${log_entry}" >&2
echo "${log_entry}" >> "${LOG_FILE}"
fi
;;
"ERROR")
if [ "${CURRENT_LOG_LEVEL}" -le ${LOG_LEVEL_ERROR} ]; then
log_entry="[ERROR][${timestamp}] ${message}"
echo "${log_entry}" >&2
echo "${log_entry}" >> "${LOG_FILE}"
fi
;;
*)
log_entry="[UNKNOWN][${timestamp}] ${message}"
echo "${log_entry}" >&2
echo "${log_entry}" >> "${LOG_FILE}"
;;
esac
}
###
# Run a command on the remote host over SSH
# @param command string remote command to execute
# @return int exit code of the command
# @require REMOTE_HOST, REMOTE_PORT
execute_remote_command() {
local command="$1"
local exit_code=0
log_message "DEBUG" "Executing remote command: ${command}"
# > Run the command on the remote host over SSH; capture the exit code with
# > `|| ...` so a failure is logged instead of aborting under `set -e`
ssh -p "${REMOTE_PORT}" "${REMOTE_HOST}" "${command}" || exit_code=$?
if [ ${exit_code} -ne 0 ]; then
log_message "ERROR" "Remote command failed, exit code: ${exit_code}"
return ${exit_code}
fi
return 0
}
###
# Run the Gitea backup
# @return int status code
# @require REMOTE_GITEA_CONTAINER, REMOTE_GITEA_CONFIG
perform_gitea_backup() {
local backup_command="docker exec -i ${REMOTE_GITEA_CONTAINER} /opt/bitnami/gitea/bin/gitea dump -c ${REMOTE_GITEA_CONFIG}"
log_message "INFO" "Starting Gitea backup..."
# > Run the Gitea dump command to generate the backup file
if ! execute_remote_command "${backup_command}"; then
log_message "ERROR" "Gitea backup command failed"
return 1
fi
log_message "INFO" "Gitea backup command succeeded"
return 0
}
###
# Rename the backup file (append a timestamp)
# @return int status code
# @require REMOTE_GITEA_CONTAINER
rename_backup_file() {
local rename_command="docker exec -i ${REMOTE_GITEA_CONTAINER} /bin/sh -c \"mv /opt/bitnami/gitea/gitea-dump-*.zip /opt/bitnami/gitea/data/tmp/gitea-dump-\$(date +%Y%m%d-%H%M%S).zip\""
log_message "INFO" "Renaming the backup file..."
# > Rename the backup file inside the container, appending a timestamp
if ! execute_remote_command "${rename_command}"; then
log_message "ERROR" "Failed to rename the backup file"
return 1
fi
log_message "INFO" "Backup file renamed successfully"
return 0
}
###
# Sync the backup file to the local host
# @return int status code
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_BACKUP_SOURCE, LOCAL_BACKUP_TARGET
sync_backup_to_local() {
log_message "INFO" "Starting to sync the backup file to the local host..."
# > Create the local target directory if it does not exist
if [ ! -d "${LOCAL_BACKUP_TARGET}" ]; then
mkdir -p "${LOCAL_BACKUP_TARGET}"
log_message "DEBUG" "Created local directory: ${LOCAL_BACKUP_TARGET}"
fi
# > Sync with rsync, preserving the important attributes; capture the exit
# > code so a failure is logged instead of aborting under `set -e`
local exit_code=0
rsync -avz -e "ssh -p ${REMOTE_PORT}" \
"${REMOTE_HOST}:${REMOTE_BACKUP_SOURCE}" \
"${LOCAL_BACKUP_TARGET}" || exit_code=$?
if [ ${exit_code} -ne 0 ]; then
log_message "ERROR" "rsync failed, exit code: ${exit_code}"
return ${exit_code}
fi
log_message "INFO" "Backup file synced successfully"
return 0
}
###
# Clean up remote backup files
# @return int status code
# @require REMOTE_BACKUP_SOURCE
cleanup_remote_backup() {
local cleanup_command="rm -f ${REMOTE_BACKUP_SOURCE}"
log_message "INFO" "Cleaning up remote backup files..."
# > Delete the temporary backup files on the remote host
if ! execute_remote_command "${cleanup_command}"; then
log_message "ERROR" "Failed to clean up remote backup files"
return 1
fi
log_message "INFO" "Remote backup files cleaned up successfully"
return 0
}
###
# Main function - orchestrates the whole backup flow
# @return int final status code of the script
main() {
local overall_success=true
log_message "INFO" "=== Gitea backup flow started ==="
# Switch to the working directory
cd "${SCRIPT_DIR}" || {
log_message "ERROR" "Cannot switch to working directory: ${SCRIPT_DIR}"
return 1
}
# Run the backup steps
if ! perform_gitea_backup; then
overall_success=false
fi
if ! rename_backup_file; then
overall_success=false
fi
if ! sync_backup_to_local; then
overall_success=false
fi
if ! cleanup_remote_backup; then
overall_success=false
fi
# Summarize the results
if [ "${overall_success}" = true ]; then
log_message "INFO" "=== Gitea backup flow completed ==="
return 0
else
log_message "ERROR" "=== Gitea backup flow partially failed ==="
return 1
fi
}
################################################################################
# Error handling
################################################################################
# Trap signals
trap 'log_message "ERROR" "Script interrupted"; exit 1' INT TERM
################################################################################
# Main execution
################################################################################
# Call graph:
# main -> perform_gitea_backup -> execute_remote_command
# -> rename_backup_file -> execute_remote_command
# -> sync_backup_to_local
# -> cleanup_remote_backup -> execute_remote_command
# Run main
if main; then
log_message "INFO" "脚本执行成功"
exit 0
else
log_message "ERROR" "脚本执行失败"
exit 1
fi

View File

@@ -0,0 +1,361 @@
#!/bin/bash
# =============================================================================
# nextcloud backup script
# Purpose: remote Nextcloud maintenance-mode toggling, database backup, file sync and cleanup
# Version: 1.0.0
# Author: Shell script engineer
# License: MIT License
# Dependencies: ssh, rsync, docker (remote host), mariadb-client (remote host)
# =============================================================================
set -euo pipefail
IFS=$'\n\t'
# > Global constants
readonly SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_DIR="/root/wdd/backup"
readonly LOCK_FILE="/root/wdd/backup/${SCRIPT_NAME}.lock"
# > Remote host configuration
readonly REMOTE_HOST="s5"
readonly REMOTE_PORT="22333"
readonly REMOTE_USER="root"
readonly REMOTE_NEXTCLOUD_DIR="/data/nextcloud"
readonly REMOTE_DB_CONTAINER="nextcloud-db"
readonly REMOTE_WEB_CONTAINER="nextcloud_web"
# > Database configuration
readonly DB_NAME="nextcloud"
readonly DB_USER="nextcloud"
readonly DB_PASSWORD="boge14@Level5"
# > Local configuration
readonly LOCAL_BACKUP_DIR="/data/s5_146-56-159-175/nextcloud"
# > Log configuration
readonly LOG_DIR="${SCRIPT_DIR}/logs"
readonly LOG_FILE="${LOG_DIR}/nextcloud_backup_$(date +%Y%m%d).log"
# > Color definitions
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'
# =============================================================================
# Logging functions
# =============================================================================
###
# Initialize the logging system
# @require none
# @return 0 success | >0 failure
###
init_log_system() {
mkdir -p "${LOG_DIR}" || return 1
touch "${LOG_FILE}" || return 1
return 0
}
###
# Write a log message
# @param level string log level: DEBUG/INFO/WARN/ERROR
# @param message string log message
# @require LOG_FILE
# @return 0 success
###
log_message() {
local level="$1"
local message="$2"
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
case "${level}" in
"DEBUG") echo -e "${BLUE}[DEBUG]${NC} ${timestamp} - ${message}" | tee -a "${LOG_FILE}" ;;
"INFO") echo -e "${GREEN}[INFO]${NC} ${timestamp} - ${message}" | tee -a "${LOG_FILE}" ;;
"WARN") echo -e "${YELLOW}[WARN]${NC} ${timestamp} - ${message}" | tee -a "${LOG_FILE}" >&2 ;;
"ERROR") echo -e "${RED}[ERROR]${NC} ${timestamp} - ${message}" | tee -a "${LOG_FILE}" >&2 ;;
*) echo "${timestamp} - ${message}" | tee -a "${LOG_FILE}" ;;
esac
return 0
}
# =============================================================================
# Utility functions
# =============================================================================
###
# Check whether a command exists
# @param command_name string command name
# @require none
# @return 0 exists | 1 missing
###
check_command() {
local command_name="$1"
if ! command -v "${command_name}" >/dev/null 2>&1; then
log_message "ERROR" "命令不存在: ${command_name}"
return 1
fi
return 0
}
###
# Run a command on the remote host over SSH
# @param command string command to execute
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER
# @return exit code of the remote command
###
execute_remote_command() {
local command="$1"
ssh -p "${REMOTE_PORT}" "${REMOTE_USER}@${REMOTE_HOST}" "${command}"
return $?
}
###
# Create a lock file to prevent concurrent runs
# @require LOCK_FILE
# @return 0 lock acquired | 1 lock already held
###
acquire_lock() {
if [ -e "${LOCK_FILE}" ]; then
log_message "ERROR" "备份任务正在运行或异常退出,请检查锁文件: ${LOCK_FILE}"
return 1
fi
echo "$$" > "${LOCK_FILE}"
trap 'release_lock' EXIT
return 0
}
###
# Release the lock file
# @require LOCK_FILE
# @return 0 success
###
release_lock() {
[ -e "${LOCK_FILE}" ] && rm -f "${LOCK_FILE}"
return 0
}
# =============================================================================
# Core Nextcloud backup functions
# =============================================================================
###
# Enable Nextcloud maintenance mode
# @require execute_remote_command, REMOTE_WEB_CONTAINER
# @return 0 success | >0 failure
###
enable_maintenance_mode() {
log_message "INFO" "启用Nextcloud维护模式..."
local maintenance_cmd="docker exec -u www-data ${REMOTE_WEB_CONTAINER} php occ maintenance:mode --on"
if ! execute_remote_command "${maintenance_cmd}"; then
log_message "ERROR" "启用维护模式失败"
return 1
fi
log_message "INFO" "维护模式已启用"
return 0
}
###
# Disable Nextcloud maintenance mode
# @require execute_remote_command, REMOTE_WEB_CONTAINER
# @return 0 success | >0 failure
###
disable_maintenance_mode() {
log_message "INFO" "Disabling Nextcloud maintenance mode..."
local maintenance_cmd="docker exec -u www-data ${REMOTE_WEB_CONTAINER} php occ maintenance:mode --off"
if ! execute_remote_command "${maintenance_cmd}"; then
log_message "ERROR" "Failed to disable maintenance mode"
return 1
fi
log_message "INFO" "Maintenance mode disabled"
return 0
}
###
# Run the MariaDB database backup on the remote host
# @require execute_remote_command, REMOTE_DB_CONTAINER, DB_NAME, DB_USER, DB_PASSWORD, REMOTE_NEXTCLOUD_DIR
# @return 0 success | >0 failure
###
backup_database() {
log_message "INFO" "开始数据库备份..."
local backup_file="${REMOTE_NEXTCLOUD_DIR}/nextcloud-db_backup_$(date +%Y%m%d-%H%M%S).sql"
local backup_cmd="docker exec ${REMOTE_DB_CONTAINER} mariadb-dump --single-transaction -h localhost -u ${DB_USER} -p'${DB_PASSWORD}' ${DB_NAME} > ${backup_file}"
if ! execute_remote_command "${backup_cmd}"; then
log_message "ERROR" "数据库备份失败"
return 1
fi
# > 验证备份文件是否创建成功
local verify_cmd="[ -f \"${backup_file}\" ] && echo \"exists\" || echo \"missing\""
if [ "$(execute_remote_command "${verify_cmd}")" != "exists" ]; then
log_message "ERROR" "数据库备份文件创建失败"
return 1
fi
log_message "INFO" "数据库备份完成: ${backup_file}"
return 0
}
###
# Sync Nextcloud files to the local host with rsync
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER, REMOTE_NEXTCLOUD_DIR, LOCAL_BACKUP_DIR
# @return 0 success | >0 failure
###
sync_nextcloud_files() {
log_message "INFO" "开始同步Nextcloud文件到本地..."
# > 创建本地暂存目录
mkdir -p "${LOCAL_BACKUP_DIR}" || {
log_message "ERROR" "创建本地暂存目录失败: ${LOCAL_BACKUP_DIR}"
return 1
}
# > 构建rsync命令
local rsync_cmd="rsync -avz --progress -e 'ssh -p ${REMOTE_PORT}'"
rsync_cmd+=" ${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_NEXTCLOUD_DIR}/"
rsync_cmd+=" ${LOCAL_BACKUP_DIR}/"
# > 执行rsync同步
if ! eval "${rsync_cmd}"; then
log_message "ERROR" "Nextcloud文件同步失败"
return 1
fi
log_message "INFO" "Nextcloud文件同步完成"
return 0
}
###
# Delete the database backup file on the remote host
# @require execute_remote_command, REMOTE_NEXTCLOUD_DIR
# @return 0 success | >0 failure
###
remote_cleanup_backup() {
log_message "INFO" "Cleaning up the remote database backup file..."
local cleanup_cmd="rm -f ${REMOTE_NEXTCLOUD_DIR}/nextcloud-db_backup_*.sql"
if ! execute_remote_command "${cleanup_cmd}"; then
log_message "ERROR" "Remote cleanup failed"
return 1
fi
log_message "INFO" "Remote cleanup completed"
return 0
}
###
# Clean up the local staging directory
# @require LOCAL_BACKUP_DIR
# @return 0 success
###
local_cleanup() {
log_message "INFO" "Cleaning up the local staging directory..."
[ -d "${LOCAL_BACKUP_DIR}" ] && rm -rf "${LOCAL_BACKUP_DIR}"
return 0
}
# =============================================================================
# Main execution
# =============================================================================
###
# Main backup flow
# @require all functions above
# @return 0 success | >0 failure
###
main_backup_process() {
log_message "INFO" "=== 开始Nextcloud备份任务 ==="
# > 检查依赖命令
local required_commands=("ssh" "rsync")
for cmd in "${required_commands[@]}"; do
if ! check_command "${cmd}"; then
return 1
fi
done
# > Run the backup steps
local steps=(
enable_maintenance_mode
backup_database
sync_nextcloud_files
remote_cleanup_backup
disable_maintenance_mode
# local_cleanup
)
for step in "${steps[@]}"; do
if ! "${step}"; then
log_message "ERROR" "备份任务失败,正在尝试恢复..."
# > 尝试禁用维护模式
disable_maintenance_mode || true
return 1
fi
done
log_message "INFO" "=== Nextcloud备份任务完成 ==="
return 0
}
# =============================================================================
# Script entry point
# =============================================================================
# > Error handling
trap 'log_message "ERROR" "Script exited abnormally"; disable_maintenance_mode || true; release_lock; exit 1' ERR
# > Main block
main() {
if ! acquire_lock; then
exit 1
fi
if ! init_log_system; then
log_message "ERROR" "日志系统初始化失败"
exit 1
fi
if ! main_backup_process; then
log_message "ERROR" "备份任务执行失败"
exit 1
fi
release_lock
exit 0
}
# > Script entry
main "$@"
# =============================================================================
# Call graph
# =============================================================================
# main
# ├── acquire_lock
# ├── init_log_system
# └── main_backup_process
# ├── check_command (called repeatedly)
# ├── enable_maintenance_mode
# │ └── execute_remote_command
# ├── backup_database
# │ └── execute_remote_command
# ├── sync_nextcloud_files
# ├── remote_cleanup_backup
# │ └── execute_remote_command
# ├── disable_maintenance_mode
# │ └── execute_remote_command
# └── local_cleanup

View File

@@ -0,0 +1,342 @@
#!/bin/bash
# =============================================================================
# b-vault-warden backup script
# Purpose: run the Vaultwarden backup remotely, sync the backup files, encrypt/compress and clean up
# Version: 1.0.0
# Author: Shell script engineer
# License: MIT License
# Dependencies: ssh, rsync, 7zip, docker (remote host)
# =============================================================================
set -euo pipefail
IFS=$'\n\t'
# > Global constants
readonly SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_DIR="/root/wdd/backup"
readonly LOCK_FILE="/root/wdd/backup/${SCRIPT_NAME}.lock"
# > Configuration (could be promoted to environment variables as needed)
readonly REMOTE_HOST="s5"
readonly REMOTE_PORT="22333"
readonly REMOTE_USER="root"
readonly REMOTE_BACKUP_CMD="docker exec vault-warden /vaultwarden backup"
readonly REMOTE_DATA_DIR="/data/vault-warden/persist-data"
readonly LOCAL_BACKUP_DIR="/data/s5_146-56-159-175/vault_warden"
readonly BACKUP_PATTERNS=(
"config.json"
"rsa_key*"
"attachments"
"icon_cache"
"sends"
"db_*.sqlite3"
)
readonly ENCRYPTION_PASSWORD="SuperWdd.123" # > Change this before real use
# > Log configuration
readonly LOG_DIR="${SCRIPT_DIR}/logs"
readonly LOG_FILE="${LOG_DIR}/vault_warden_backup_$(date +%Y%m%d).log"
# > Color definitions
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'
# =============================================================================
# Logging functions
# =============================================================================
###
# Initialize the logging system
# @require none
# @return 0 success | >0 failure
###
init_log_system() {
mkdir -p "${LOG_DIR}" || return 1
touch "${LOG_FILE}" || return 1
return 0
}
###
# Write a log message
# @param level string log level: DEBUG/INFO/WARN/ERROR
# @param message string log message
# @require LOG_FILE
# @return 0 success
###
log_message() {
local level="$1"
local message="$2"
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
case "${level}" in
"DEBUG") echo -e "${BLUE}[DEBUG]${NC} ${timestamp} - ${message}" | tee -a "${LOG_FILE}" ;;
"INFO") echo -e "${GREEN}[INFO]${NC} ${timestamp} - ${message}" | tee -a "${LOG_FILE}" ;;
"WARN") echo -e "${YELLOW}[WARN]${NC} ${timestamp} - ${message}" | tee -a "${LOG_FILE}" >&2 ;;
"ERROR") echo -e "${RED}[ERROR]${NC} ${timestamp} - ${message}" | tee -a "${LOG_FILE}" >&2 ;;
*) echo "${timestamp} - ${message}" | tee -a "${LOG_FILE}" ;;
esac
return 0
}
# =============================================================================
# Utility functions
# =============================================================================
###
# Check whether a command exists
# @param command_name string command name
# @require none
# @return 0 exists | 1 missing
###
check_command() {
local command_name="$1"
if ! command -v "${command_name}" >/dev/null 2>&1; then
log_message "ERROR" "命令不存在: ${command_name}"
return 1
fi
return 0
}
###
# Run a command on the remote host over SSH
# @param command string command to execute
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER
# @return exit code of the remote command
###
execute_remote_command() {
local command="$1"
ssh -p "${REMOTE_PORT}" "${REMOTE_USER}@${REMOTE_HOST}" "${command}"
return $?
}
###
# Create a lock file to prevent concurrent runs
# @require LOCK_FILE
# @return 0 lock acquired | 1 lock already held
###
acquire_lock() {
if [ -e "${LOCK_FILE}" ]; then
log_message "ERROR" "备份任务正在运行或异常退出,请检查锁文件: ${LOCK_FILE}"
return 1
fi
echo "$$" > "${LOCK_FILE}"
trap 'release_lock' EXIT
return 0
}
###
# Release the lock file
# @require LOCK_FILE
# @return 0 success
###
release_lock() {
[ -e "${LOCK_FILE}" ] && rm -f "${LOCK_FILE}"
return 0
}
# =============================================================================
# Core backup functions
# =============================================================================
###
# Run the Vaultwarden backup command on the remote host
# @require execute_remote_command, REMOTE_BACKUP_CMD
# @return 0 success | >0 failure
###
remote_execute_backup() {
log_message "INFO" "开始在远程主机执行Vaultwarden备份..."
if ! execute_remote_command "${REMOTE_BACKUP_CMD}"; then
log_message "ERROR" "远程备份命令执行失败"
return 1
fi
log_message "INFO" "远程备份命令执行成功"
return 0
}
###
# Sync the backup files to the local host with rsync
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER, REMOTE_DATA_DIR, LOCAL_BACKUP_DIR, BACKUP_PATTERNS
# @return 0 success | >0 failure
###
sync_backup_files() {
log_message "INFO" "开始同步备份文件到本地..."
# > 创建本地暂存目录
mkdir -p "${LOCAL_BACKUP_DIR}" || {
log_message "ERROR" "创建本地暂存目录失败: ${LOCAL_BACKUP_DIR}"
return 1
}
# > 构建rsync命令
local rsync_cmd="rsync -avz --progress -e 'ssh -p ${REMOTE_PORT}'"
for pattern in "${BACKUP_PATTERNS[@]}"; do
rsync_cmd+=" ${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_DATA_DIR}/${pattern}"
done
rsync_cmd+=" ${LOCAL_BACKUP_DIR}/"
# > 执行rsync同步
if ! eval "${rsync_cmd}"; then
log_message "ERROR" "文件同步失败"
return 1
fi
log_message "INFO" "文件同步完成"
return 0
}
###
# Encrypt and compress the backup files with 7zip
# @require LOCAL_BACKUP_DIR, ENCRYPTION_PASSWORD
# @return 0 success | >0 failure
###
encrypt_and_compress() {
log_message "INFO" "开始加密压缩备份文件..."
# > 检查7zip命令
if ! check_command "7z"; then
log_message "ERROR" "7zip命令不存在请安装p7zip-full包"
return 1
fi
# > 创建备份目录
mkdir -p "${LOCAL_BACKUP_DIR}" || {
log_message "ERROR" "创建备份目录失败: ${LOCAL_BACKUP_DIR}"
return 1
}
local backup_file="${LOCAL_BACKUP_DIR}/vaultwarden-backup-$(date +%Y%m%d-%H%M%S).7z"
# > 执行加密压缩
if ! (cd "${LOCAL_BACKUP_DIR}" && 7z a -p"${ENCRYPTION_PASSWORD}" -mhe=on "${backup_file}" . >/dev/null); then
log_message "ERROR" "加密压缩失败"
return 1
fi
log_message "INFO" "加密压缩完成: ${backup_file}"
return 0
}
###
# Delete the backed-up database files on the remote host
# @require execute_remote_command, REMOTE_DATA_DIR
# @return 0 success | >0 failure
###
remote_cleanup_backup() {
log_message "INFO" "Cleaning up remote backup files..."
local cleanup_cmd="rm -rf ${REMOTE_DATA_DIR}/db_*.sqlite3"
if ! execute_remote_command "${cleanup_cmd}"; then
log_message "ERROR" "Remote cleanup failed"
return 1
fi
log_message "INFO" "Remote cleanup completed"
return 0
}
###
# Clean up the local staging directory
# @require LOCAL_BACKUP_DIR
# @return 0 success
###
local_cleanup() {
log_message "INFO" "Cleaning up the local staging directory..."
[ -d "${LOCAL_BACKUP_DIR}" ] && rm -rf "${LOCAL_BACKUP_DIR}"
return 0
}
# =============================================================================
# Main execution
# =============================================================================
###
# Main backup flow
# @require all functions above
# @return 0 success | >0 failure
###
main_backup_process() {
log_message "INFO" "=== 开始Vaultwarden备份任务 ==="
# > 检查依赖命令
local required_commands=("ssh" "rsync" "7z")
for cmd in "${required_commands[@]}"; do
if ! check_command "${cmd}"; then
return 1
fi
done
# > Run the backup steps
local steps=(
remote_execute_backup
sync_backup_files
encrypt_and_compress
remote_cleanup_backup
# local_cleanup
)
for step in "${steps[@]}"; do
if ! "${step}"; then
log_message "ERROR" "Backup task failed at step: ${step}"
return 1
fi
done
log_message "INFO" "=== Vaultwarden backup task completed ==="
return 0
}
# =============================================================================
# Script entry point
# =============================================================================
# > Error handling
trap 'log_message "ERROR" "Script exited abnormally"; release_lock; exit 1' ERR
# > Main block
main() {
if ! acquire_lock; then
exit 1
fi
if ! init_log_system; then
log_message "ERROR" "日志系统初始化失败"
exit 1
fi
if ! main_backup_process; then
log_message "ERROR" "备份任务执行失败"
exit 1
fi
release_lock
exit 0
}
# > Script entry
main "$@"
# =============================================================================
# Call graph
# =============================================================================
# main
# ├── acquire_lock
# ├── init_log_system
# └── main_backup_process
# ├── check_command (called repeatedly)
# ├── remote_execute_backup
# │ └── execute_remote_command
# ├── sync_backup_files
# ├── encrypt_and_compress
# │ └── check_command
# ├── remote_cleanup_backup
# │ └── execute_remote_command
# └── local_cleanup

View File

@@ -0,0 +1,90 @@
#!/usr/bin/env bash
# =============================================================================
# Meta : Gitea backup runner script
# Version : 2.0.0
# Author : Bash Shell Senior Development Engineer
# License : MIT
# Description : Automates the remote Gitea backup, sync, encryption, upload and cleanup tasks.
# =============================================================================
source "$(dirname "$0")/common.sh" || { echo "FATAL: common.sh not found." >&2; exit 1; }
#------------------------------------------------------------------------------
# Script configuration
#------------------------------------------------------------------------------
readonly APP_NAME="Gitea"
readonly REMOTE_USER="root"
readonly REMOTE_HOST="t0"
readonly MAX_ENCRYPTED_REPLICAS=4
# > Remote configuration
readonly REMOTE_CONTAINER="gitea-gitea-1"
readonly REMOTE_GITEA_CONF="/bitnami/gitea/custom/conf/app.ini"
readonly REMOTE_TMP_DIR="/opt/bitnami/gitea/data/tmp"
readonly REMOTE_RSYNC_SOURCE_DIR="/data/gitea/gitea_data/data/tmp/" # note the trailing slash
# > Local paths
readonly LOCAL_BACKUP_DIR="/data/t0_150_230_198_103/gitea"
# =============================================================================
# Main execution
# =============================================================================
main() {
trap 'log_message "ERROR" "The ${APP_NAME} backup task hit an error! Aborting."' ERR
log_message "INFO" "====== Starting ${APP_NAME} backup task ======"
# > Step 1: run the Gitea backup command
log_message "INFO" "[Step 1/8] Running the remote ${APP_NAME} backup dump..."
local dump_cmd="docker exec ${REMOTE_CONTAINER} /opt/bitnami/gitea/bin/gitea dump -c ${REMOTE_GITEA_CONF}"
execute_remote_command "${REMOTE_USER}" "${REMOTE_HOST}" "${dump_cmd}"
# > Step 2: move and rename the backup file
log_message "INFO" "[Step 2/8] Moving and renaming the backup file..."
local new_filename="gitea-dump-$(date +%Y%m%d-%H%M%S).zip"
local move_cmd="docker exec ${REMOTE_CONTAINER} /bin/sh -c 'mv /opt/bitnami/gitea/gitea-dump-*.zip ${REMOTE_TMP_DIR}/${new_filename}'"
execute_remote_command "${REMOTE_USER}" "${REMOTE_HOST}" "${move_cmd}"
# > Step 3: rsync the backup file to the local host
log_message "INFO" "[Step 3/8] Copying the backup file locally via rsync..."
mkdir -p "${LOCAL_BACKUP_DIR}"
rsync -avz --progress -e "ssh -p ${REMOTE_SSH_PORT}" \
"${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_RSYNC_SOURCE_DIR}${new_filename}" \
"${LOCAL_BACKUP_DIR}/"
# > Step 4: clean up the remote backup file
log_message "INFO" "[Step 4/8] Cleaning up remote dump file..."
execute_remote_command "${REMOTE_USER}" "${REMOTE_HOST}" "rm -f ${REMOTE_RSYNC_SOURCE_DIR}gitea-dump-*.zip"
# > Step 5: 7zip encryption
local archive_file="${SCRIPT_RUN_DIR}/${APP_NAME}-backup-$(date +%Y%m%d-%H%M%S).7z"
log_message "INFO" "[Step 5/8] Encrypting the local backup directory..."
encrypt_with_7zip "${LOCAL_BACKUP_DIR}" "${archive_file}"
# > Step 6: rclone upload
log_message "INFO" "[Step 6/8] Uploading the encrypted archive to cold storage => ${RCLONE_REMOTE_REPO}..."
rclone_copy "${archive_file}" "${RCLONE_REMOTE_REPO}"
# > Step 7: cap the number of remote replicas
log_message "INFO" "[Step 7/8] Capping cold-backup replica count => ${MAX_ENCRYPTED_REPLICAS}..."
rclone_control_replicas "${RCLONE_REMOTE_REPO}" "${APP_NAME}-backup-" "${MAX_ENCRYPTED_REPLICAS}"
# > Step 8: local cleanup
log_message "INFO" "[Step 8/8] Cleaning up local archives..."
cleanup_local_encrypted_files "${SCRIPT_RUN_DIR}"
# rm -rf "${LOCAL_BACKUP_DIR}"
log_message "INFO" "====== ${APP_NAME} 备份任务已全部完成! ======"
}
# =============================================================================
# Script entry point
# =============================================================================
# Call graph
# main
# ├─ execute_remote_command (4)
# ├─ encrypt_with_7zip
# ├─ rclone_copy
# ├─ rclone_control_replicas
# └─ cleanup_local_encrypted_files
# =============================================================================
main "$@"

View File

@@ -0,0 +1,111 @@
#!/usr/bin/env bash
# =============================================================================
# Meta : NextCloud backup runner script
# Version : 2.0.0
# Author : Bash Shell Senior Development Engineer
# License : MIT
# Description : Automates NextCloud maintenance-mode toggling, database and file backup, encryption, upload and cleanup.
# =============================================================================
source "$(dirname "$0")/common.sh" || { echo "FATAL: common.sh not found." >&2; exit 1; }
#------------------------------------------------------------------------------
# Script configuration
#------------------------------------------------------------------------------
readonly APP_NAME="NextCloud"
readonly REMOTE_USER="root"
readonly REMOTE_HOST="s5"
readonly MAX_ENCRYPTED_REPLICAS=3
# > Remote configuration
readonly REMOTE_WEB_CONTAINER="nextcloud_web"
readonly REMOTE_DB_CONTAINER="nextcloud-db"
readonly REMOTE_DATA_DIR="/data/nextcloud"
readonly DB_USER="nextcloud"
readonly DB_PASSWORD="boge14@Level5" # consider a safer way to manage this password
readonly DB_NAME="nextcloud"
# > Local paths
readonly LOCAL_BACKUP_DIR="/data/s5_146-56-159-175/nextcloud"
# =============================================================================
# Core functions
# =============================================================================
###
# Description: toggle Nextcloud maintenance mode (on/off)
# @param mode <string> mode, 'on' or 'off'
# @return <0> success | >0 failure
###
toggle_maintenance_mode() {
local mode="$1"
log_message "INFO" "Setting maintenance mode to '${mode}'..."
execute_remote_command "${REMOTE_USER}" "${REMOTE_HOST}" \
"docker exec -u www-data ${REMOTE_WEB_CONTAINER} php occ maintenance:mode --${mode}"
}
# =============================================================================
# Main execution
# =============================================================================
main() {
# > Set a trap so maintenance mode is always turned back off on failure
trap 'log_message "ERROR" "The ${APP_NAME} backup task aborted. Attempting to disable maintenance mode..."; toggle_maintenance_mode "off" || true; exit 1' ERR
log_message "INFO" "====== Starting ${APP_NAME} backup task ======"
# > Step 1: enable maintenance mode
log_message "INFO" "[Step 1/8] Enabling maintenance mode..."
toggle_maintenance_mode "on"
# > Step 2: database backup
log_message "INFO" "[Step 2/8] Running the remote database backup..."
local db_backup_file="${REMOTE_DATA_DIR}/nextcloud-db_backup_$(date +%Y%m%d-%H%M%S).sql"
local db_backup_cmd="docker exec ${REMOTE_DB_CONTAINER} mariadb-dump --single-transaction -h localhost -u ${DB_USER} -p'${DB_PASSWORD}' ${DB_NAME} > ${db_backup_file}"
execute_remote_command "${REMOTE_USER}" "${REMOTE_HOST}" "${db_backup_cmd}"
# > Step 3: rsync the backup files
log_message "INFO" "[Step 3/8] Copying the remote backup files via rsync..."
mkdir -p "${LOCAL_BACKUP_DIR}"
rsync -avz --progress -e "ssh -p ${REMOTE_SSH_PORT}" \
"${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_DATA_DIR}/" \
"${LOCAL_BACKUP_DIR}/"
# > Step 4: clean up the remote database backup
log_message "INFO" "[Step 4/8] Cleaning up the remote database backup..."
execute_remote_command "${REMOTE_USER}" "${REMOTE_HOST}" "rm -f ${REMOTE_DATA_DIR}/nextcloud-db_backup_*.sql"
# > Step 5: disable maintenance mode
log_message "INFO" "[Step 5/8] Disabling maintenance mode..."
toggle_maintenance_mode "off"
# > Step 6: 7zip encryption
local archive_file="${SCRIPT_RUN_DIR}/${APP_NAME}-backup-$(date +%Y%m%d-%H%M%S).7z"
log_message "INFO" "[Step 6/8] Encrypting the local directory with 7zip..."
encrypt_with_7zip "${LOCAL_BACKUP_DIR}" "${archive_file}"
# > Step 7: rclone upload
log_message "INFO" "[Step 7/8] Uploading the encrypted archive to cold storage => ${RCLONE_REMOTE_REPO}..."
rclone_copy "${archive_file}" "${RCLONE_REMOTE_REPO}"
# > Step 8: cap the replica count and clean up locally
log_message "INFO" "[Step 8/8] Capping cold-backup replica count => ${MAX_ENCRYPTED_REPLICAS}..."
rclone_control_replicas "${RCLONE_REMOTE_REPO}" "${APP_NAME}-backup-" "${MAX_ENCRYPTED_REPLICAS}"
cleanup_local_encrypted_files "${SCRIPT_RUN_DIR}"
# rm -rf "${LOCAL_BACKUP_DIR}"
log_message "INFO" "====== ${APP_NAME} 备份任务已全部完成! ======"
}
# =============================================================================
# Script entry point
# =============================================================================
# Call graph
# main
# ├─ toggle_maintenance_mode (2)
# │ └─ execute_remote_command
# ├─ execute_remote_command (2)
# ├─ encrypt_with_7zip
# ├─ rclone_copy
# ├─ rclone_control_replicas
# └─ cleanup_local_encrypted_files
# =============================================================================
main "$@"

View File

@@ -0,0 +1,85 @@
#!/usr/bin/env bash
# =============================================================================
# Meta : Vault-Warden backup runner script
# Version : 2.0.0
# Author : Bash Shell Senior Development Engineer
# License : MIT
# Description : Automates the remote Vaultwarden backup, sync, encryption, upload and cleanup tasks.
# =============================================================================
# > Import the shared library
source "$(dirname "$0")/common.sh" || { echo "FATAL: common.sh not found." >&2; exit 1; }
#------------------------------------------------------------------------------
# Script configuration
#------------------------------------------------------------------------------
readonly APP_NAME="VaultWarden"
readonly REMOTE_USER="root"
readonly REMOTE_HOST="s5"
readonly MAX_ENCRYPTED_REPLICAS=5 # maximum number of encrypted replicas kept remotely
# > Remote paths
readonly REMOTE_BACKUP_CMD="docker exec vault-warden /vaultwarden backup"
readonly REMOTE_DATA_DIR="/data/vault-warden/persist-data"
readonly REMOTE_DB_BACKUP_GLOB="${REMOTE_DATA_DIR}/db_*.sqlite3"
# > Local paths
readonly LOCAL_BACKUP_DIR="/data/s5_146-56-159-175/vault-warden"
# =============================================================================
# Main execution
# =============================================================================
main() {
trap 'log_message "ERROR" "The ${APP_NAME} backup task hit an error! Aborting."' ERR
log_message "INFO" "====== Starting ${APP_NAME} backup task ======"
# > Step 1: run the official backup command remotely
log_message "INFO" "[Step 1/7] Running the official backup command remotely..."
execute_remote_command "${REMOTE_USER}" "${REMOTE_HOST}" "${REMOTE_BACKUP_CMD}"
# > Step 2: rsync the backup files to the local host
log_message "INFO" "[Step 2/7] Copying the backup files locally via rsync..."
mkdir -p "${LOCAL_BACKUP_DIR}"
rsync -avz --progress -e "ssh -p ${REMOTE_SSH_PORT}" \
"${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_DATA_DIR}/" \
"${LOCAL_BACKUP_DIR}/" --include='db_*.sqlite3' --include='config.json' --include='rsa_key*' --include='attachments/***' --include='icon_cache/***' --include='sends/***' --exclude='*'
# > Step 3: clean up the backed-up database files on the remote host
log_message "INFO" "[Step 3/7] Cleaning up the remote database backup files..."
execute_remote_command "${REMOTE_USER}" "${REMOTE_HOST}" "rm -f ${REMOTE_DB_BACKUP_GLOB}"
# > Step 4: encrypt the local directory with 7zip
local archive_file="${SCRIPT_RUN_DIR}/${APP_NAME}-backup-$(date +%Y%m%d-%H%M%S).7z"
log_message "INFO" "[Step 4/7] Encrypting the local directory with 7zip..."
encrypt_with_7zip "${LOCAL_BACKUP_DIR}" "${archive_file}"
# > Step 5: upload the archive with rclone
log_message "INFO" "[Step 5/7] Uploading the encrypted archive to cold storage => ${RCLONE_REMOTE_REPO}..."
rclone_copy "${archive_file}" "${RCLONE_REMOTE_REPO}"
# > Step 6: cap the number of replicas in the remote repository
log_message "INFO" "[Step 6/7] Capping cold-backup replica count => ${MAX_ENCRYPTED_REPLICAS}..."
rclone_control_replicas "${RCLONE_REMOTE_REPO}" "${APP_NAME}-backup-" "${MAX_ENCRYPTED_REPLICAS}"
# > Step 7: clean up the local encrypted archives
log_message "INFO" "[Step 7/7] Cleaning up local archives..."
cleanup_local_encrypted_files "${SCRIPT_RUN_DIR}"
# > Clean up local temporary data (the glob must stay outside the quotes to expand)
rm -f "${LOCAL_BACKUP_DIR}"/db_*.sqlite3
log_message "INFO" "====== ${APP_NAME} 备份任务已全部完成! ======"
}
# =============================================================================
# Script entry point
# =============================================================================
# Call graph
# main
# ├─ execute_remote_command (2)
# ├─ encrypt_with_7zip
# ├─ rclone_copy
# ├─ rclone_control_replicas
# └─ cleanup_local_encrypted_files
# =============================================================================
main "$@"

View File

@@ -0,0 +1,7 @@
64.69.32.106
Purchase date: 2026/08/29
Purchase price: USD 15/year

View File

@@ -0,0 +1,28 @@
What needs to be backed up:
S5 nextcloud
S5 vault-warden
T0 gitea
You are a master of the computing field, highly skilled at applying mature tools from the internet and at writing small tools of your own, with a deep understanding of secure data backup.
Please create a solution for the following situation; only the solution is needed, not actual code.
Host A (2C, 2 GB RAM, 120 GB disk) serves as the backup server; any software may be installed on it.
Host B (4C, 24 GB RAM, 100 GB disk) already runs nextcloud and vault-warden deployed via docker-compose. Nextcloud details: image nextcloud:27.0.1-apache, local volume mapping /data/nextcloud:/var/www/html, database image: mariadb:10.5; the database must be backed up as well. For the backup method, follow https://docs.nextcloud.com/server/latest/admin_manual/maintenance/backup.html strictly.
Vault-warden details: image: vaultwarden/server:1.34.3-alpine, local storage directory /data/vault-warden/persist-data:/data; the attachments must be backed up. For the backup method, follow https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault strictly.
Host C (4C, 24 GB RAM, 100 GB disk) already runs gitea. Gitea details: image: docker.io/bitnami/gitea:1.19.3-debian-11-r0, local storage directory /var/lib/docker/wdd/gitea/gitea_data:/bitnami/gitea. For the backup method, follow https://docs.gitea.com/zh-tw/administration/backup-and-restore strictly.
Hosts A, B and C are three independent public servers with full network connectivity between them.
Passwordless root SSH login is already configured between hosts A, B and C.
All services on hosts A, B and C run as root.
Please provide a plan in which:
1. From hosts B and C, the nextcloud data is backed up once a week, the vault-warden data once a day, and the gitea data once a week, to the backup host.
2. The software's built-in export mechanisms are preferred, with data restoration kept in mind.
3. Backup transfers should ideally be encrypted, though this is not mandatory.
4. rsync and rclone are preferred: host A is the primary backup target, and the secondary backup goes to OneDrive.

View File

@@ -0,0 +1,108 @@
# Offsite cold backup
## The rclone approach
- Back up into Google Drive
Extract the functions and variables reused across the three scripts into a common sh file.
- Known shared variables
  1. The script run directory is /root/wdd/backup
  2. The script log directory is /root/wdd/backup/logs
  3. The SSH port of every remote host is 22333
  4. Default log level
  5. 7zip encryption password
  6. rclone remote repository address
- Known shared functions (composed as sketched below)
  1. Logging function
     1. Multiple log levels
     2. Logs at INFO and above are also written to a file
  2. Remote execution function
     1. Runs a command on a remote host
     2. The ssh port defaults to 22
  3. 7zip encryption function - encrypts and compresses a given backup directory into a single archive
  4. rclone copy function
     1. Uploads the local encrypted archive to the remote: rclone copy xxx.7z gd-zeaslity:CloneCone-BackUp
  5. rclone remote replica-count control function
     1. rclone checks the number of archives in Google Drive against xxx_max_encrpted_replicas and keeps the count under the limit by sorting by time and deleting the oldest archives
  6. Local encrypted-archive cleanup function
     1. Deletes the *.7z archives under a given directory
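A minimal composition sketch of how one app's backup chains these helpers, assuming common.sh sits in /root/wdd/backup and exports SCRIPT_RUN_DIR and RCLONE_REMOTE_REPO as above; the /data/demo path, demo-backup- prefix and replica count are placeholders:

```bash
#!/usr/bin/env bash
# Source the shared library, then chain the helpers for one app's cold backup.
source /root/wdd/backup/common.sh

archive="${SCRIPT_RUN_DIR}/demo-backup-$(date +%Y%m%d-%H%M%S).7z"
encrypt_with_7zip "/data/demo" "${archive}"                       # encrypt the staged files
rclone_copy "${archive}" "${RCLONE_REMOTE_REPO}"                  # upload to Google Drive
rclone_control_replicas "${RCLONE_REMOTE_REPO}" "demo-backup-" 3  # keep the 3 newest archives
cleanup_local_encrypted_files "${SCRIPT_RUN_DIR}"                 # drop the local *.7z files
```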
# Vault-Warden backup
## Backup notes
Backup frequency: once per day, run via crontab at 02:00 every day
Replicas kept: the 3 most recent
Official backup guide: https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault
## Backup process
1. Remotely on s5: run the official vault-warden backup command
2. rsync the specific backup files from host s5 to the local backup directory /data/s5_146-56-159-175/vault-warden/
3. Remotely on s5: delete the backed-up database files
4. 7zip encryption function: compress the local directory
5. rclone copy function: upload the archive
6. rclone remote replica-count control function: cap the remote replica count with vault-warden_max_encrpted_replicas
7. Local encrypted-archive cleanup function
# NextCloud backup
## Backup notes
Backup frequency: once per week, run via crontab at 02:00 every Sunday
Replicas kept: the most recent 1
Official backup guide: https://docs.nextcloud.com/server/latest/admin_manual/maintenance/backup.html
## Backup process
1. Remotely on s5: enable maintenance mode: docker exec nextcloud_web php occ maintenance:mode --on
2. Remotely on s5: back up the database (MariaDB): docker exec nextcloud-db mariadb-dump --single-transaction -h localhost -u nextcloud -p'boge14@Level5' nextcloud > /data/nextcloud/nextcloud-db_backup_$(date +%Y%m%d-%H%M%S).sql
3. rsync the following backup files from host s5 to the local directory /data/s5_146-56-159-175/nextcloud/
   1. /data/nextcloud/*
4. Remotely on s5: delete the following files
   1. /data/nextcloud/nextcloud-db_backup_*.sql
5. Remotely on s5: disable maintenance mode: docker exec nextcloud_web php occ maintenance:mode --off
6. 7zip encryption function: compress the local directory
7. rclone copy function: upload the archive
8. rclone remote replica-count control function: cap the remote replica count with nextcloud_max_encrpted_replicas
9. Local encrypted-archive cleanup function
# Gitea backup
## Backup notes
Backup frequency: at 02:00 every Wednesday and Saturday, run /root/wdd/backup/gitea-backup.sh
Replicas kept: the 3 most recent
Official backup guide: https://docs.gitea.com/zh-tw/administration/backup-and-restore
## Backup process
1. Remotely on t0: run the gitea backup command: docker exec -it gitea-gitea-1 /opt/bitnami/gitea/bin/gitea dump -c /bitnami/gitea/custom/conf/app.ini
2. Remotely on t0: run docker exec -it gitea-gitea-1 /bin/sh -c "mv /opt/bitnami/gitea/gitea-dump-*.zip /opt/bitnami/gitea/data/tmp/gitea-dump-$(date +%Y%m%d-%H%M%S).zip"
3. rsync /data/gitea/gitea_data/data/tmp/gitea-dump-*.zip from host t0 to the local directory /data/t0_150_230_198_103/gitea/
4. Remotely on t0: clear the local backup: rm /data/gitea/gitea_data/data/tmp/gitea-dump-*.zip
5. 7zip encryption function: compress the local directory
6. rclone copy function: upload the archive
7. rclone remote replica-count control function: cap the remote replica count with gitea_max_encrpted_replicas
8. Local encrypted-archive cleanup function
On Ubuntu, use scheduled tasks to run the following jobs at these frequencies:
At 02:00 every Wednesday and Saturday: /root/wdd/backup/gitea-backup.sh
At 02:00 every Sunday: /root/wdd/backup/nextcloud-backup.sh
At 02:00 every day: /root/wdd/backup/vault-warden-backup.sh
The scripts must run with /root/wdd/backup as the working directory.
Give the crontab commands, plus the commands for inspecting the scheduled jobs' execution logs.
# Add the following entries:
# Gitea backup - 02:00 every Wednesday and Saturday
0 2 * * 3,6 cd /root/wdd/backup && /root/wdd/backup/gitea-backup.sh
# Nextcloud backup - 02:00 every Sunday
0 2 * * 0 cd /root/wdd/backup && /root/wdd/backup/nextcloud-backup.sh
# Vaultwarden backup - 02:00 every day
0 2 * * * cd /root/wdd/backup && /root/wdd/backup/vault-warden-backup.sh
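One way to install these entries non-interactively, assuming they are saved to a file named backup-cron.txt (appending keeps any existing crontab lines):
# Append the three entries to root's crontab
crontab -l 2>/dev/null | cat - backup-cron.txt | crontab -
# Or paste them in interactively:
crontab -e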
Recommended ways to inspect the cron service's system logs:
sudo grep CRON /var/log/syslog
# Or inspect the system log with journalctl
sudo journalctl -u cron.service --since today
# Follow the execution logs of the specific backup scripts (requires the scripts to log)
sudo tail -f /var/log/syslog | grep -E "(gitea-backup|nextcloud-backup|vault-warden-backup)"
# Check today's cron execution records
sudo grep CRON /var/log/syslog | grep "$(date +'%b %e')"
# Check the records of a specific date, e.g. October 15
sudo grep CRON /var/log/syslog | grep "Oct 15"

View File

@@ -0,0 +1,22 @@
version: '3.8'
services:
crawl4ai:
image: unclecode/crawl4ai:basic
ports:
- "1235:11235"
environment:
- CRAWL4AI_API_TOKEN=${CRAWL4AI_API_TOKEN:-} # Optional API security
- MAX_CONCURRENT_TASKS=${MAX_CONCURRENT_TASKS:-}
# LLM Provider Keys
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
- GEMINI_API_KEY=${GEMINI_API_KEY:-}
volumes:
- /dev/shm:/dev/shm
deploy:
resources:
limits:
memory: 4G
reservations:
memory: 1G
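# Assumed usage: keep the secrets in a sibling .env file (see the next file)
# and start the stack with `docker compose up -d`.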

View File

@@ -0,0 +1,9 @@
# API Security (optional)
CRAWL4AI_API_TOKEN=EP53z52yx1r8k87G7y34AMojqpCHU4eMxO1MEGOBwa5mlDYe
# LLM Provider Keys
OPENAI_API_KEY=sk-proj-lCRIbBe3ex7VJP5GzAklT3BlbkFJbOcB4cXRQKk7pNZjBCHM
GEMINI_API_KEY=AIzaSyBv2JN5aY_OKDI5e1aVEf6uDQli65X9NZM
# Other Configuration
MAX_CONCURRENT_TASKS=5

View File

@@ -2,7 +2,7 @@ version: '3.9'
services:
chatgpt-next-web:
container_name: chatgpt-next-web
image: yidadaa/chatgpt-next-web
image: yidadaa/chatgpt-next-web:v2.16.0
ports:
- 3002:3000
environment:

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,6 @@
This article exposes the capital games and industry chaos behind the soybean meal market crash. Key points:
1. **Capital manipulation**: the collapse in soybean meal prices was not purely a matter of supply and demand, but a capital struggle between crushing giants and feed companies over pricing power, with retail traders as the casualties.
2. **The illusion of weak demand**: the so-called "weak demand" was really feed companies adjusting formulas to protect margins and cutting soybean meal usage, while farmers were battered by rising costs and the downswing of the hog cycle.
3. **The utilization-rate lie**: crushers artificially constrained supply through "maintenance shutdowns" to fabricate scarcity and manipulate prices, while over-reliance on imported soybeans threatens the domestic agricultural ecosystem and industrial security.
4. **Inventory tricks**: the anomaly of being "short on beans but not on meal" hints that crushers and traders were hoarding, exploiting information asymmetry and buy-low/sell-high plays at retail traders' expense.
5. **Disputed forecasts**: Zhuochuang Information's forecast of April soybean meal prices was questioned as potentially influenced by interest groups, a reminder for investors to stay independent and wary of market traps.

View File

@@ -0,0 +1,17 @@
#!/bin/bash
# rd_size is in KiB: 1 GiB = 1048576, 5 GiB = 5242880, 8 GiB = 8388608
sudo modprobe brd rd_nr=1 rd_size=1048576 max_part=1
sudo mkfs.ext4 /dev/ram0
sudo mkdir /mnt/ramdisk
sudo mount /dev/ram0 /mnt/ramdisk
# Test
touch /mnt/ramdisk/test.txt
# Cleanup
sudo umount /mnt/ramdisk
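# To free the RAM completely afterwards, also unload the brd module
# (assumes no other /dev/ramN device is still in use)
sudo rmmod brd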

View File

@@ -1,17 +0,0 @@
# seoul arm
146.56.0.0/16
# tokyo arm
150.230.0.0/16
# tokyo seoul
140.238.0.0/16
# phonix send to boge
#144.24.0.0/16
# phonix amd
129.146.0.0/16
# osaka amd64
140.83.0.0/16
# tencent-shanghai
42.192.52.227/32
# tencent-hongkong
43.154.83.213/32

View File

@@ -0,0 +1,122 @@
server {
server_name dify.107421.xyz;
listen 80 ;
return 301 https://dify.107421.xyz$request_uri;
}
server {
listen 443 ssl;
server_name dify.107421.xyz;
ssl_certificate /etc/nginx/conf.d/ssl_key/dify.107421.xyz.cert.pem;
ssl_certificate_key /etc/nginx/conf.d/ssl_key/dify.107421.xyz.key.pem;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m;
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
location /console/api {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:5001;
}
location /api {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:5001;
}
location /v1 {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:5001;
}
location /files {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:5001;
}
location /explore {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:3000;
}
location /e {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_set_header Dify-Hook-Url ://;
proxy_pass http://129.146.65.80:5002;
}
location / {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:3000;
}
}
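# Note: the API-facing locations (/console/api, /api, /v1, /files) all proxy to the
# Dify API on 129.146.65.80:5001; /explore and / go to the web UI on :3000, and /e
# to the service listening on :5002.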

View File

@@ -0,0 +1,564 @@
# ==================================================================
# WARNING: This file is auto-generated by generate_docker_compose
# Do not modify this file directly. Instead, update the .env.example
# or docker-compose-template.yaml and regenerate this file.
# ==================================================================
x-shared-env: &shared-api-worker-env
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
SERVICE_API_URL: ${SERVICE_API_URL:-}
APP_API_URL: ${APP_API_URL:-}
APP_WEB_URL: ${APP_WEB_URL:-}
FILES_URL: ${FILES_URL:-}
LOG_LEVEL: ${LOG_LEVEL:-INFO}
LOG_FILE: ${LOG_FILE:-/app/logs/server.log}
LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20}
LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5}
LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S}
LOG_TZ: ${LOG_TZ:-UTC}
DEBUG: ${DEBUG:-false}
FLASK_DEBUG: ${FLASK_DEBUG:-false}
SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U}
INIT_PASSWORD: ${INIT_PASSWORD:-}
DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION}
CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai}
OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1}
MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30}
APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200}
DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
DIFY_PORT: ${DIFY_PORT:-5001}
SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1}
SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent}
SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10}
CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-}
GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360}
CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-}
CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false}
CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-}
CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-}
API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10}
API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60}
DB_USERNAME: ${DB_USERNAME:-postgres}
DB_PASSWORD: ${DB_PASSWORD:-difyai123456}
DB_HOST: ${DB_HOST:-db}
DB_PORT: ${DB_PORT:-5432}
DB_DATABASE: ${DB_DATABASE:-dify}
SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600}
SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false}
POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100}
POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB}
POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB}
POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}
POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}
REDIS_HOST: ${REDIS_HOST:-redis}
REDIS_PORT: ${REDIS_PORT:-6379}
REDIS_USERNAME: ${REDIS_USERNAME:-}
REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
REDIS_USE_SSL: ${REDIS_USE_SSL:-false}
REDIS_DB: ${REDIS_DB:-0}
REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false}
REDIS_SENTINELS: ${REDIS_SENTINELS:-}
REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-}
REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-}
REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-}
REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1}
REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false}
REDIS_CLUSTERS: ${REDIS_CLUSTERS:-}
REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-}
CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1}
BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
STORAGE_TYPE: ${STORAGE_TYPE:-opendal}
OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs}
OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage}
S3_ENDPOINT: ${S3_ENDPOINT:-}
S3_REGION: ${S3_REGION:-us-east-1}
S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai}
S3_ACCESS_KEY: ${S3_ACCESS_KEY:-}
S3_SECRET_KEY: ${S3_SECRET_KEY:-}
S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai}
AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai}
AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container}
AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://<your_account_name>.blob.core.windows.net}
GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name}
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-}
ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name}
ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key}
ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key}
ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com}
ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1}
ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path}
TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name}
TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key}
TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id}
TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region}
TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme}
OCI_ENDPOINT: ${OCI_ENDPOINT:-https://objectstorage.us-ashburn-1.oraclecloud.com}
OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name}
OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key}
OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key}
OCI_REGION: ${OCI_REGION:-us-ashburn-1}
HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name}
HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key}
HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key}
HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url}
VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name}
VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key}
VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key}
VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url}
VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region}
BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name}
BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key}
BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key}
BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url}
SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name}
SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key}
SUPABASE_URL: ${SUPABASE_URL:-your-server-url}
VECTOR_STORE: ${VECTOR_STORE:-weaviate}
WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333}
QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20}
QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false}
QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334}
MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530}
MILVUS_TOKEN: ${MILVUS_TOKEN:-}
MILVUS_USER: ${MILVUS_USER:-root}
MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus}
MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False}
MYSCALE_HOST: ${MYSCALE_HOST:-myscale}
MYSCALE_PORT: ${MYSCALE_PORT:-8123}
MYSCALE_USER: ${MYSCALE_USER:-default}
MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-}
MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify}
MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-}
COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server}
COUCHBASE_USER: ${COUCHBASE_USER:-Administrator}
COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password}
COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings}
COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default}
PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector}
PGVECTOR_PORT: ${PGVECTOR_PORT:-5432}
PGVECTOR_USER: ${PGVECTOR_USER:-postgres}
PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456}
PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify}
PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1}
PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5}
PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs}
PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432}
PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres}
PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456}
PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify}
ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak}
ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk}
ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou}
ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456}
ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount}
ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword}
ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify}
ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword}
ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com}
ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432}
ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1}
ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5}
TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb}
TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000}
TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-}
TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-}
TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify}
TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1}
TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify}
TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20}
TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false}
TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334}
TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify}
TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify}
TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1}
TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1}
TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1}
TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify}
TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100}
CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1}
CHROMA_PORT: ${CHROMA_PORT:-8000}
CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant}
CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database}
CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider}
CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-}
ORACLE_HOST: ${ORACLE_HOST:-oracle}
ORACLE_PORT: ${ORACLE_PORT:-1521}
ORACLE_USER: ${ORACLE_USER:-dify}
ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify}
ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1}
RELYT_HOST: ${RELYT_HOST:-db}
RELYT_PORT: ${RELYT_PORT:-5432}
RELYT_USER: ${RELYT_USER:-postgres}
RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456}
RELYT_DATABASE: ${RELYT_DATABASE:-postgres}
OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch}
OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200}
OPENSEARCH_USER: ${OPENSEARCH_USER:-admin}
OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin}
OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true}
TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1}
TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify}
TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30}
TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify}
TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify}
TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1}
TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2}
ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0}
ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200}
ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
KIBANA_PORT: ${KIBANA_PORT:-5601}
BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287}
BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000}
BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root}
BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify}
BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify}
BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1}
BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3}
VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak}
VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk}
VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai}
VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com}
VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http}
VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30}
VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30}
LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070}
LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm}
LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm}
OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase}
OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881}
OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test}
OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test}
OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io}
UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify}
UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
ETL_TYPE: ${ETL_TYPE:-dify}
UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-}
SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true}
PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512}
CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024}
MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64}
UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100}
UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50}
SENTRY_DSN: ${SENTRY_DSN:-}
API_SENTRY_DSN: ${API_SENTRY_DSN:-}
API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-}
NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-}
MAIL_TYPE: ${MAIL_TYPE:-resend}
MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-}
RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com}
RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key}
SMTP_SERVER: ${SMTP_SERVER:-}
SMTP_PORT: ${SMTP_PORT:-465}
SMTP_USERNAME: ${SMTP_USERNAME:-}
SMTP_PASSWORD: ${SMTP_PASSWORD:-}
SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5}
CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox}
CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808}
CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5}
CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20}
CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000}
CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10}
CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60}
CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10}
TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500}
WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200}
WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5}
MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800}
WORKFLOW_PARALLEL_DEPTH_LIMIT: ${WORKFLOW_PARALLEL_DEPTH_LIMIT:-3}
WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
PGUSER: ${PGUSER:-${DB_USERNAME}}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}}
POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}}
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release}
SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true}
WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true}
PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres}
PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m}
OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1}
OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1}
OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536}
OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536}
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
NGINX_PORT: ${NGINX_PORT:-80}
NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
CERTBOT_EMAIL: ${CERTBOT_EMAIL:-your_email@example.com}
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com}
CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-}
SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
SSRF_DEFAULT_TIME_OUT: ${SSRF_DEFAULT_TIME_OUT:-5}
SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5}
SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5}
SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5}
EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80}
EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443}
POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-}
POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-}
POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-}
POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-}
POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-}
POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-}
CSP_WHITELIST: ${CSP_WHITELIST:-}
CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false}
MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100}
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10}
DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002}
PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002}
PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
PLUGIN_DAEMON_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002}
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
PLUGIN_PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
PLUGIN_DEBUGGING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
PLUGIN_DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
EXPOSE_PLUGIN_DEBUGGING_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
EXPOSE_PLUGIN_DEBUGGING_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
PLUGIN_DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
PLUGIN_DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}}
MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true}
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
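# ---------------------------------------------------------------------------
# Note on the block above: it is the body of a YAML anchor (defined earlier in
# this file as &shared-api-worker-env) holding the environment shared by the
# api, worker and plugin_daemon services. Each service merges it with the YAML
# merge key and then overrides individual entries. A minimal sketch of the
# pattern (the x-shared-env extension-field name is illustrative):
#
#   x-shared-env: &shared-api-worker-env
#     LOG_LEVEL: ${LOG_LEVEL:-INFO}
#   services:
#     api:
#       environment:
#         <<: *shared-api-worker-env   # pull in every shared variable
#         MODE: api                    # service-specific override
# ---------------------------------------------------------------------------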
services:
# API service
api:
image: langgenius/dify-api:1.1.3
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
# Startup mode, 'api' starts the API server.
MODE: api
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
dns:
- 1.1.1.1
- 8.8.8.8
volumes:
# Mount the storage directory to the container, for storing user files.
- /mnt/ramdisk/dify-api/storage:/app/api/storage
- /etc/resolv.conf:/etc/resolv.conf
network_mode: "host"
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:1.1.3
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
# Startup mode, 'worker' starts the Celery worker for processing the queue.
MODE: worker
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
volumes:
# Mount the storage directory to the container, for storing user files.
- /mnt/ramdisk/dify-api/storage:/app/api/storage
- /etc/resolv.conf:/etc/resolv.conf
dns:
- 1.1.1.1
- 8.8.8.8
network_mode: "host"
# Frontend web application.
web:
image: langgenius/dify-web:1.1.3
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
APP_API_URL: ${APP_API_URL:-}
SENTRY_DSN: ${WEB_SENTRY_DSN:-}
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
CSP_WHITELIST: ${CSP_WHITELIST:-}
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
ports:
- 3000:3000
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.11
restart: always
environment:
# The DifySandbox configurations
# Make sure to change this key to a strong one for your deployment.
# You can generate a strong key using `openssl rand -base64 42`.
API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
GIN_MODE: ${SANDBOX_GIN_MODE:-release}
WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
volumes:
- /mnt/ramdisk/sandbox/dependencies:/dependencies
- /etc/resolv.conf:/etc/resolv.conf
dns:
- 1.1.1.1
- 8.8.8.8
healthcheck:
test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
network_mode: "host"
# plugin daemon
plugin_daemon:
image: langgenius/dify-plugin-daemon:0.0.6-local
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
DIFY_INNER_API_KEY: ${INNER_API_KEY_FOR_PLUGIN:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_REMOTE_INSTALL_HOST:-0.0.0.0}
PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_REMOTE_INSTALL_PORT:-5002}
PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
network_mode: "host"
volumes:
- /mnt/ramdisk/plugin_daemon:/app/storage
- /etc/resolv.conf:/etc/resolv.conf
dns:
- 1.1.1.1
- 8.8.8.8
# ssrf_proxy server
# for more information, please refer to
# https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
# ssrf_proxy:
# image: ubuntu/squid:latest
# restart: always
# volumes:
# - /mnt/ramdisk/ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
# - /mnt/ramdisk/ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
# entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
# environment:
# # Please modify the squid env vars to fit your network environment.
# HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
# COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
# REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
# SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
# SANDBOX_PORT: ${SANDBOX_PORT:-8194}
# networks:
# - ssrf_proxy_network
# - default
# networks:
# create a network between sandbox, api and ssrf_proxy, and can not access outside.
# ssrf_proxy_network:
# driver: bridge
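# Tip (a sketch, not part of the upstream compose file): after editing the
# shared env anchor or any service, render the merged configuration first so
# anchor and ${VAR:-default} mistakes surface before a restart:
#   docker compose config > /dev/null && echo "compose file OK"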

File diff suppressed because it is too large

@@ -0,0 +1,42 @@
#!/bin/bash
# Modified based on Squid OCI image entrypoint
# This entrypoint aims to forward the squid logs to stdout to assist users of
# common container related tooling (e.g., kubernetes, docker-compose, etc) to
# access the service logs.
# Moreover, it invokes the squid binary, leaving all the desired parameters to
# be provided by the "command" passed to the spawned container. If no command
# is provided by the user, the default behavior (as per the CMD statement in
# the Dockerfile) will be to use Ubuntu's default configuration [1] and run
# squid with the "-NYC" options to mimic the behavior of the Ubuntu provided
# systemd unit.
# [1] The default configuration is changed in the Dockerfile to allow local
# network connections. See the Dockerfile for further information.
echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process"
if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
/usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1
fi
tail -F /var/log/squid/access.log 2>/dev/null &
tail -F /var/log/squid/error.log 2>/dev/null &
tail -F /var/log/squid/store.log 2>/dev/null &
tail -F /var/log/squid/cache.log 2>/dev/null &
# Replace environment variables in the template and output to the squid.conf
echo "[ENTRYPOINT] replacing environment variables in the template"
awk '{
while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) {
var = substr($0, RSTART+2, RLENGTH-3)
val = ENVIRON[var]
$0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH)
}
print
}' /etc/squid/squid.conf.template > /etc/squid/squid.conf
/usr/sbin/squid -Nz
echo "[ENTRYPOINT] starting squid"
/usr/sbin/squid -f /etc/squid/squid.conf -NYC 1
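# Note (a sketch): where the gettext package is available in the image, the awk
# loop above can be replaced by a one-line envsubst call; the awk version
# avoids adding that dependency:
#   envsubst < /etc/squid/squid.conf.template > /etc/squid/squid.conf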


@@ -0,0 +1,54 @@
acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN)
acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN)
acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN)
acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines
acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN)
acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN)
acl localnet src fc00::/7 # RFC 4193 local private network range
acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
acl SSL_ports port 443
# acl SSL_ports port 1025-65535 # Enable the configuration to resolve this issue: https://github.com/langgenius/dify/issues/12792
acl Safe_ports port 80 # http
acl Safe_ports port 21 # ftp
acl Safe_ports port 443 # https
acl Safe_ports port 70 # gopher
acl Safe_ports port 210 # wais
acl Safe_ports port 1025-65535 # unregistered ports
acl Safe_ports port 280 # http-mgmt
acl Safe_ports port 488 # gss-http
acl Safe_ports port 591 # filemaker
acl Safe_ports port 777 # multiling http
acl CONNECT method CONNECT
http_access deny !Safe_ports
http_access deny CONNECT !SSL_ports
http_access allow localhost manager
http_access deny manager
http_access allow localhost
include /etc/squid/conf.d/*.conf
http_access deny all
################################## Proxy Server ################################
http_port ${HTTP_PORT}
coredump_dir ${COREDUMP_DIR}
refresh_pattern ^ftp: 1440 20% 10080
refresh_pattern ^gopher: 1440 0% 1440
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern . 0 20% 4320
# cache_dir ufs /var/spool/squid 100 16 256
# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks
# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default
################################## Reverse Proxy To Sandbox ################################
http_port ${REVERSE_PROXY_PORT} accel vhost
cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
acl src_all src all
http_access allow src_all
# Connectivity check (a note, not squid configuration): verify the api
# container can reach Redis:
#   docker exec -it dify_api_1 telnet 10.0.0.247 6379
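# Smoke-test sketch (assumes the compose service name ssrf_proxy and the
# default ports configured above):
#   curl -x http://ssrf_proxy:3128 http://example.com   # forward-proxy listener
#   curl http://ssrf_proxy:8194/health                  # reverse proxy to the sandbox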


@@ -0,0 +1,743 @@
# ------------------------------
# Environment Variables for API service & worker
# ------------------------------
# ------------------------------
# Common Variables
# ------------------------------
# The backend URL of the console API,
# used to concatenate the authorization callback.
# If empty, it is the same domain.
# Example: https://api.console.dify.ai
CONSOLE_API_URL=
# The front-end URL of the console web,
# used to concatenate some front-end addresses and for CORS configuration use.
# If empty, it is the same domain.
# Example: https://console.dify.ai
CONSOLE_WEB_URL=
# Service API Url,
# used to display Service API Base Url to the front-end.
# If empty, it is the same domain.
# Example: https://api.dify.ai
SERVICE_API_URL=
# WebApp API backend Url,
# used to declare the back-end URL for the front-end API.
# If empty, it is the same domain.
# Example: https://api.app.dify.ai
APP_API_URL=
# WebApp Url,
# used to display WebAPP API Base Url to the front-end.
# If empty, it is the same domain.
# Example: https://app.dify.ai
APP_WEB_URL=
# File preview or download Url prefix.
# used to display File preview or download Url to the front-end or as Multi-model inputs;
# Url is signed and has expiration time.
FILES_URL=
# ------------------------------
# Server Configuration
# ------------------------------
# The log level for the application.
# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
LOG_LEVEL=INFO
# Log file path
LOG_FILE=/root/app-install/dify/logs/server.log
# Log file max size, the unit is MB
LOG_FILE_MAX_SIZE=50
# Log file max backup count
LOG_FILE_BACKUP_COUNT=5
# Log dateformat
LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
# Log Timezone
LOG_TZ=Asia/Shanghai
# Debug mode, default is false.
# It is recommended to turn on this configuration for local development
# to prevent some problems caused by monkey patch.
DEBUG=false
# Flask debug mode, it can output trace information at the interface when turned on,
# which is convenient for debugging.
FLASK_DEBUG=false
# A secretkey that is used for securely signing the session cookie
# and encrypting sensitive information on the database.
# You can generate a strong key using `openssl rand -base64 42`.
SECRET_KEY=bBj28uxctAwybtLFUr1Zlc3OKlTG5SsUiz+W9v71s0+YytuD8+Um8Qdy
# Password for admin user initialization.
# If left unset, admin user will not be prompted for a password
# when creating the initial admin account.
# The length of the password cannot exceed 30 characters.
INIT_PASSWORD=loveff.cxc.23
# Deployment environment.
# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`.
# Testing environment. There will be a distinct color label on the front-end page,
# indicating that this environment is a testing environment.
DEPLOY_ENV=PRODUCTION
# Whether to enable the version check policy.
# If set to empty, https://updates.dify.ai will be called for version check.
CHECK_UPDATE_URL=https://updates.dify.ai
# Used to change the OpenAI base address, default is https://api.openai.com/v1.
# When OpenAI cannot be accessed in China, replace it with a domestic mirror address,
# or when a local model provides OpenAI compatible API, it can be replaced.
OPENAI_API_BASE=https://api.openai.com/v1
# When enabled, migrations will be executed prior to application startup
# and the application will start after the migrations have completed.
MIGRATION_ENABLED=true
# File Access Time specifies a time interval in seconds for the file to be accessed.
# The default value is 300 seconds.
FILES_ACCESS_TIMEOUT=300
# Access token expiration time in minutes
ACCESS_TOKEN_EXPIRE_MINUTES=60
# Refresh token expiration time in days
REFRESH_TOKEN_EXPIRE_DAYS=30
# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer.
APP_MAX_ACTIVE_REQUESTS=0
APP_MAX_EXECUTION_TIME=1200
# ------------------------------
# Container Startup Related Configuration
# Only effective when starting with docker image or docker-compose.
# ------------------------------
# API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed.
DIFY_BIND_ADDRESS=0.0.0.0
# API service binding port number, default 5001.
DIFY_PORT=5001
# The number of API server workers, i.e., the number of workers.
# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent
# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
SERVER_WORKER_AMOUNT=2
# Defaults to gevent. On Windows, it can be switched to sync or solo.
SERVER_WORKER_CLASS=gevent
# Default number of worker connections, the default is 10.
SERVER_WORKER_CONNECTIONS=10
# Similar to SERVER_WORKER_CLASS.
# On Windows, it can be switched to sync or solo.
CELERY_WORKER_CLASS=
# Request handling timeout. The default is 200;
# 360 is recommended to support longer SSE connections.
GUNICORN_TIMEOUT=360
# The number of Celery workers. The default is 1, and can be set as needed.
CELERY_WORKER_AMOUNT=
# Flag indicating whether to enable autoscaling of Celery workers.
#
# Autoscaling is useful when tasks are CPU intensive and can be dynamically
# allocated and deallocated based on the workload.
#
# When autoscaling is enabled, the maximum and minimum number of workers can
# be specified. The autoscaling algorithm will dynamically adjust the number
# of workers within the specified range.
#
# Default is false (i.e., autoscaling is disabled).
#
# Example:
# CELERY_AUTO_SCALE=true
CELERY_AUTO_SCALE=true
# The maximum number of Celery workers that can be autoscaled.
# This is optional and only used when autoscaling is enabled.
# Default is not set.
CELERY_MAX_WORKERS=3
# The minimum number of Celery workers that can be autoscaled.
# This is optional and only used when autoscaling is enabled.
# Default is not set.
CELERY_MIN_WORKERS=1
# API Tool configuration
API_TOOL_DEFAULT_CONNECT_TIMEOUT=10
API_TOOL_DEFAULT_READ_TIMEOUT=60
# ------------------------------
# Database Configuration
# The database uses PostgreSQL. Please use the public schema.
# It is consistent with the configuration in the 'db' service below.
# ------------------------------
DB_USERNAME=postgres
DB_PASSWORD=V2rayStrP@ss
DB_HOST=10.0.0.247
DB_PORT=5432
DB_DATABASE=dify
# The size of the database connection pool.
# The default is 30 connections, which can be appropriately increased.
SQLALCHEMY_POOL_SIZE=30
# Database connection pool recycling time, the default is 3600 seconds.
SQLALCHEMY_POOL_RECYCLE=3600
# Whether to print SQL, default is false.
SQLALCHEMY_ECHO=false
# Maximum number of connections to the database
# Default is 100
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
POSTGRES_MAX_CONNECTIONS=100
# Sets the amount of shared memory used for postgres's shared buffers.
# Default is 128MB
# Recommended value: 25% of available memory
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
POSTGRES_SHARED_BUFFERS=4096MB
# Sets the amount of memory used by each database worker for working space.
# Default is 4MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
POSTGRES_WORK_MEM=64MB
# Sets the amount of memory reserved for maintenance activities.
# Default is 64MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
POSTGRES_MAINTENANCE_WORK_MEM=128MB
# Sets the planner's assumption about the effective cache size.
# Default is 4096MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
POSTGRES_EFFECTIVE_CACHE_SIZE=8192MB
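# Sizing sketch: derive the two memory values above from the host's RAM
# (assumes Linux /proc/meminfo; the percentages follow the comments above):
#   total_mb=$(( $(awk '/MemTotal/ {print $2}' /proc/meminfo) / 1024 ))
#   echo "shared_buffers ~ $(( total_mb / 4 ))MB"         # ~25% of RAM
#   echo "effective_cache_size ~ $(( total_mb / 2 ))MB"   # ~50% of RAM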
# ------------------------------
# Redis Configuration
# This Redis configuration is used for caching and for pub/sub during conversation.
# ------------------------------
REDIS_HOST=10.0.0.247
REDIS_PORT=6379
REDIS_USERNAME=
REDIS_PASSWORD=V2rayStrP@ss
REDIS_USE_SSL=false
REDIS_DB=0
# Whether to use Redis Sentinel mode.
# If set to true, the application will automatically discover and connect to the master node through Sentinel.
REDIS_USE_SENTINEL=false
# List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port.
# Format: `<sentinel1_ip>:<sentinel1_port>,<sentinel2_ip>:<sentinel2_port>,<sentinel3_ip>:<sentinel3_port>`
REDIS_SENTINELS=
REDIS_SENTINEL_SERVICE_NAME=
REDIS_SENTINEL_USERNAME=
REDIS_SENTINEL_PASSWORD=
REDIS_SENTINEL_SOCKET_TIMEOUT=0.1
# List of Redis Cluster nodes. If Cluster mode is enabled, provide at least one Cluster IP and port.
# Format: `<Cluster1_ip>:<Cluster1_port>,<Cluster2_ip>:<Cluster2_port>,<Cluster3_ip>:<Cluster3_port>`
REDIS_USE_CLUSTERS=false
REDIS_CLUSTERS=
REDIS_CLUSTERS_PASSWORD=
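# Quick connectivity check from the host (a sketch; assumes redis-cli is
# installed locally):
#   redis-cli -h 10.0.0.247 -p 6379 -a 'V2rayStrP@ss' ping   # expect PONG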
# ------------------------------
# Celery Configuration
# ------------------------------
# Use redis as the broker, and redis db 1 for celery broker.
# Format as follows: `redis://<redis_username>:<redis_password>@<redis_host>:<redis_port>/<redis_database>`
# If the password contains special characters such as `@`, URL-encode them first
# (e.g. `V2rayStrP@ss` becomes `V2rayStrP%40ss`), otherwise the URL cannot be parsed reliably.
# If using Redis Sentinel, format as follows: `sentinel://<sentinel_username>:<sentinel_password>@<sentinel_host>:<sentinel_port>/<redis_database>`
# Example: sentinel://localhost:26379/1;sentinel://localhost:26380/1;sentinel://localhost:26381/1
CELERY_BROKER_URL=redis://:V2rayStrP%40ss@10.0.0.247:6379/1
BROKER_USE_SSL=false
# If you are using Redis Sentinel for high availability, configure the following settings.
CELERY_USE_SENTINEL=false
CELERY_SENTINEL_MASTER_NAME=
CELERY_SENTINEL_SOCKET_TIMEOUT=0.1
# ------------------------------
# CORS Configuration
# Used to set the front-end cross-domain access policy.
# ------------------------------
# Specifies the allowed origins for cross-origin requests to the Web API,
# e.g. https://dify.app or * for all origins.
WEB_API_CORS_ALLOW_ORIGINS=*
# Specifies the allowed origins for cross-origin requests to the console API,
# e.g. https://cloud.dify.ai or * for all origins.
CONSOLE_CORS_ALLOW_ORIGINS=*
# ------------------------------
# File Storage Configuration
# ------------------------------
# The type of storage to use for storing user files.
STORAGE_TYPE=opendal
# Apache OpenDAL Configuration
# The configuration for OpenDAL consists of the following format: OPENDAL_<SCHEME_NAME>_<CONFIG_NAME>.
# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services.
# Dify will scan configurations starting with OPENDAL_<SCHEME_NAME> and automatically apply them.
# The scheme name for the OpenDAL storage.
OPENDAL_SCHEME=fs
# Configurations for OpenDAL Local File System.
OPENDAL_FS_ROOT=storage
# S3 Configuration
#
S3_ENDPOINT=https://axqr6x6t48wm.compat.objectstorage.us-phoenix-1.oraclecloud.com
S3_REGION=us-phoenix-1
S3_BUCKET_NAME=phoenix-10
S3_ACCESS_KEY=e87a121f1548b244c7bd649a1f0ca35195d46cf2
S3_SECRET_KEY=uT+NIgJiKPjSaPT8EVUw3xbLSCv/CFMFuebVauznafk=
# Whether to use AWS managed IAM roles for authenticating with the S3 service.
# If set to false, the access key and secret key must be provided.
S3_USE_AWS_MANAGED_IAM=false
# Azure Blob Configuration
#
AZURE_BLOB_ACCOUNT_NAME=difyai
AZURE_BLOB_ACCOUNT_KEY=difyai
AZURE_BLOB_CONTAINER_NAME=difyai-container
AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net
# Google Storage Configuration
#
GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=
# The Alibaba Cloud OSS configurations,
#
ALIYUN_OSS_BUCKET_NAME=your-bucket-name
ALIYUN_OSS_ACCESS_KEY=your-access-key
ALIYUN_OSS_SECRET_KEY=your-secret-key
ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com
ALIYUN_OSS_REGION=ap-southeast-1
ALIYUN_OSS_AUTH_VERSION=v4
# Don't start with '/'. OSS doesn't support leading slash in object names.
ALIYUN_OSS_PATH=your-path
# Tencent COS Configuration
#
TENCENT_COS_BUCKET_NAME=your-bucket-name
TENCENT_COS_SECRET_KEY=your-secret-key
TENCENT_COS_SECRET_ID=your-secret-id
TENCENT_COS_REGION=your-region
TENCENT_COS_SCHEME=your-scheme
# Oracle Storage Configuration
#
OCI_ENDPOINT=https://axqr6x6t48wm.compat.objectstorage.us-phoenix-1.oraclecloud.com
OCI_BUCKET_NAME=phoenix-10
OCI_ACCESS_KEY=e87a121f1548b244c7bd649a1f0ca35195d46cf2
OCI_SECRET_KEY=uT+NIgJiKPjSaPT8EVUw3xbLSCv/CFMFuebVauznafk=
OCI_REGION=us-phoenix-1
# Huawei OBS Configuration
#
HUAWEI_OBS_BUCKET_NAME=your-bucket-name
HUAWEI_OBS_SECRET_KEY=your-secret-key
HUAWEI_OBS_ACCESS_KEY=your-access-key
HUAWEI_OBS_SERVER=your-server-url
# Volcengine TOS Configuration
#
VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name
VOLCENGINE_TOS_SECRET_KEY=your-secret-key
VOLCENGINE_TOS_ACCESS_KEY=your-access-key
VOLCENGINE_TOS_ENDPOINT=your-server-url
VOLCENGINE_TOS_REGION=your-region
# Baidu OBS Storage Configuration
#
BAIDU_OBS_BUCKET_NAME=your-bucket-name
BAIDU_OBS_SECRET_KEY=your-secret-key
BAIDU_OBS_ACCESS_KEY=your-access-key
BAIDU_OBS_ENDPOINT=your-server-url
# Supabase Storage Configuration
#
SUPABASE_BUCKET_NAME=your-bucket-name
SUPABASE_API_KEY=your-access-key
SUPABASE_URL=your-server-url
# ------------------------------
# Vector Database Configuration
# ------------------------------
# The type of vector store to use.
# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`.
VECTOR_STORE=weaviate
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
WEAVIATE_ENDPOINT=http://10.0.0.247:8080
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
QDRANT_URL=http://qdrant:6333
QDRANT_API_KEY=V2rayStrP@ss
QDRANT_CLIENT_TIMEOUT=20
QDRANT_GRPC_ENABLED=false
QDRANT_GRPC_PORT=6334
# Milvus configuration Only available when VECTOR_STORE is `milvus`.
# The milvus uri.
MILVUS_URI=http://127.0.0.1:19530
MILVUS_TOKEN=
MILVUS_USER=root
MILVUS_PASSWORD=Milvus
MILVUS_ENABLE_HYBRID_SEARCH=False
# MyScale configuration, only available when VECTOR_STORE is `myscale`
# For multi-language support, please set MYSCALE_FTS_PARAMS with referring to:
# https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters
MYSCALE_HOST=myscale
MYSCALE_PORT=8123
MYSCALE_USER=default
MYSCALE_PASSWORD=
MYSCALE_DATABASE=dify
MYSCALE_FTS_PARAMS=
# Chroma configuration, only available when VECTOR_STORE is `chroma`
CHROMA_HOST=127.0.0.1
CHROMA_PORT=8000
CHROMA_TENANT=default_tenant
CHROMA_DATABASE=default_database
CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider
CHROMA_AUTH_CREDENTIALS=
# Oracle configuration, only available when VECTOR_STORE is `oracle`
ORACLE_HOST=oracle
ORACLE_PORT=1521
ORACLE_USER=dify
ORACLE_PASSWORD=dify
ORACLE_DATABASE=FREEPDB1
# ------------------------------
# Knowledge Configuration
# ------------------------------
# Upload file size limit, default 15M.
UPLOAD_FILE_SIZE_LIMIT=150
# The maximum number of files that can be uploaded at a time, default 5.
UPLOAD_FILE_BATCH_LIMIT=10
# ETL type, support: `dify`, `Unstructured`
# `dify` Dify's proprietary file extraction scheme
# `Unstructured` Unstructured.io file extraction scheme
ETL_TYPE=dify
# Unstructured API path and API key, needs to be configured when ETL_TYPE is Unstructured
# Or using Unstructured for document extractor node for pptx.
# For example: http://unstructured:8000/general/v0/general
UNSTRUCTURED_API_URL=
UNSTRUCTURED_API_KEY=
SCARF_NO_ANALYTICS=true
# ------------------------------
# Model Configuration
# ------------------------------
# The maximum number of tokens allowed for prompt generation.
# This setting controls the upper limit of tokens that can be used by the LLM
# when generating a prompt in the prompt generation tool.
# Default: 512 tokens.
PROMPT_GENERATION_MAX_TOKENS=4096
# The maximum number of tokens allowed for code generation.
# This setting controls the upper limit of tokens that can be used by the LLM
# when generating code in the code generation tool.
# Default: 1024 tokens.
CODE_GENERATION_MAX_TOKENS=20480
# ------------------------------
# Multi-modal Configuration
# ------------------------------
# The format of the image/video/audio/document sent when the multi-modal model is input,
# the default is base64, optional url.
# The delay of the call in url mode will be lower than that in base64 mode.
# It is generally recommended to use the more compatible base64 mode.
# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document.
MULTIMODAL_SEND_FORMAT=base64
# Upload image file size limit, default 10M.
UPLOAD_IMAGE_FILE_SIZE_LIMIT=100
# Upload video file size limit, default 100M.
UPLOAD_VIDEO_FILE_SIZE_LIMIT=10000
# Upload audio file size limit, default 50M.
UPLOAD_AUDIO_FILE_SIZE_LIMIT=500
# ------------------------------
# Sentry Configuration
# Used for application monitoring and error log tracking.
# ------------------------------
SENTRY_DSN=
# Sentry DSN for the API service. Default is empty; when empty,
# no monitoring information is reported to Sentry and error reporting is disabled.
API_SENTRY_DSN=
# The Sentry trace sample rate for the API service; 0.01 means 1%.
API_SENTRY_TRACES_SAMPLE_RATE=1.0
# The Sentry profile sample rate for the API service; 0.01 means 1%.
API_SENTRY_PROFILES_SAMPLE_RATE=1.0
# Sentry DSN for the Web service. Default is empty; when empty,
# no monitoring information is reported to Sentry and error reporting is disabled.
WEB_SENTRY_DSN=
# ------------------------------
# Notion Integration Configuration
# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations
# ------------------------------
# Configure as "public" or "internal".
# Since Notion's OAuth redirect URL only supports HTTPS,
# if deploying locally, please use Notion's internal integration.
NOTION_INTEGRATION_TYPE=internal
# Notion OAuth client secret (used for public integration type)
NOTION_CLIENT_SECRET=
# Notion OAuth client id (used for public integration type)
NOTION_CLIENT_ID=
# Notion internal integration secret.
# If the value of NOTION_INTEGRATION_TYPE is "internal",
# you need to configure this variable.
NOTION_INTERNAL_SECRET=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
# ------------------------------
# Mail related configuration
# ------------------------------
# Mail type, support: resend, smtp
MAIL_TYPE=resend
# Default sender email address, used if not otherwise specified
MAIL_DEFAULT_SEND_FROM=
# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`.
RESEND_API_URL=https://api.resend.com
RESEND_API_KEY=your-resend-api-key
# SMTP server configuration, used when MAIL_TYPE is `smtp`
SMTP_SERVER=
SMTP_PORT=465
SMTP_USERNAME=
SMTP_PASSWORD=
SMTP_USE_TLS=true
SMTP_OPPORTUNISTIC_TLS=false
# ------------------------------
# Others Configuration
# ------------------------------
# Maximum length of segmentation tokens for indexing
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000
# Member invitation link valid time (hours),
# Default: 72.
INVITE_EXPIRY_HOURS=72
# Reset password token valid time (minutes),
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
# The sandbox service endpoint.
CODE_EXECUTION_ENDPOINT=http://sandbox:8194
CODE_EXECUTION_API_KEY=dify-sandbox
CODE_MAX_NUMBER=9223372036854775807
CODE_MIN_NUMBER=-9223372036854775808
CODE_MAX_DEPTH=5
CODE_MAX_PRECISION=20
CODE_MAX_STRING_LENGTH=80000
CODE_MAX_STRING_ARRAY_LENGTH=30
CODE_MAX_OBJECT_ARRAY_LENGTH=30
CODE_MAX_NUMBER_ARRAY_LENGTH=1000
CODE_EXECUTION_CONNECT_TIMEOUT=10
CODE_EXECUTION_READ_TIMEOUT=60
CODE_EXECUTION_WRITE_TIMEOUT=10
TEMPLATE_TRANSFORM_MAX_LENGTH=80000
# Workflow runtime configuration
WORKFLOW_MAX_EXECUTION_STEPS=500
WORKFLOW_MAX_EXECUTION_TIME=1200
WORKFLOW_CALL_MAX_DEPTH=5
MAX_VARIABLE_SIZE=204800
WORKFLOW_PARALLEL_DEPTH_LIMIT=3
WORKFLOW_FILE_UPLOAD_LIMIT=10
# HTTP request node in workflow configuration
HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
# Points to Amd64-02
#SSRF_PROXY_ALL_URL=socks5://10.0.0.246:2234
# SSRF Proxy server HTTP URL
SSRF_PROXY_HTTP_URL=http://10.0.0.246:1234
# SSRF Proxy server HTTPS URL
SSRF_PROXY_HTTPS_URL=http://10.0.0.246:2234
# ------------------------------
# Environment Variables for web Service
# ------------------------------
# The timeout for the text generation in millisecond
TEXT_GENERATION_TIMEOUT_MS=60000
# ------------------------------
# Environment Variables for db Service
# ------------------------------
PGUSER=${DB_USERNAME}
# The password for the default postgres user.
POSTGRES_PASSWORD=${DB_PASSWORD}
# The name of the default postgres database.
POSTGRES_DB=${DB_DATABASE}
# postgres data directory
PGDATA=/var/lib/postgresql/data/pgdata
# ------------------------------
# Environment Variables for sandbox Service
# ------------------------------
# The API key for the sandbox service
SANDBOX_API_KEY=dify-sandbox
# The mode in which the Gin framework runs
SANDBOX_GIN_MODE=release
# The timeout for the worker in seconds
SANDBOX_WORKER_TIMEOUT=15
# Enable network for the sandbox service
SANDBOX_ENABLE_NETWORK=true
# HTTP proxy URL for SSRF protection
SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128
# HTTPS proxy URL for SSRF protection
SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128
# The port on which the sandbox service runs
SANDBOX_PORT=8194
# ------------------------------
# Environment Variables for weaviate Service
# (only used when VECTOR_STORE is weaviate)
# ------------------------------
WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate
WEAVIATE_QUERY_DEFAULTS_LIMIT=25
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
WEAVIATE_DEFAULT_VECTORIZER_MODULE=none
WEAVIATE_CLUSTER_HOSTNAME=node1
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
# ------------------------------
# Environment Variables for SSRF Proxy
# ------------------------------
SSRF_HTTP_PORT=3128
SSRF_COREDUMP_DIR=/var/spool/squid
SSRF_REVERSE_PROXY_PORT=8194
SSRF_SANDBOX_HOST=sandbox
SSRF_DEFAULT_TIME_OUT=5
SSRF_DEFAULT_CONNECT_TIME_OUT=5
SSRF_DEFAULT_READ_TIME_OUT=5
SSRF_DEFAULT_WRITE_TIME_OUT=5
# ------------------------------
# docker env var for specifying vector db type at startup
# (based on the vector db type, the corresponding docker
# compose profile will be used)
# if you want to use unstructured, add ',unstructured' to the end
# ------------------------------
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate}
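# Usage sketch: the value above selects which vector-db services start, e.g.
#   COMPOSE_PROFILES=weaviate docker compose up -d
#   COMPOSE_PROFILES=qdrant,unstructured docker compose up -d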
# ------------------------------
# Docker Compose Service Expose Host Port Configurations
# ------------------------------
EXPOSE_NGINX_PORT=20080
EXPOSE_NGINX_SSL_PORT=20443
# ----------------------------------------------------------------------------
# ModelProvider & Tool Position Configuration
# Used to specify the model providers and tools that can be used in the app.
# ----------------------------------------------------------------------------
# Pin, include, and exclude tools
# Use comma-separated values with no spaces between items.
# Example: POSITION_TOOL_PINS=bing,google
POSITION_TOOL_PINS=
POSITION_TOOL_INCLUDES=
POSITION_TOOL_EXCLUDES=
# Pin, include, and exclude model providers
# Use comma-separated values with no spaces between items.
# Example: POSITION_PROVIDER_PINS=openai,openllm
POSITION_PROVIDER_PINS=
POSITION_PROVIDER_INCLUDES=
POSITION_PROVIDER_EXCLUDES=
# CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP
CSP_WHITELIST=
# Enable or disable create tidb service job
CREATE_TIDB_SERVICE_JOB_ENABLED=false
# Maximum number of tasks submitted to the ThreadPool for parallel node execution
MAX_SUBMIT_COUNT=100
# The maximum top-k value for RAG.
TOP_K_MAX_VALUE=10
# ------------------------------
# Plugin Daemon Configuration
# ------------------------------
DB_PLUGIN_DATABASE=dify_plugin
EXPOSE_PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
PLUGIN_DAEMON_URL=http://10.0.0.12:5002
PLUGIN_MAX_PACKAGE_SIZE=52428800
PLUGIN_PPROF_ENABLED=false
PLUGIN_DEBUGGING_HOST=0.0.0.0
PLUGIN_DEBUGGING_PORT=5003
EXPOSE_PLUGIN_DEBUGGING_HOST=localhost
EXPOSE_PLUGIN_DEBUGGING_PORT=5003
PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
PLUGIN_DIFY_INNER_API_URL=http://10.0.0.12:5001
ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id}
MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace.dify.ai
FORCE_VERIFYING_SIGNATURE=true
PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120
PLUGIN_MAX_EXECUTION_TIMEOUT=600
# PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple
PIP_MIRROR_URL=
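# Setup sketch: the plugin daemon uses its own database (DB_PLUGIN_DATABASE
# above). If it does not already exist, it can be created manually (assumes
# psql can reach the db host configured above):
#   PGPASSWORD='V2rayStrP@ss' psql -h 10.0.0.247 -U postgres -c 'CREATE DATABASE dify_plugin;'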


@@ -0,0 +1,69 @@
# ==================================================================
# WARNING: This file is auto-generated by generate_docker_compose
# Do not modify this file directly. Instead, update the .env.example
# or docker-compose-template.yaml and regenerate this file.
# ==================================================================
services:
# The postgres database.
db:
image: postgres:15-alpine
restart: always
environment:
PGUSER: ${PGUSER:-postgres}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-V2rayStrP@ss}
POSTGRES_DB: ${POSTGRES_DB:-dify}
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
command: >
postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
-c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-1024MB}'
-c 'work_mem=${POSTGRES_WORK_MEM:-64MB}'
-c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-128MB}'
-c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-2048MB}'
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
healthcheck:
test: [ 'CMD', 'pg_isready' ]
interval: 1s
timeout: 3s
retries: 30
ports:
- 5432:5432
# The Weaviate vector store.
weaviate:
image: semitechnologies/weaviate:1.19.0
restart: always
volumes:
# Mount the Weaviate data directory to the container.
- /mnt/ramdisk/weaviate:/var/lib/weaviate
environment:
# The Weaviate configurations
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-50}
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true}
DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
ports:
- 8080:8080
redis:
image: redis:6-alpine
restart: always
environment:
REDISCLI_AUTH: ${REDIS_PASSWORD:-V2rayStrP@ss}
volumes:
# Mount the redis data directory to the container.
- /mnt/ramdisk/redis/data:/data
# Set the redis password when startup redis server.
command: redis-server --requirepass ${REDIS_PASSWORD:-V2rayStrP@ss}
healthcheck:
test: [ 'CMD', 'redis-cli', 'ping' ]
ports:
- 6379:6379
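# Startup sketch (not part of the compose file): wait for the healthchecks
# above to pass before starting the api/worker stack on the other host:
#   until docker compose exec db pg_isready -q; do sleep 1; done
#   docker compose exec redis redis-cli ping   # REDISCLI_AUTH above handles auth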


@@ -0,0 +1,4 @@
129.146.57.94
10.0.0.247


@@ -2,3 +2,5 @@
### https://www.dejavu.moe/posts/selfhosted-bitwarden-with-cloudflare-tunnel/
Official usage guide: https://github.com/dani-garcia/vaultwarden/wiki


@@ -9,6 +9,12 @@ EMERGENCY_ACCESS_ALLOWED=true
# Log level
LOG_LEVEL=warn
# Whether to disable the admin token
DISABLE_ADMIN_TOKEN=true
# Whether to allow new signups
SIGNUPS_ALLOWED=false
# Require email verification for signups?
SIGNUPS_VERIFY=true
SIGNUPS_VERIFY_RESEND_TIME=3600
@@ -21,7 +27,7 @@ SIGNUPS_DOMAINS_WHITELIST=107421.xyz
ORG_CREATION_USERS=you@107421.xyz
# Generate an admin token quickly with: openssl rand -base64 48
ADMIN_TOKEN=WnzCmaUPhFsN5jUphoazxw4hlh2cfwjlUPPKJOXxYdLAApxuoFiCIgIjUO8HbaY7
# ADMIN_TOKEN=WnzCmaUPhFsN5jUphoazxw4hlh2cfwjlUPPKJOXxYdLAApxuoFiCIgIjUO8HbaY7
# Allow invitations?
INVITATIONS_ALLOWED=true


@@ -2,7 +2,7 @@ version: '3.3'
services:
vaultwarden:
image: vaultwarden/server:alpine
image: vaultwarden/server:1.34.3-alpine
container_name: vault-warden
restart: always
environment:


@@ -44,21 +44,21 @@ services:
cron:
image: nextcloud:27.0.1-apache
restart: always
volumes:
- /data/nextcloud/cron:/var/www/html
entrypoint: /cron.sh
depends_on:
- db
- redis


@@ -0,0 +1,191 @@
# This file is a template for docker compose deployment
# Copy this file to .env and change the values as needed
# Fully qualified domain name for the deployment. Replace localhost with your domain,
# such as http://mydomain.com.
FQDN=http://localhost
# PostgreSQL Settings
POSTGRES_HOST=postgres
POSTGRES_USER=postgres
POSTGRES_PASSWORD=password
POSTGRES_PORT=5432
POSTGRES_DB=postgres
# Postgres credential for supabase_auth_admin
SUPABASE_PASSWORD=root
# Redis Settings
REDIS_HOST=redis
REDIS_PORT=6379
# Minio Host
MINIO_HOST=minio
MINIO_PORT=9000
AWS_ACCESS_KEY=minioadmin
AWS_SECRET=minioadmin
# AppFlowy Cloud
## URL that connects to the gotrue docker container
APPFLOWY_GOTRUE_BASE_URL=http://gotrue:9999
## URL that connects to the postgres docker container. If your password contains special characters, instead of using ${POSTGRES_PASSWORD},
## you will need to convert them into url encoded format. For example, `p@ssword` will become `p%40ssword`.
APPFLOWY_DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
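# Hint: a quick way to URL-encode a password from the shell (illustrative; assumes python3 is available):
#   python3 -c 'import urllib.parse,sys; print(urllib.parse.quote(sys.argv[1], safe=""))' 'p@ssword'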
APPFLOWY_ACCESS_CONTROL=true
APPFLOWY_WEBSOCKET_MAILBOX_SIZE=6000
APPFLOWY_DATABASE_MAX_CONNECTIONS=40
## URL that connects to the redis docker container
APPFLOWY_REDIS_URI=redis://${REDIS_HOST}:${REDIS_PORT}
# admin frontend
## URL that connects to redis docker container
ADMIN_FRONTEND_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
## URL that connects to gotrue docker container
ADMIN_FRONTEND_GOTRUE_URL=http://gotrue:9999
## URL that connects to the cloud docker container
ADMIN_FRONTEND_APPFLOWY_CLOUD_URL=http://appflowy_cloud:8000
## Base Url for the admin frontend. If you use the default Nginx conf provided here, this value should be /console.
## If you want to keep the previous behaviour where admin frontend is served at the root, don't set this env variable,
## or set it to empty string.
ADMIN_FRONTEND_PATH_PREFIX=/console
# authentication key, change this and keep the key safe and secret
# self defined key, you can use any string
GOTRUE_JWT_SECRET=hello456
# Expiration time in seconds for the JWT token
GOTRUE_JWT_EXP=7200
# User sign up will automatically be confirmed if this is set to true.
# If you have OAuth2 set up or smtp configured, you can set this to false
# to enforce email confirmation or OAuth2 login instead.
# If you set this to false, you need to either set up SMTP
GOTRUE_MAILER_AUTOCONFIRM=true
# Number of emails that can be sent per minute
GOTRUE_RATE_LIMIT_EMAIL_SENT=100
# If you intend to use mail confirmation, you need to set the SMTP configuration below
# You would then need to set GOTRUE_MAILER_AUTOCONFIRM=false
# Check for logs in gotrue service if there are any issues with email confirmation
# Note that smtps will be used for port 465, otherwise plain smtp with optional STARTTLS
GOTRUE_SMTP_HOST=smtp.gmail.com
GOTRUE_SMTP_PORT=465
GOTRUE_SMTP_USER=email_sender@some_company.com
GOTRUE_SMTP_PASS=email_sender_password
GOTRUE_SMTP_ADMIN_EMAIL=comp_admin@some_company.com
# This user will be created when GoTrue starts successfully
# You can use this user to login to the admin panel
GOTRUE_ADMIN_EMAIL=admin@example.com
GOTRUE_ADMIN_PASSWORD=password
# Set this to true if users can only join by invite
GOTRUE_DISABLE_SIGNUP=false
# External URL where the GoTrue service is exposed.
API_EXTERNAL_URL=${FQDN}/gotrue
# GoTrue connect to postgres using this url. If your password contains special characters,
# replace ${SUPABASE_PASSWORD} with the url encoded version. For example, `p@ssword` will become `p%40ssword`
GOTRUE_DATABASE_URL=postgres://supabase_auth_admin:${SUPABASE_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# Refer to this for details: https://github.com/AppFlowy-IO/AppFlowy-Cloud/blob/main/doc/AUTHENTICATION.md
# Google OAuth2
GOTRUE_EXTERNAL_GOOGLE_ENABLED=false
GOTRUE_EXTERNAL_GOOGLE_CLIENT_ID=
GOTRUE_EXTERNAL_GOOGLE_SECRET=
GOTRUE_EXTERNAL_GOOGLE_REDIRECT_URI=${API_EXTERNAL_URL}/callback
# GitHub OAuth2
GOTRUE_EXTERNAL_GITHUB_ENABLED=false
GOTRUE_EXTERNAL_GITHUB_CLIENT_ID=
GOTRUE_EXTERNAL_GITHUB_SECRET=
GOTRUE_EXTERNAL_GITHUB_REDIRECT_URI=${API_EXTERNAL_URL}/callback
# Discord OAuth2
GOTRUE_EXTERNAL_DISCORD_ENABLED=false
GOTRUE_EXTERNAL_DISCORD_CLIENT_ID=
GOTRUE_EXTERNAL_DISCORD_SECRET=
GOTRUE_EXTERNAL_DISCORD_REDIRECT_URI=${API_EXTERNAL_URL}/callback
# Apple OAuth2
GOTRUE_EXTERNAL_APPLE_ENABLED=false
GOTRUE_EXTERNAL_APPLE_CLIENT_ID=
GOTRUE_EXTERNAL_APPLE_SECRET=
GOTRUE_EXTERNAL_APPLE_REDIRECT_URI=${API_EXTERNAL_URL}/callback
# File Storage
# Create the bucket if not exists on AppFlowy Cloud start up.
# Set this to false if the bucket has been created externally.
APPFLOWY_S3_CREATE_BUCKET=true
# This is where storage like images, files, etc. will be stored.
# By default, Minio is used as the default file storage which uses host's file system.
# Keep this as true if you are using other S3 compatible storage provider other than AWS.
APPFLOWY_S3_USE_MINIO=true
APPFLOWY_S3_MINIO_URL=http://${MINIO_HOST}:${MINIO_PORT} # change this if you are using a different address for minio
APPFLOWY_S3_ACCESS_KEY=${AWS_ACCESS_KEY}
APPFLOWY_S3_SECRET_KEY=${AWS_SECRET}
APPFLOWY_S3_BUCKET=appflowy
# Uncomment this if you are using AWS S3
# APPFLOWY_S3_REGION=us-east-1
# Uncomment this if you are using the Minio service hosted within this docker compose file
# This is so that the presigned URL generated by AppFlowy Cloud will use the publicly available minio endpoint.
# APPFLOWY_S3_PRESIGNED_URL_ENDPOINT=${FQDN}/minio-api
# AppFlowy Cloud Mailer
# Note that smtps (TLS) is always required, even for ports other than 465
APPFLOWY_MAILER_SMTP_HOST=smtp.gmail.com
APPFLOWY_MAILER_SMTP_PORT=465
APPFLOWY_MAILER_SMTP_USERNAME=email_sender@some_company.com
APPFLOWY_MAILER_SMTP_EMAIL=email_sender@some_company.com
APPFLOWY_MAILER_SMTP_PASSWORD=email_sender_password
APPFLOWY_MAILER_SMTP_TLS_KIND=wrapper # "none" "wrapper" "required" "opportunistic"
# Log level for the appflowy-cloud service
RUST_LOG=info
# PgAdmin
# Optional module to manage the postgres database
# You can access the pgadmin at http://your-host/pgadmin
# Refer to the APPFLOWY_DATABASE_URL for password when connecting to the database
PGADMIN_DEFAULT_EMAIL=admin@example.com
PGADMIN_DEFAULT_PASSWORD=password
# Portainer (username: admin)
PORTAINER_PASSWORD=password1234
# Cloudflare tunnel token
CLOUDFLARE_TUNNEL_TOKEN=
# NGINX
# Optional, change this if you want to use custom ports to expose AppFlowy
NGINX_PORT=80
NGINX_TLS_PORT=443
# AppFlowy AI
AI_OPENAI_API_KEY=
AI_SERVER_PORT=5001
AI_SERVER_HOST=ai
AI_DATABASE_URL=postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
AI_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
LOCAL_AI_TEST_ENABLED=false
AI_APPFLOWY_BUCKET_NAME=${APPFLOWY_S3_BUCKET}
AI_APPFLOWY_HOST=${FQDN}
AI_MINIO_URL=http://${MINIO_HOST}:${MINIO_PORT}
# AppFlowy Indexer
APPFLOWY_INDEXER_ENABLED=true
APPFLOWY_INDEXER_DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
APPFLOWY_INDEXER_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
APPFLOWY_INDEXER_EMBEDDING_BUFFER_SIZE=5000
# AppFlowy Collaborate
APPFLOWY_COLLABORATE_MULTI_THREAD=false
APPFLOWY_COLLABORATE_REMOVE_BATCH_SIZE=100
# AppFlowy Worker
APPFLOWY_WORKER_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
APPFLOWY_WORKER_DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# AppFlowy Web
# If your AppFlowy Web is hosted on a different domain, update this variable to the correct domain
APPFLOWY_WEB_URL=${FQDN}
# If you are running AppFlowy Web locally for development purpose, use the following value instead
# APPFLOWY_WEB_URL=http://localhost:3000

View File

@@ -0,0 +1,8 @@
docker run --rm \
-v $HOME/.Xauthority:/root/.Xauthority:rw \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v /dev/dri:/dev/dri \
-v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \
-v appflowy-data:/home/appflowy \
-e DISPLAY=${DISPLAY} \
appflowyio/appflowy_client:main
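
If the client starts but no window appears, the host X server may be rejecting the container's connection; a common (permissive) workaround on an X11 host is:

```bash
xhost +local:
```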

View File

@@ -1,6 +1,10 @@
# This file is a template for docker compose deployment
# Copy this file to .env and change the values as needed
# Fully qualified domain name for the deployment. Replace localhost with your domain,
# such as http://mydomain.com.
FQDN=http://0.0.0.0:23000
# PostgreSQL Settings
POSTGRES_HOST=postgres
POSTGRES_USER=postgres
@@ -15,6 +19,13 @@ SUPABASE_PASSWORD=V2ryStr@Pss
REDIS_HOST=redis
REDIS_PORT=6379
# Minio Host
MINIO_HOST=https://cnk8d6fazu16.compat.objectstorage.ap-seoul-1.oraclecloud.com
MINIO_PORT=443
AWS_ACCESS_KEY=9e413c6e66269bc65d7ec951d93ba9c6a9781f6e
AWS_SECRET=dkXD7PysjrhsTKfNIbKupUmtxdfOvYCyLXf0MXa4hnU
# AppFlowy Cloud
## URL that connects to the gotrue docker container
APPFLOWY_GOTRUE_BASE_URL=http://gotrue:9999
@@ -31,6 +42,12 @@ APPFLOWY_REDIS_URI=redis://${REDIS_HOST}:${REDIS_PORT}
ADMIN_FRONTEND_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
## URL that connects to gotrue docker container
ADMIN_FRONTEND_GOTRUE_URL=http://gotrue:9999
## URL that connects to the cloud docker container
ADMIN_FRONTEND_APPFLOWY_CLOUD_URL=http://appflowy_cloud:8000
## Base Url for the admin frontend. If you use the default Nginx conf provided here, this value should be /console.
## If you want to keep the previous behaviour where admin frontend is served at the root, don't set this env variable,
## or set it to empty string.
ADMIN_FRONTEND_PATH_PREFIX=/console
# authentication key, change this and keep the key safe and secret
# self defined key, you can use any string
@@ -42,7 +59,7 @@ GOTRUE_JWT_EXP=7200
# If you have OAuth2 set up or smtp configured, you can set this to false
# to enforce email confirmation or OAuth2 login instead.
# If you set this to false, you need to either set up SMTP
GOTRUE_MAILER_AUTOCONFIRM=true
GOTRUE_MAILER_AUTOCONFIRM=false
# Number of emails that can be sent per minute
GOTRUE_RATE_LIMIT_EMAIL_SENT=100
@@ -66,7 +83,7 @@ GOTRUE_ADMIN_PASSWORD=lovemm.23
# If you are using a different domain, you need to change the redirect_uri in the OAuth2 configuration
# Make sure that this domain is accessible to the user
# Make sure no endswith /
API_EXTERNAL_URL=https://note.107421.xyz
API_EXTERNAL_URL=${FQDN}/gotrue
# In docker environment, `postgres` is the hostname of the postgres service
# GoTrue connect to postgres using this url
@@ -102,15 +119,15 @@ APPFLOWY_S3_CREATE_BUCKET=false
# By default, Minio is used as the default file storage which uses host's file system.
# Keep this as true if you are using other S3 compatible storage provider other than AWS.
APPFLOWY_S3_USE_MINIO=true
APPFLOWY_S3_MINIO_URL=https://cnk8d6fazu16.compat.objectstorage.ap-seoul-1.oraclecloud.com
APPFLOWY_S3_ACCESS_KEY=9e413c6e66269bc65d7ec951d93ba9c6a9781f6e
APPFLOWY_S3_SECRET_KEY=dkXD7PysjrhsTKfNIbKupUmtxdfOvYCyLXf0MXa4hnU=
APPFLOWY_S3_MINIO_URL=http://${MINIO_HOST}:${MINIO_PORT}
APPFLOWY_S3_ACCESS_KEY=${AWS_ACCESS_KEY}
APPFLOWY_S3_SECRET_KEY=${AWS_SECRET}
APPFLOWY_S3_BUCKET=seoul-2
APPFLOWY_S3_REGION=ap-seoul-1
# AppFlowy Cloud Mailer
# Note that smtps (TLS) is always required, even for ports other than 465
APPFLOWY_MAILER_SMTP_HOST=smtp.gmail.com
APPFLOWY_MAILER_SMTP_HOST=https://smtp.gmail.com
APPFLOWY_MAILER_SMTP_PORT=465
APPFLOWY_MAILER_SMTP_USERNAME=zeaslity@gmail.com
APPFLOWY_MAILER_SMTP_PASSWORD=loveff.cxc.56320
@@ -146,11 +163,10 @@ APPFLOWY_AI_SERVER_PORT=5001
APPFLOWY_AI_SERVER_HOST=ai
APPFLOWY_AI_DATABASE_URL=postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
APPFLOWY_LOCAL_AI_TEST_ENABLED=false
AI_APPFLOWY_BUCKET_NAME=${APPFLOWY_S3_BUCKET}
AI_APPFLOWY_HOST=${FQDN}
AI_MINIO_URL=http://${MINIO_HOST}:${MINIO_PORT}
# AppFlowy History
APPFLOWY_GRPC_HISTORY_ADDRS=http://localhost:50051
APPFLOWY_HISTORY_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
APPFLOWY_HISTORY_DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# AppFlowy Indexer
APPFLOWY_INDEXER_ENABLED=true
@@ -166,4 +182,4 @@ APPFLOWY_WORKER_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
APPFLOWY_WORKER_DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# AppFlowy Web
APPFLOWY_WEB_URL=http://localhost:3000
APPFLOWY_WEB_URL=${FQDN}

View File

@@ -0,0 +1,514 @@
<p align="center">
<img src="./assets/logo-3071751.jpg">
</p>
# 🤖️ TeleChat
[English](README.md) | [Chinese](README_CN.md)
<p align="center">
<a href="https://t.me/+_01cz9tAkUc1YzZl">
<img src="https://img.shields.io/badge/Join Telegram Group-blue?&logo=telegram">
</a>
<a href="https://t.me/chatgpt68_bot">
<img src="https://img.shields.io/badge/Telegram Bot-grey?&logo=Probot">
</a>
<a href="https://hub.docker.com/repository/docker/yym68686/chatgpt">
<img src="https://img.shields.io/docker/pulls/yym68686/chatgpt?color=blue" alt="docker pull">
</a>
</p>
The ChatGPT Telegram bot is a powerful Telegram bot that can use several mainstream large-language-model APIs, including GPT-3.5/4/4 Turbo/4o/o1, DALL·E 3, the Claude 2.1/3/3.5 API, Gemini 1.5 Pro/Flash, Vertex AI (Claude and Gemini series), Groq Mixtral-8x7b/LLaMA2-70b, and DuckDuckGo (gpt-4o-mini, claude-3-haiku, Meta-Llama-3.1-70B, Mixtral-8x7B). It lets users hold efficient conversations and search for information on Telegram.
## ✨ Features
- **Multiple AI models**: Supports GPT-3.5/4/4 Turbo/4o/o1, DALL·E 3, the Claude 2.1/3/3.5 API, Gemini 1.5 Pro/Flash, Vertex AI (Claude and Gemini series), Groq Mixtral-8x7b/LLaMA2-70b, and DuckDuckGo (gpt-4o-mini, claude-3-haiku, Meta-Llama-3.1-70B, Mixtral-8x7B). Also supports one-api/new-api/[uni-api](https://github.com/yym68686/uni-api). Uses a self-developed API request backend [SDK](https://github.com/yym68686/ModelMerge) and does not depend on the OpenAI SDK.
- **Multimodal Q&A**: Supports questions about voice, audio, images, and PDF/TXT/MD/Python documents. Users can upload files directly in the chat box.
- **Group topic mode**: Topic mode can be enabled in group chats, isolating the API, conversation history, plugin configuration, and preferences between topics.
- **Rich plugin system**: Supports web search (DuckDuckGo and Google), URL summarization, arXiv paper summarization, and a code interpreter.
- **User-friendly interface**: Models can be switched flexibly inside the chat window, with typewriter-style streaming output. Precise Markdown message rendering is provided by another [project](https://github.com/yym68686/md2tgmd) of mine.
- **Efficient message handling**: Processes messages asynchronously and answers in multiple threads, with isolated conversations and a unique conversation per user.
- **Long-message handling**: Automatically merges long user messages to get around Telegram's single-message length limit; when the bot's reply exceeds the limit, it is split into multiple messages.
- **Multi-user isolation**: Supports conversation and configuration isolation, with a choice between multi-user and single-user modes.
- **Question prediction**: Automatically generates follow-up questions the user is likely to ask next.
- **Multilingual interface**: Supports Simplified Chinese, Traditional Chinese, Russian, and English.
- **Whitelist, blacklist, and admin settings**: Supports configuring a whitelist, a blacklist, and administrators.
- **Inline mode**: Users can @ the bot in any chat window to generate answers without asking in the bot's own chat.
- **Easy deployment**: One-click deployment to Koyeb, Zeabur, or Replit (genuinely zero-cost and foolproof), with kuma keep-alive support plus Docker and fly.io deployment.
## 🍃 Environment Variables
Variables related to the bot's core settings:
| Variable | Description | Required? |
|---------------|-------------|-----------|
| BOT_TOKEN | Telegram bot token. Create a bot with [BotFather](https://t.me/BotFather) to obtain it. | **Yes** |
| API | OpenAI or third-party API key. | No |
| GPT_ENGINE | The default Q&A model, `gpt-4o` by default. It can be switched freely with the bot's `/info` command, so in principle it does not need to be set. | No |
| WEB_HOOK | Whenever the Telegram bot receives a user message, the message is delivered to WEB_HOOK, where the bot listens and processes it promptly. | No |
| API_URL | Not needed if you use the official OpenAI API. If you use a third-party API, fill in the third-party proxy site. Default: https://api.openai.com/v1/chat/completions | No |
| GROQ_API_KEY | Official Groq API key. | No |
| GOOGLE_AI_API_KEY | Official Google AI API key. Grants access to the Gemini series, including Gemini 1.5 Pro and Gemini 1.5 Flash. | No |
| VERTEX_PRIVATE_KEY | Description: private key of the Google Cloud Vertex AI service account. Format: the value of the private_key field from the service account's JSON key, wrapped in double quotes. How to obtain: create a service account in the Google Cloud console, generate a JSON key file, and set this variable to the private_key field's value wrapped in double quotes. | No |
| VERTEX_PROJECT_ID | Description: your Google Cloud project ID. Format: a string, usually of lowercase letters, digits, and hyphens. How to obtain: from the project selector in the Google Cloud console. | No |
| VERTEX_CLIENT_EMAIL | Description: email address of the Vertex AI service account. Format: usually a string like "service-account-name@developer.gserviceaccount.com". How to obtain: generated when the service account is created, or visible in the service account details under "IAM & Admin" in the Google Cloud console. | No |
| claude_api_key | Official Claude API key. | No |
| CLAUDE_API_URL | Not needed if you use the official Anthropic API. If you use a third-party Anthropic API, fill in the third-party proxy site. Default: https://api.anthropic.com/v1/messages | No |
| NICK | Empty by default. NICK is the bot's name: the bot only responds to messages that start with NICK, otherwise it responds to any message. In group chats especially, the bot replies to all messages when NICK is unset. | No |
| GOOGLE_API_KEY | Needed for Google search. Without it, the bot provides DuckDuckGo search by default. | No |
| GOOGLE_CSE_ID | Needed for Google search, together with GOOGLE_API_KEY. | No |
| whitelist | Which users may access the bot: authorized user IDs joined with ','. Default `None`, meaning the bot is open to everyone. | No |
| BLACK_LIST | Which users are banned from the bot: user IDs joined with ','. Default `None`. | No |
| ADMIN_LIST | The administrator list. Only administrators can configure the bot with the `/info` command. | No |
| GROUP_LIST | Which groups may use the bot: group IDs joined with ','. As long as a group's ID is in GROUP_LIST, all of its members can use the bot even if they are not whitelisted. | No |
| CUSTOM_MODELS | Custom model names joined with ','. To remove a default model, prefix its name with a hyphen (-). To remove all default models, use `-all`. | No |
| CHAT_MODE | Multi-user mode: different users' configurations are not shared. With `global`, all users share one configuration; with `multiusers`, each user's configuration is independent. | No |
| temperature | The LLM temperature. Default `0.5`. | No |
| GET_MODELS | Whether to fetch the supported models via the API. Default `False`. | No |
| SYSTEMPROMPT | The system prompt, as a string, e.g. `SYSTEMPROMPT=You are ChatGPT, a large language model trained by OpenAI. Respond conversationally`. Default `None`. It only takes effect when `CHAT_MODE` is `global`; when `CHAT_MODE` is `multiusers`, no value of this variable will modify any user's system prompt, because users do not want the system prompt they set themselves replaced by a global one. | No |
| LANGUAGE | The bot's default display language, for both buttons and conversation. Default `English`. Only `English`, `Simplified Chinese`, `Traditional Chinese`, and `Russian` are supported. It can also be changed with the `/info` command after deployment. | No |
| CONFIG_DIR | The folder storing user configuration. The bot reads it on every start, so users do not lose their settings across restarts. With a local Docker deployment, persist it by mounting the folder with `-v`. Default `user_configs`. | No |
| RESET_TIME | How often, in seconds, the bot resets chat history. Every RESET_TIME seconds the bot resets the chat history of all users except those in the admin list; each user's reset time is computed from their last question, rather than all users resetting at once. Default `3600` seconds, minimum `60`. | No |
Variables related to preferences; these can also be changed after startup via the `/info` command under the `Preferences` button:
| Variable | Description | Required? |
|---------------|-------------|-----------|
| PASS_HISTORY | Default `9999`. The bot remembers conversation history and takes the context into account in its next reply. If set to `0`, it forgets the history and considers only the current exchange. Must be greater than or equal to 0. Corresponds to the conversation-history toggle in Preferences. | No |
| LONG_TEXT | If the user's input exceeds Telegram's length limit and arrives as several messages in quick succession, the bot treats them as one message. Default `True`. Corresponds to the long-text-merge toggle in Preferences. | No |
| IMAGEQA | Whether image Q&A is enabled, i.e. whether the model may answer questions about image content. Default `True`. Corresponds to the image-Q&A toggle in Preferences. | No |
| LONG_TEXT_SPLIT | When the bot's reply exceeds Telegram's limit, it is split into several messages. Default `True`. Corresponds to the long-text-split toggle in Preferences. | No |
| FILE_UPLOAD_MESS | When a file or image has been uploaded and processed, the bot sends a success notice. Default `True`. Corresponds to the upload-success-message toggle in Preferences. | No |
| FOLLOW_UP | Automatically generates several related follow-up questions for the user to choose from. Default `False`. Corresponds to the follow-up-questions toggle in Preferences. | No |
| TITLE | Whether to show the model name at the start of each reply. Default `False`. Corresponds to the model-title toggle in Preferences. | No |
<!-- | TYPING | Whether to show the "typing" status while the bot is replying. Default `False`. | No | -->
| REPLY | Whether the bot should answer using Telegram's "reply" format. Default `False`. Corresponds to the reply-message toggle in Preferences. | No |
Variables related to plugin settings:
| Variable | Description | Required? |
|---------------|-------------|-----------|
| SEARCH | Whether to enable the search plugin. Default `True`. | No |
| URL | Whether to enable the URL summarization plugin. Default `True`. | No |
| ARXIV | Whether to enable the arXiv paper summarization plugin. Default `False`. | No |
| CODE | Whether to enable the code interpreter plugin. Default `False`. | No |
| IMAGE | Whether to enable the image generation plugin. Default `False`. | No |
| DATE | Whether to enable the date plugin. Default `False`. | No |
## Remote Deployment on Koyeb
There are two ways to deploy on Koyeb: one-click deployment of the prebuilt Docker image, or importing this repository. Both are free. The first is simpler but cannot auto-update; the second is slightly more involved but can update automatically.
### One-Click Deployment
Click the button below to deploy the prebuilt Docker image in one click:
[![Deploy to Koyeb](https://www.koyeb.com/static/images/deploy/button.svg)](https://app.koyeb.com/deploy?type=docker&image=docker.io/yym68686/chatgpt:latest&name=chatbot)
⚠️ Note: when deploying on Koyeb you must add the `WEB_HOOK` environment variable, otherwise the bot cannot receive messages. Use a string like `https://appname.koyeb.app` as its value; Koyeb assigns the subdomain automatically.
### Repository Deployment
1. Fork this repository: [click here to fork](https://github.com/yym68686/ChatGPT-Telegram-Bot/fork)
2. When deploying, choose the repository mode, set `Run command` to `python3 bot.py` and `Exposed ports` to `8080`.
3. [Install pull](https://github.com/apps/pull) to keep your fork synced with this repository automatically.
## Remote Deployment on Zeabur
One-click deployment:
[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/R5JY5O?referralCode=yym68686)
If you want to receive future feature updates, the following deployment method is recommended:
- First fork this repository, then register at [Zeabur](https://zeabur.com). Zeabur currently does not support free Docker container deployment; to deploy this project's bot on Zeabur you need to upgrade to the Developer Plan. Fortunately, Zeabur has launched a [sponsorship program](https://zeabur.com/docs/billing/sponsor) that gives every contributor to this project one month of the Developer Plan. Pull requests for new features are welcome.
- Import from your own GitHub repository.
- Set the required environment variables and redeploy.
- For future feature updates, simply sync this repository into your own and redeploy on Zeabur to pick up the latest features.
## Remote Deployment on Replit
[![Run on Repl.it](https://replit.com/badge/github/yym68686/ChatGPT-Telegram-Bot)](https://replit.com/new/github/yym68686/ChatGPT-Telegram-Bot)
After importing the GitHub repository, set the run command:
```bash
pip install -r requirements.txt > /dev/null && python3 bot.py
```
In the tools sidebar, open "Secrets" and add the environment variables the bot needs, where:
- WEB_HOOK: Replit automatically assigns you a domain; fill in `https://appname.username.repl.co`
- Remember to enable "Always On"
Click the Run button at the top of the screen to start the bot.
## Remote Deployment on fly.io
Official docs: https://fly.io/docs/
Deploy a fly.io application from the Docker image:
```bash
flyctl launch --image yym68686/chatgpt:latest
```
Enter the application name when prompted, and choose "No" when asked to initialize Postgresql or Redis.
Follow the prompts to deploy. The official dashboard provides a subdomain that can be used to reach the service.
Set environment variables:
```bash
flyctl secrets set BOT_TOKEN=bottoken
flyctl secrets set API=
# optional
flyctl secrets set WEB_HOOK=https://flyio-app-name.fly.dev/
flyctl secrets set NICK=javis
```
List all environment variables:
```bash
flyctl secrets list
```
Unset environment variables:
```bash
flyctl secrets unset MY_SECRET DATABASE_URL
```
SSH into the fly.io container:
```bash
flyctl ssh issue --agent
# ssh connect
flyctl ssh establish
```
Check that the webhook URL is registered correctly:
```bash
curl "https://api.telegram.org/bot<token>/getWebhookInfo"
```
## Local Deployment with Docker
Start the container:
```bash
docker run -p 80:8080 --name chatbot -dit \
-e BOT_TOKEN=your_telegram_bot_token \
-e API= \
-e API_URL= \
-v ./user_configs:/home/user_configs \
yym68686/chatgpt:latest
```
Or, if you prefer Docker Compose, here is a docker-compose.yml example:
```yaml
version: "3.5"
services:
chatgptbot:
container_name: chatgptbot
image: yym68686/chatgpt:latest
environment:
- BOT_TOKEN=
- API=
- API_URL=
volumes:
- ./user_configs:/home/user_configs
ports:
- 80:8080
```
Run the Docker Compose containers in the background:
```bash
docker-compose pull
docker-compose up -d
# uni-api
docker-compose -f docker-compose-uni-api.yml up -d
```
Build the repository's Docker image and push it to Docker Hub:
```bash
docker build --no-cache -t chatgpt:latest -f Dockerfile.build --platform linux/amd64 .
docker tag chatgpt:latest yym68686/chatgpt:latest
docker push yym68686/chatgpt:latest
```
Restart the Docker image with one command:
```bash
set -eu
docker pull yym68686/chatgpt:latest
docker rm -f chatbot
docker run -p 8080:8080 -dit --name chatbot \
-e BOT_TOKEN= \
-e API= \
-e API_URL= \
-e GOOGLE_API_KEY= \
-e GOOGLE_CSE_ID= \
-e claude_api_key= \
-v ./user_configs:/home/user_configs \
yym68686/chatgpt:latest
docker logs -f chatbot
```
This script restarts the Docker image with a single command. It first removes the existing Docker container named "chatbot" if it exists, then runs a new container named "chatbot", exposing port 8080 and setting the various environment variables, using the image "yym68686/chatgpt:latest". Finally, it tails the "chatbot" container's logs.
## 🚀 Local Deployment from Source
python >= 3.10
To run the bot directly from source without Docker, clone the repository:
```bash
git clone --recurse-submodules --depth 1 -b main --quiet https://github.com/yym68686/ChatGPT-Telegram-Bot.git
```
Install the dependencies:
```bash
pip install -r requirements.txt
```
Set the environment variables:
```bash
export BOT_TOKEN=
export API=
```
Run:
```bash
python bot.py
```
## 🧩 Plugins
The bot supports several plugins, including DuckDuckGo and Google search, URL summarization, arXiv paper summarization, DALL·E 3 image generation, and a code interpreter. You can enable or disable them through environment variables.
- How do I develop a plugin?
All plugin code lives in this repository's git submodule ModelMerge. ModelMerge is a standalone repository I develop that handles API requests, conversation history management, and related functionality. When you clone this repository with git clone's --recurse-submodules flag, ModelMerge is downloaded automatically. Within this repository, all plugin code lives at the relative path `ModelMerge/src/ModelMerge/plugins`; add your own plugin code under that directory. The development workflow is:
1. Create a new Python file under `ModelMerge/src/ModelMerge/plugins`, for example `myplugin.py`. Import your plugin in `ModelMerge/src/ModelMerge/plugins/__init__.py`, for example `from .myplugin import MyPlugin`.
2. Add your plugin's full request body, in OpenAI tool format, to the `function_call_list` variable in `ModelMerge/src/ModelMerge/tools/chatgpt.py`. You do not need to write Claude or Gemini tool formats separately: fill in only the OpenAI-format tool body, and the program converts it to the Claude/Gemini tool format automatically when calling those APIs. `function_call_list` is a dictionary whose keys are plugin names and whose values are request bodies; keep your key unique and do not clash with existing plugin keys.
3. Add a key-value pair to the `PLUGINS` dictionary in `ModelMerge/src/ModelMerge/plugins/config.py`: the key is the plugin name, and the value is its environment variable with a default. The default is the plugin's switch: `True` means on by default, `False` means off until the user enables it via `/info`.
4. Finally, add the plugin invocation code to the `get_tools_result_async` function in `ModelMerge/src/ModelMerge/plugins/config.py`; the bot calls this function whenever it needs to invoke a plugin.
Once these steps are done, your plugin is ready to use in the bot. 🎉
## 📄 FAQ
- What is the WEB_HOOK environment variable for, and how should it be used?
WEB_HOOK is a webhook address. Concretely, when the Telegram bot receives a user message, the message goes to Telegram's servers, which forward it to the server at the WEB_HOOK address configured for the bot, so the bot runs its handler almost immediately. Receiving messages through WEB_HOOK gives faster response times than running without it.
When deploying on platforms such as Zeabur, Replit, or Koyeb, the platform provides a domain that you fill into WEB_HOOK so the bot can receive user messages. Not setting WEB_HOOK also works; responses are slightly slower, though the difference is small, so in general WEB_HOOK does not need to be set.
When deploying the bot on your own server, you need a reverse proxy such as nginx or caddy to forward the messages Telegram sends to your server, so the bot can receive user messages. Set WEB_HOOK to your server's domain and forward traffic for it to the bot's host and port. For example, in caddy you can configure /etc/caddy/Caddyfile like this:
```caddy
your_webhook_domain.com {
reverse_proxy localhost:8082
}
```
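If the webhook never fires, it usually means it was never registered; a minimal sketch of registering it manually against the Telegram Bot API (replace `<token>` and the domain with your own values; many deployments register it automatically on startup, so treat this as illustrative):
```bash
curl "https://api.telegram.org/bot<token>/setWebhook?url=https://your_webhook_domain.com/"
```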
- Why can't I use Google search?
DuckDuckGo search is provided by default. Google's official search API requires an application by the user. It can supply real-time information GPT previously could not answer, such as today's trending topics on Weibo, today's weather at a given place, or the progress of a person or news event.
- Why can't I use search even after adding the Google search API?
There are a few possibilities:
1. Only LLM APIs that support tool use can use search. This project currently supports search only with the OpenAI, Claude, and Gemini series APIs. Other providers' APIs do not yet support tool use in this project; if there is a provider you would like supported, contact the maintainer.
2. If you use the OpenAI, Claude, or Gemini APIs but search does not work, the search feature may not be enabled. Check it via the `/info` command under Preferences.
3. If you use the OpenAI, Claude, or Gemini APIs, make sure you are using the official API. A third-party relay provider may serve the API by reverse-engineering the web UI, which cannot use tool calls, meaning none of this project's plugins will work. If you are certain you are on the official API and search still fails, contact the developers.
- How do I switch models?
Use the "/info" command in the chat window to switch between GPT-3.5/4/4o and other models.
- Can it be deployed in a group?
Yes. It supports a group whitelist to prevent abuse and information leaks.
- Why won't the bot talk after I add it to a group?
If this is the first time you add the bot to a group chat, set group privacy to disabled for it in BotFather, then remove the bot from the group and add it back so it works normally.
The second method is to make the bot a group administrator. If you want to add the bot to a group where you are not an administrator, however, the first method is more suitable.
Another possibility is that GROUP_LIST is not set to the current group chat's ID. Check GROUP_LIST: it takes group IDs, not group names, and a group ID starts with a minus sign followed by a string of digits.
- How do the GROUP_LIST, ADMIN_LIST, and whitelist settings affect the bot's behavior?
If no whitelist is set, everyone can use the bot. If a whitelist is set, only whitelisted users can use it. If GROUP_LIST is set, only groups in GROUP_LIST can use the bot. If both a whitelist and GROUP_LIST are set, everyone in the listed groups can use the bot, but only whitelisted users can message it privately. If ADMIN_LIST is set, only users in it can change the bot's settings with the /info command; otherwise anyone can. GROUP_LIST may also contain channels; a channel ID starts with a minus sign followed by a string of digits.
- How should I set API_URL?
API_URL accepts every suffix, including https://api.openai.com/v1/chat/completions, https://api.openai.com/v1, and https://api.openai.com/. The bot automatically routes different purposes to different endpoints.
- Is it necessary to configure the web_hook environment variable?
web_hook is not a mandatory environment variable. You only need to set the domain (which must match WEB_HOOK) and whatever other environment variables your deployment's features require.
- I deployed the bot with docker compose. If documents live on the local server, which directory should they be mounted to, and do I need extra configuration or code changes?
You can send documents to the bot directly through the chat box and it parses them automatically. To use the document Q&A feature you must enable conversation history. No extra handling of the documents is needed.
- I still can't get it to work... I want to use it in a group; I set ADMIN_LIST to myself and GROUP_LIST to that group, leaving the whitelist empty. But only I can use it in the group, and other members are told they lack permission. What's going on?
Troubleshooting guide: double-check that GROUP_LIST is correct. A Telegram group ID starts with a minus sign followed by a string of digits. If yours does not, re-fetch the group ID with this [bot](https://t.me/getidsbot).
- I uploaded a document but the bot doesn't answer based on its content. What's wrong?
To use document Q&A you must first enable history, either via the `/info` command or by setting the `PASS_HISTORY` environment variable to a value greater than 2, which enables history by default. Note that enabling history incurs extra cost, which is why this project disables it by default; with default settings, document Q&A is unavailable until you enable history manually.
- After setting `NICK`, the bot doesn't respond when I @ it; it only replies when the message starts with the nickname. How can it respond to both the nickname and @botname?
In group chats, if the `NICK` environment variable is unset the bot receives and answers every group message, so setting `NICK` is necessary. Once set, the bot only answers messages that begin with `NICK`. So if you want the bot to respond when @-ed, simply set NICK to @botname; when you @ the bot in the group, the message starts with @botname and the bot replies.
- How many messages does the history keep?
All other models use their official context length settings; for example, `gpt-3.5-turbo-16k` has a 16k context, `gpt-4o` has 128k, and `Claude3/3.5` has 200k. The limit exists to save users cost, since most scenarios do not need a large context.
- How do I remove default model names from the model list?
Use the `CUSTOM_MODELS` environment variable. For example, to add gpt-4o and remove the gpt-3.5 model from the list, set `CUSTOM_MODELS` to `gpt-4o,-gpt-3.5`. To remove all default models at once, set it to `-all,gpt-4o`.
- How exactly does conversation isolation work?
Conversations are always isolated per window, not per user: the same group chat window, the same topic, and the same private chat window each count as one conversation. CHAT_MODE only affects whether configuration is isolated. In multi-user mode, each user's plugin configuration, preferences, and so on are independent; in single-user mode, all users share the same plugin configuration and preferences. Conversation history, however, is always isolated. Conversation isolation protects user privacy, ensuring that a user's history, plugin configuration, and preferences are not visible to other users.
- Why hasn't the Docker image been updated in a long time?
The Docker image only stores the program's runtime environment. That environment is currently stable and its dependencies barely change, so the image has not been updated. Each time the Docker image is redeployed it pulls the latest code, so there is no need to worry about image updates.
- Why does the container report "http connect error or telegram.error.TimedOut: Timed out" after startup?
This is usually caused by the server running Docker being unable to reach Telegram's servers, or by instability on Telegram's side.
1. In most cases, restarting the service, checking the server's network environment, or waiting for the Telegram service to recover is enough.
2. You can also try communicating with Telegram's servers through the webhook, which may resolve the issue.
- How do I make Docker retry indefinitely instead of stopping outright?
Docker's `--restart unless-stopped` flag sets the container's restart policy. Specifically:
1. unless-stopped: the container restarts automatically whenever it stops, unless it was stopped manually. In other words, if the container stops because of an error or a system reboot, it restarts automatically; but if you stop it manually (for example with docker stop), it will not restart on its own.
This flag is particularly useful for services that must run continuously, since it lets them recover from unexpected interruptions without manual intervention.
2. Example: suppose you have a Docker container running a web server, and you want it to restart automatically after crashes or system reboots but not after you stop it manually. You can use:
```shell
docker run -d --name my-web-server -p 80:80 --restart unless-stopped my-web-server-image
```
In this example, the web server container named my-web-server restarts automatically unless you stop it manually.
- Do I need to re-enter the prompt after switching models?
Yes: switching models resets the history, so the prompt must be entered again.
- What is an appropriate PASS_HISTORY value?
PASS_HISTORY equals exactly the number of messages in the conversation history. The recommended value is 2, because the system prompt takes up one message slot. If set to 0, PASS_HISTORY is automatically reset to 2 so the conversation works properly. With PASS_HISTORY less than or equal to 2, the bot effectively remembers only the current exchange (one question and one answer) and will not remember earlier Q&A next time. There is no upper bound, but note that the more messages the history holds, the higher the cost of each conversation. When unset, the default is 9999, meaning up to 9999 messages of history.
- Can BOT_TOKEN hold multiple tokens?
No; multiple bot tokens will be supported in the future.
- How do I use the bot commands?
1. `/info`: shows the bot's configuration, including the current model, API URL, API key, and so on. It can also change the bot's display language, preferences, and plugin settings.
2. `/start`: shows the bot's usage instructions and feature overview. You can set the API key with `/start`: with an official OpenAI key, use `/start your_api_key`; with a third-party key, use `/start https://your_api_url your_api_key`.
3. `/reset`: clears the bot's conversation messages and forces it to stop generating a reply. To reset the system prompt, use `/reset your_system_prompt`. However, `/reset` never restores the display language, preferences, plugin settings, active model, API URL, API key, or system prompt.
- What if deployment on Koyeb fails?
Koyeb's free tier can be a bit unstable, so failed deployments are common. Try redeploying; if that still fails, consider switching to another platform. 😊
- Why does a default model name reappear in /info after I removed it with CUSTOM_MODELS?
If you deploy with `docker-compose.yml`, do not put quotes around the value of `CUSTOM_MODELS`. Wrong: `CUSTOM_MODELS="gpt-4o,-gpt-3.5"`; this breaks environment variable parsing, so the removal is parsed as deleting a model named `gpt-3.5"`, and the default model name `gpt-3.5` is not removed. Correct: `CUSTOM_MODELS=gpt-4o,-gpt-3.5`.
## References
https://core.telegram.org/bots/api
https://github.com/acheong08/ChatGPT
https://github.com/franalgaba/chatgpt-telegram-bot-serverless
https://github.com/gpchelkin/scdlbot/blob/d64d14f6c6d357ba818e80b8a0a9291c2146d6fe/scdlbot/__main__.py#L8
The Markdown rendering used for messages is another [project](https://github.com/yym68686/md2tgmd) of mine.
duckduckgo AI: https://github.com/mrgick/duck_chat
## Sponsors
We are grateful for the support of the following sponsors:
<!-- $300+$380+¥1200+¥300+$30+$25+$20+¥50 -->
- @fasizhuanqian: 300 USDT
- @ZETA: $380
- @yuerbujin: ¥1200
- @RR5AM: ¥300
- @IKUNONHK: 30 USDT
- @miya0v0: 30 USDT
- [@Zeabur](https://zeabur.com?referralCode=yym68686&utm_source=yym68686&utm_campaign=oss): $25
- @Bill_ZKE: 20 USDT
- @wagon_look: ¥50
<!-- [![Deployed on Zeabur](https://zeabur.com/deployed-on-zeabur-dark.svg)](https://zeabur.com?referralCode=yym68686&utm_source=yym68686&utm_campaign=oss) -->
## How to Sponsor Us
If you would like to support our project, you can sponsor us in the following ways:
1. [PayPal](https://www.paypal.me/yym68686)
2. [USDT-TRC20](https://pb.yym68686.top/~USDT-TRC20), USDT-TRC20 wallet address: `TLFbqSv5pDu5he43mVmK1dNx7yBMFeN7d8`
3. [WeChat](https://pb.yym68686.top/~wechat)
4. [Alipay](https://pb.yym68686.top/~alipay)
Thank you for your support!
## Star History
<a href="https://github.com/yym68686/ChatGPT-Telegram-Bot/stargazers">
<img width="500" alt="Star History Chart" src="https://api.star-history.com/svg?repos=yym68686/ChatGPT-Telegram-Bot&type=Date">
</a>
## License
This project is licensed under GPLv3, meaning you are free to copy, distribute, and modify the software, provided that all modifications and derivative works are released under the same license.

View File

@@ -0,0 +1,22 @@
version: "3.5"
services:
chatgptbot:
container_name: chatgptbot
image: yym68686/chatgpt:latest
environment:
- BOT_TOKEN=7908126551:AAE8VhwwfcZ3ru-ecJJo_bMADYADgh1Shzs
- whitelist=6868680170
- ADMIN_LIST=6868680170
- temperature=0.7
- LANGUAGE=Simplified Chinese
- PASS_HISTORY=5
- GOOGLE_AI_API_KEY=AIzaSyBv2JN5aY_OKDI5e1aVEf6uDQli65X9NZM
- API_URL=https://api.x.ai/v1/chat/completions
- API=xai-pQCto8hXbSLey5rHjohMZGjqaOlSwgFhofEckr5a7q9wQaJbpAV5xyEVGoq8JbhBoX1QVgUm5GzK2DkG
- GROQ_API_KEY=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
- CUSTOM_MODELS=grok-2-latest
- GPT_ENGINE=grok-2-latest
volumes:
- ./user_configs:/home/user_configs
ports:
- 3080:8080

View File

@@ -0,0 +1,2 @@
https://github.com/yym68686/ChatGPT-Telegram-Bot

View File

@@ -1,5 +1,3 @@
# https://hub.docker.com/r/bitnami/gitea
version: '2'
services:
postgresql:
@@ -12,7 +10,7 @@ services:
- POSTGRESQL_PASSWORD=Superwdd.12
# ALLOW_EMPTY_PASSWORD is recommended only for development.
gitea:
image: docker.io/bitnami/gitea:1.19.3-debian-11-r0
image: docker.io/bitnami/gitea:1.24.5-debian-12-r0
volumes:
- '/data/gitea/gitea_data:/bitnami/gitea'
environment:
@@ -21,14 +19,14 @@ services:
- GITEA_DATABASE_USERNAME=bn_gitea
- GITEA_DATABASE_PASSWORD=Superwdd.12
- GITEA_ADMIN_USER=zeaslity
- GITEA_ADMIN_PASSWORD=lovemm.23
- GITEA_ADMIN_PASSWORD=loveff.cxc.23
- GITEA_ADMIN_EMAIL=wdd@107421.xyz
- GITEA_HTTP_PORT=3000
# - GITEA_DOMAIN=gitea.107421.xyz
- GITEA_ROOT_URL=https://gitea.107421.xyz
- GITEA_SSH_LISTEN_PORT=22222
- GITEA_APP_NAME=Gitea-闲下来就喝杯茶吧
- GITEA_DOMAIN=192.168.35.70
- GITEA_PROTOCOL=http
- GITEA_RUN_MODE=prod
- ARCHIVE_CLEANUP_ENABLED=true
- ARCHIVE_CLEANUP_TIMEOUT=168h # archive file retention before cleanup, default 7 days
ports:
- '3000:3000'
- '22222:22222'

View File

@@ -1,16 +0,0 @@
#!/bin/bash
export MIOIO_DATA_PATH=/var/lib/docker/minio-pv
mkdir -p ${MIOIO_DATA_PATH}
chown -R 1001:1001 ${MIOIO_DATA_PATH}
docker run -d \
--env MINIO_ACCESS_KEY="cmii" \
--env MINIO_SECRET_KEY="boge14@Level5" \
--volume ${MIOIO_DATA_PATH}:/data \
--network host \
--name minio-server \
bitnami/minio:2021.11.24-debian-10-r0

View File

@@ -1,14 +0,0 @@
#!/bin/bash
docker run -d \
-e MODE=standalone \
-e MYSQL_SERVICE_HOST=localhost \
-e MYSQL_SERVICE_PORT=33306 \
-e MYSQL_SERVICE_DB_NAME=nacos_config \
-e MYSQL_SERVICE_USER=root \
-e MYSQL_SERVICE_PASSWORD=boge14@Level5 \
--name nacos-server \
--network host \
nacos/nacos-server:2.0.2

View File

@@ -1,8 +1,14 @@
#!/bin/bash
curl -X GET "https://api.cloudflare.com/client/v4/user/tokens/verify" \
-H "Authorization: Bearer T7LxBemfe8SNGWkT9uz2XIc1e22ifAbBv_POJvDP" \
-H "Content-Type:application/json"
#export DOMAIN_NAME=chat.107421.xyz
export DOMAIN_NAME=push.107421.xyz
# API token with permission to edit DNS records
export CF_Token="oXJRP5XI8Zhipa_PtYtB_jy6qWL0I9BosrJEYE8p"
export CF_Account_ID="dfaadeb83406ef5ad35da02617af9191"
export CF_Zone_ID="511894a4f1357feb905e974e16241ebb"
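
For reference, these are exactly the variable names that acme.sh's Cloudflare DNS hook (`dns_cf`) reads, so issuing a certificate is typically a one-liner (a sketch; it assumes acme.sh is installed and the token has DNS edit permission for the zone):

```bash
acme.sh --issue --dns dns_cf -d "${DOMAIN_NAME}" --keylength ec-256
```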

View File

@@ -69,7 +69,7 @@ sudo sysctl -p /etc/sysctl.d/proxy-wdd.conf
sysctl net.ipv4.tcp_congestion_control
sudo ethtool -K enp0s3 gro on
sudo ethtool -K enp0s3 gso on
sudo ethtool -K enp0s3 tso on
sudo ethtool -K ens3 gro on
sudo ethtool -K ens3 gso on
sudo ethtool -K ens3 tso on
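
To confirm the offload settings actually took effect (assumes the interface is ens3, as in the corrected lines above):

```bash
ethtool -k ens3 | grep -E 'generic-(receive|segmentation)-offload|tcp-segmentation-offload'
```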

View File

@@ -1,38 +0,0 @@
{
"listen": "0.0.0.0",
"port": 29999,
"protocol": "vless",
"settings": {
"clients": [
{
"id": "RoMoH00dOl3zaQjdUKB6W0SS-wDYENgI3I7cREYwp1M",
"flow": "xtls-rprx-vision"
}
],
"decryption": "none"
},
"streamSettings": {
"network": "tcp",
"security": "reality",
"realitySettings": {
"dest": "speed.cloudflare.com",
"serverNames": [
"speed.cloudflare.com"
],
"privateKey": "yNsDptp-3i-KqhLHA-RBLrVlJuiYeDUekirp-fkerQA",
"shortIds": [
"abc124cc",
"666asdcd"
]
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls",
"quic"
],
"routeOnly": true
}
}
}

View File

@@ -0,0 +1,82 @@
{
"log": {
"loglevel": "info"
},
"inbounds": [
{
"port": 24443,
"protocol": "vless",
"tag": "proxy",
"settings": {
"clients": [
{
"id": "f8702759-f402-4e85-92a6-8540d577de22",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "freedom"
},
{
"protocol": "freedom",
"tag": "proxy"
}
],
"routing": {
"domainStrategy": "AsIs",
"domainMatcher": "hybrid",
"rules": [
{
"type": "field",
"inboundTag": [
"proxy"
],
"outboundTag": "proxy"
}
]
}
}

View File

@@ -0,0 +1,84 @@
{
"inbounds": [
{
"port": 24444,
"protocol": "vless",
"tag": "fv-ge-frk",
"settings": {
"clients": [
{
"id": "6055eac4-dee7-463b-b575-d30ea94bb768",
"flow": "xtls-rprx-vision",
"email": "franklin@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fv-ge-frk",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "de-01.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fv-ge-frk"
],
"outboundTag": "fv-ge-frk"
}
]
}
}
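
For reference, a `secretKey`/`publicKey` pair like the one used by the wireguard outbound above can be generated with the standard tooling (a sketch; assumes wireguard-tools is installed, and the file names are illustrative):

```bash
wg genkey | tee client.key | wg pubkey > client.pub
```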

View File

@@ -0,0 +1,84 @@
{
"inbounds": [
{
"port": 24445,
"protocol": "vless",
"tag": "fv-kr-sel",
"settings": {
"clients": [
{
"id": "1cd284b2-d3d8-4165-b773-893f836c2b51",
"flow": "xtls-rprx-vision",
"email": "seoul@fastestvpn.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fv-kr-sel",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "kr.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fv-kr-sel"
],
"outboundTag": "fv-kr-sel"
}
]
}
}

View File

@@ -0,0 +1,84 @@
{
"inbounds": [
{
"port": 24446,
"protocol": "vless",
"tag": "fv-jp-tyk",
"settings": {
"clients": [
{
"id": "bf0e9c35-84a9-460e-b5bf-2fa9f2fb3bca",
"flow": "xtls-rprx-vision",
"email": "seoul@fastestvpn.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fv-jp-tyk",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "jpjp.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fv-jp-tyk"
],
"outboundTag": "fv-jp-tyk"
}
]
}
}

View File

@@ -0,0 +1,85 @@
{
"inbounds": [
{
"port": 24447,
"protocol": "vless",
"tag": "fv-uk-lon",
"settings": {
"clients": [
{
"id": "adc19390-373d-4dfc-b0f6-19fab1b6fbf6",
"flow": "xtls-rprx-vision",
"email": "london@fastestvpn.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fv-uk-lon",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "uk-02.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fv-uk-lon"
],
"outboundTag": "fv-uk-lon"
}
]
}
}

View File

@@ -0,0 +1,85 @@
{
"inbounds": [
{
"port": 24448,
"protocol": "vless",
"tag": "fv-sgp",
"settings": {
"clients": [
{
"id": "e31bc28e-8ebd-4d72-a98e-9227f26dfac3",
"flow": "xtls-rprx-vision",
"email": "singapore@fastestvpn.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fv-sgp",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "sg-01.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fv-sgp"
],
"outboundTag": "fv-sgp"
}
]
}
}

View File

@@ -0,0 +1,85 @@
{
"inbounds": [
{
"port": 24452,
"protocol": "vless",
"tag": "fastestvpm-hongkong",
"settings": {
"clients": [
{
"id": "cdf0b19a-9524-48d5-b697-5f10bb567734",
"flow": "xtls-rprx-vision",
"email": "hongkong@fastestvpn.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fastestvpm-hongkong",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "hk.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fastestvpm-hongkong"
],
"outboundTag": "fastestvpm-hongkong"
}
]
}
}

File diff suppressed because it is too large

View File

@@ -4,12 +4,13 @@
},
"inbounds": [
{
"port": 443,
"port": 24443,
"protocol": "vless",
"tag": "proxy",
"settings": {
"clients": [
{
"id": "b4bdf874-8c03-5bd8-8fd7-5e409dfd82c0",
"id": "f8702759-f402-4e85-92a6-8540d577de22",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
@@ -17,11 +18,6 @@
],
"decryption": "none",
"fallbacks": [
{
"name": "xx.tc.hk.go.107421.xyz",
"alpn": "h2",
"dest": "@trojan-h2"
},
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
@@ -42,11 +38,6 @@
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
},
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/xx.tc.hk.go.107421.xyz.key"
}
],
"minVersion": "1.2",
@@ -64,31 +55,28 @@
"tls"
]
}
},
{
"listen": "@trojan-h2",
"protocol": "trojan",
"settings": {
"clients": [
{
"email": "ice@qq.com",
"password": "Vad3.123a)asd1234-asdasd.asdazzS.123",
"level": 0
}
]
},
"streamSettings": {
"network": "h2",
"security": "none",
"httpSettings": {
"path": "/status"
}
}
}
],
"outbounds": [
{
"protocol": "freedom"
},
{
"protocol": "freedom",
"tag": "proxy"
}
]
],
"routing": {
"domainStrategy": "AsIs",
"domainMatcher": "hybrid",
"rules": [
{
"type": "field",
"inboundTag": [
"proxy"
],
"outboundTag": "proxy"
}
]
}
}

View File

@@ -1,126 +0,0 @@
{
"log": {
"loglevel": "debug"
},
"inbounds": [
{
"port": 443,
"protocol": "vless",
"settings": {
"clients": [
{
"id": "b4bdf874-8c03-5bd8-8fd7-5e409dfd82c0",
"flow": "xtls-rprx-vision"
}
],
"decryption": "none",
"fallbacks": [
{
"name": "xx.tc.hk.go.107421.xyz",
"path": "/status",
"dest": 5000,
"xver": 1
},
{
"name": "book.107421.xyz",
"dest": 5003,
"xver": 1
},
{
"name": "book.107421.xyz",
"alpn": "h2",
"dest": 5004,
"xver": 1
},
{
"dest": 5001,
"xver": 1
},
{
"alpn": "h2",
"dest": 5002,
"xver": 1
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"alpn": ["h2", "http/1.1"],
"certificates": [
{
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
]
}
}
},
{
"port": 5000,
"listen": "127.0.0.1",
"protocol": "vless",
"settings": {
"clients": [
{
"id": "481d1403-de9a-5ae1-b921-18c04a4a9da0",
"level": 0,
"email": "dd@qq.com"
}
],
"decryption": "none"
},
"streamSettings": {
"network": "ws",
"security": "tls",
"wsSettings": {
"acceptProxyProtocol": true,
"path": "/status"
},
"tlsSettings": {
"alpn": ["h2", "http/1.1"],
"certificates": [
{
"certificateFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/xx.tc.hk.go.107421.xyz.key"
}
]
}
}
},
{
"listen": "0.0.0.0",
"port": 29999,
"protocol": "trojan",
"settings": {
"clients": [
{
"password": "V2ryStr0ngP0ss"
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"alpn": [
"h2",
"http/1.1"
],
"certificates": [
{
"certificateFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/xx.tc.hk.go.107421.xyz.key"
}
]
}
}
}
],
"outbounds": [
{
"protocol": "freedom"
}
]
}

View File

@@ -20,6 +20,13 @@
{
"name": "pan.107421.xyz",
"dest": 5003,
"alpn": "h2",
"xver": 2
},
{
"name": "push.107421.xyz",
"dest": 5004,
"alpn": "h2",
"xver": 2
},
{
@@ -62,6 +69,11 @@
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/pan.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/pan.107421.xyz_ecc/pan.107421.xyz.key"
},
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/push.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/push.107421.xyz_ecc/push.107421.xyz.key"
}
],
"minVersion": "1.2",

View File

@@ -0,0 +1,308 @@
{
"log": {
"loglevel": "error"
},
"inbounds": [
{
"port": 443,
"tag": "Seoul-amd64-01",
"protocol": "vless",
"settings": {
"clients": [
{
"id": "1089cc14-557e-47ac-ac85-c07957b3cce3",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.s4.cc.hh.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.s4.cc.hh.107421.xyz_ecc/xx.s4.cc.hh.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
},
{
"port": 20443,
"protocol": "vless",
"tag": "Seoul-amd64-01->Tokyo-amd64-02",
"settings": {
"clients": [
{
"id": "21dab95b-088e-47bd-8351-609fd23cb33c",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.t2.ll.c0.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.t2.ll.c0.107421.xyz_ecc/xx.t2.ll.c0.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
},
{
"port": 21443,
"protocol": "vless",
"tag": "Seoul-amd64-01->Osaka-amd64-01",
"settings": {
"clients": [
{
"id": "4c2dd763-56e5-408f-bc8f-dbf4c1fe41f9",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.o1.vl.s4.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.o1.vl.s4.107421.xyz_ecc/xx.o1.vl.s4.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
},
{
"port": 22443,
"protocol": "vless",
"tag": "Seoul-amd64-01->Phoenix-amd64-02",
"settings": {
"clients": [
{
"id": "de576486-e254-4d9d-949a-37088358ec23",
"flow": "xtls-rprx-vision",
"email": "phoneix@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.p2.vl.s4.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.p2.vl.s4.107421.xyz_ecc/xx.p2.vl.s4.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "freedom"
},
{
"protocol": "freedom",
"tag": "Seoul-amd64-01"
},
{
"protocol": "blackhole",
"tag": "block"
},
{
"tag": "Seoul-amd64-01->Tokyo-amd64-02",
"protocol": "socks",
"settings": {
"servers": [
{
"address": "140.238.52.228",
"port": 1234
}
]
}
},
{
"tag": "Seoul-amd64-01->Phoenix-amd64-02",
"protocol": "socks",
"settings": {
"servers": [
{
"address": "129.146.171.163",
"port": 1234
}
]
}
},
{
"tag": "Seoul-amd64-01->Osaka-amd64-01",
"protocol": "socks",
"settings": {
"servers": [
{
"address": "140.83.84.142",
"port": 1234
}
]
}
}
],
"routing": {
"domainStrategy": "IPIfNonMatch",
"rules": [
{
"type": "field",
"inboundTag": [
"Seoul-amd64-01"
],
"outboundTag": "Seoul-amd64-01"
},
{
"type": "field",
"inboundTag": [
"Seoul-amd64-01->Tokyo-amd64-02"
],
"outboundTag": "Seoul-amd64-01->Tokyo-amd64-02"
},
{
"type": "field",
"inboundTag": [
"Seoul-amd64-01->Phoenix-amd64-02"
],
"outboundTag": "Seoul-amd64-01->Phoenix-amd64-02"
},
{
"type": "field",
"inboundTag": [
"Seoul-amd64-01->London-amd64-01"
],
"outboundTag": "Seoul-amd64-01->London-amd64-01"
},
{
"type": "field",
"inboundTag": [
"Seoul-amd64-01->Osaka-amd64-01"
],
"outboundTag": "Seoul-amd64-01->Osaka-amd64-01"
}
]
}
}

View File

@@ -0,0 +1,106 @@
# -----------------------------------------------------------------------------
# Optimized Clash Configuration File
#
# This configuration addresses common problems such as slow access to domestic
# (mainland-China) sites and DNS resolution timeouts.
# Core idea: fine-grained DNS settings that resolve domestic and foreign domains
# separately, combined with efficient rule sets, for the best browsing experience.
# -----------------------------------------------------------------------------
# [General Settings]
# Mixed port: serves both HTTP and SOCKS5 proxy protocols.
mixed-port: 7890
# Allow LAN connections: lets other devices on the same LAN go online through this Clash instance.
allow-lan: true
# Mode: rule (traffic is split according to the rules below).
mode: rule
# Log level: info records general information and errors for troubleshooting. Options: silent, error, warning, info, debug.
log-level: info
# External controller: lets a Dashboard panel manage Clash graphically.
external-controller: '127.0.0.1:9090'
# --- The key section that solves all of the problems above ---
dns:
# [Master switch] Enable Clash's built-in DNS server. Must be true for all DNS settings below to take effect.
enable: true
# [Listen address] '0.0.0.0:53' allows other LAN devices to use Clash as their DNS server.
# If only this machine should use it, change it to '127.0.0.1:53'.
listen: 0.0.0.0:53
# [IPv6] Disable IPv6 resolution. On most domestic networks this avoids needless resolution latency and connection issues.
ipv6: false
# [Enhanced mode] fake-ip is strongly recommended, especially with TUN mode enabled.
# It takes over all applications' DNS requests by returning fake IP addresses, enabling fine-grained domain-based rule proxying.
# This is the best fix for programs that are not proxy-aware (CLI tools, some desktop apps).
enhanced-mode: fake-ip
# [Fake-IP pool] The fake address range used by fake-ip mode. Usually no need to change.
fake-ip-range: 198.18.0.1/16
# Used only to resolve the DoH/DoT hostnames in nameserver and fallback below. Must be plain IP addresses.
# Reliable domestic public DNS servers are chosen here.
default-nameserver:
- 223.5.5.5
- 119.29.29.29
# Resolves domestic domains. Queries run concurrently; the fastest answer wins.
# Domestic DoH (DNS over HTTPS) effectively prevents ISP hijacking and resolves domestic CDNs quickly and accurately.
nameserver:
- https://doh.pub/dns-query # Tencent DNSPod (DoH)
- https://dns.alidns.com/dns-query # Alibaba DNS (DoH)
# Used when a nameserver result is judged polluted by fallback-filter.
# Must be foreign, unpolluted, encrypted DNS services so that GFW-tampered domains resolve correctly.
fallback:
- https://dns.google/dns-query # Google DNS (DoH)
- https://1.1.1.1/dns-query # Cloudflare DNS (DoH)
- tls://8.8.4.4:853 # Google DNS (DoT)
# [Anti-pollution filter] The core of the domestic/foreign smart split.
fallback-filter:
# [Enable GeoIP filtering] Must be true.
geoip: true
# [Trusted GeoIP code] A nameserver result is trusted only if its IP geolocates to China (CN).
# If the resolved IP is foreign, the result is treated as DNS pollution and the fallback group's answer is used instead.
geoip-code: CN
# [Proxies]
# Fill in your own proxy server details here. The entry below is only an example of the format.
proxies:
- name: "My-Proxy-Server-01"
type: ss
server: server_address
port: 443
cipher: aes-256-gcm
password: "password"
udp: true
# [Proxy Groups]
# Organize proxy nodes into strategies such as load balancing and automatic failover.
proxy-groups:
- name: "PROXY"
type: select
proxies:
- "My-Proxy-Server-01"
- DIRECT
# [Rules]
# Rules are matched in order, from top to bottom.
rules:
# Ad blocking
- DOMAIN-SUFFIX,ad.com,REJECT
# Common domestic sites go direct
- DOMAIN-SUFFIX,cn,DIRECT
- DOMAIN-SUFFIX,163.com,DIRECT
- DOMAIN-SUFFIX,126.com,DIRECT
- DOMAIN-SUFFIX,qq.com,DIRECT
- DOMAIN-SUFFIX,tencent.com,DIRECT
- DOMAIN-SUFFIX,baidu.com,DIRECT
- DOMAIN-SUFFIX,taobao.com,DIRECT
- DOMAIN-SUFFIX,alipay.com,DIRECT
- DOMAIN-SUFFIX,jd.com,DIRECT
- DOMAIN-SUFFIX,zhihu.com,DIRECT
- DOMAIN-SUFFIX,weibo.com,DIRECT
- DOMAIN-SUFFIX,bilibili.com,DIRECT
# LAN addresses go direct
- IP-CIDR,192.168.0.0/16,DIRECT
- IP-CIDR,10.0.0.0/8,DIRECT
- IP-CIDR,172.16.0.0/12,DIRECT
- IP-CIDR,127.0.0.0/8,DIRECT
# Mainland-China IP addresses go direct
- GEOIP,CN,DIRECT
# Everything else goes through the proxy
- MATCH,PROXY
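
Once this file is loaded, the split behaviour can be checked from the host (a sketch; it assumes `dig` is installed and that Clash's DNS and external controller are listening on the addresses configured above):

```bash
# A domestic domain should resolve to a real CN address via the domestic DoH servers
dig @127.0.0.1 www.baidu.com +short
# Under fake-ip mode, a proxied domain is answered from the 198.18.0.0/16 pool
dig @127.0.0.1 www.google.com +short
# The external controller exposes the running configuration over its REST API
curl http://127.0.0.1:9090/configs
```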

View File

@@ -0,0 +1,411 @@
#--------------------------------------------------------------------------------#
# Expert-Level Clash Configuration (for the Clash.Meta core)                      #
#--------------------------------------------------------------------------------#
#
# This configuration is designed for use inside mainland-China network environments
# and provides an automated, intelligent, highly available traffic-management setup.
#
# Core features:
# 1. TUN mode: takes over all system traffic for a true global transparent proxy.
# 2. Rule Providers: rules are loaded and updated dynamically from the network, no manual upkeep.
# 3. Split DNS: resolves domestic and foreign domains separately, resisting DNS pollution while keeping speed and accuracy.
# 4. Logical rule ordering: carefully ordered rule matching gives precise traffic control.
#
#--------------------------------------------------------------------------------#
#----------------#
# General        #
#----------------#
# HTTP proxy port
port: 7890
# SOCKS5 proxy port
socks-port: 7891
# Allow LAN connections; when true, other LAN devices can use this machine as a gateway
allow-lan: true
# Proxy mode; rule mode is the core of this configuration
mode: rule
# Log level; info provides enough information without being too verbose
log-level: info
# External controller, so a GUI client (e.g. Clash Verge) or WebUI (e.g. yacd) can control the Clash core
external-controller: '127.0.0.1:9090'
# External UI: directory of a WebUI panel; 'dashboard' is a common choice
# external-ui: dashboard
#----------------#
# DNS 配置 #
#----------------#
# DNS 模块是实现智能分流和抗污染的关键
dns: # 启用 DNS 服务器
enable: true
# 监听地址,'0.0.0.0:53' 使 Clash DNS 可为局域网内其他设备服务
# 如果只为本机服务,可设为 '127.0.0.1:53'
listen: 0.0.0.0:53
# 优先使用 IPv4 DNS 解析
ipv6: false
# 增强模式fake-ip 是 TUN 模式下实现域名路由的基石
# 它会为域名分配一个虚假的 IP 地址,使 Clash 能在 IP 层识别出原始域名
enhanced-mode: fake-ip
# Fake-IP 地址池,使用 IETF 保留的地址段,避免与公网地址冲突
fake-ip-range: 198.18.0.1/16
# Fake-IP 例外名单对于这些域名Clash 将返回其真实的 IP 地址
# 这对于一些无法处理 Fake-IP 的内网服务或特定应用至关重要
fake-ip-filter:
- localhost
- '*.lan'
- '*.local'
- '*.arpa'
- time.*.com
- ntp.*.com
- time.*.com
- +.market.xiaomi.com
- localhost.ptlogin2.qq.com
- '*.msftncsi.com'
- www.msftconnecttest.com
# [核心优化] 默认 DNS 服务器 (IP 格式)
# 用于解析 nameserver 和 fallback 中的 DNS 服务器域名,以及代理节点的域名。
# 必须使用纯 IP 地址,这是打破解析死锁、解决 DNS 超时问题的关键。
default-nameserver:
- 223.5.5.5
- 180.76.76.76 # 百度DNS
- 119.29.29.29
# [优化] 主 DNS 服务器列表 (国内,加密 DoH)
# 会与 Fallback DNS 并发请求,如果返回的 IP 是国内 IP则立即采用速度快
nameserver:
- 223.5.5.5 # 阿里云
- 180.76.76.76 # 百度DNS
- 180.184.1.1 # 字节跳动
- 1.2.4.8 # CNNIC DNS
# - https://dns.alidns.com/dns-query # 阿里 DoH DNS
# 备用 DNS 服务器列表 (国外,加密)
# 用于解析国外域名。当主 DNS 返回国外 IP 时Clash 会认为可能被污染,
# 并采用 Fallback DNS 的解析结果,以确保准确性
fallback:
- https://dns.google/dns-query # Google DNS (DoH)
- https://dns.cloudflare.com/dns-query # Cloudflare DNS (DoH)
- https://dns.quad9.net/dns-query # IBM quad9
- https://dns.opendns.com/dns-query # CISCO OpenDNS
- https://dns.adguard-dns.com/dns-query # AdGuard DNS
- tls://8.8.8.8:853 # Google DoT (纯IP)
- tls://1.1.1.1:853 # Cloudflare DoT (纯IP)
- tls://8.8.4.4:853 # Google DNS (DoT)
# Fallback DNS 例外名单,匹配此列表的域名将只使用主 DNS 解析
fallback-filter:
geoip: true
geoip-code: CN # 如果是国内的网址使用nameserver解析到的地址
rule-set: direct
# Lookup domains via specific nameservers
# 以下规则强制所有已知国内域名走最快的 IP DNS彻底解决国内域名解析超时。
nameserver-policy:
'rule-set:direct':
- 223.5.5.5
- 180.184.1.1
- 119.29.29.29
'rule-set:apple':
- 119.29.29.29
- 223.5.5.5
'rule-set:icloud':
- 119.29.29.29
- 223.5.5.5
+.hq.cmcc:
- '192.168.78.39'
+.ops.uavcmlc.com:
- '192.168.34.40'
+.uavcmlc.com:
- '192.168.34.40'
ir.hq.cmcc:
- '192.168.78.39'
oa.cdcyy.cn:
- '192.168.78.39'
# 使用系统的hosts文件
use-system-hosts: true
  # 请求 DoH 的 DNS 时是否优先使用 HTTP/3,此处关闭
  prefer-h3: false
  # 是否让 DNS 解析也遵循分流规则,此处保持关闭
  respect-rules: false
# [关键] 代理节点域名解析必须用纯IP的国内DNS
proxy-server-nameserver:
- 223.5.5.5
- 119.29.29.29
# 直连模式下的DNS服务器
direct-nameserver:
- 192.168.78.39
- 119.29.29.29 # 腾讯 DNSPod
- 114.114.114.114 # 114 DNS
- 223.5.5.5 # 阿里 DNS
  # 外部控制器跨域 (CORS) 设置,留空对象表示使用默认的跨域限制
  external-controller-cors: { }
##----------------#
## TUN 模式配置 #
##----------------#
## TUN 模式通过创建虚拟网卡,在系统网络层接管所有流量
#tun:
# # 启用 TUN 模式
# enable: true
# # 协议栈,'system' 在大多数系统上性能最佳
# # 在 macOS 上或遇到兼容性问题时可尝试 'gvisor'
# stack: system
# # DNS 劫持,将所有发往 53 端口的 DNS 请求重定向到 Clash 的 DNS 服务器
# # 这是强制所有应用使用 Clash DNS 的关键
# dns-hijack:
# - 'any:53'
# # 自动路由Clash 会自动配置系统路由表,将全局流量导向 TUN 网卡
# # 开启此项后,无需再进行任何手动网络设置
# auto-route: true
# # 自动检测出口网卡,适用于大多数单网卡设备
# # 如果设备有多个物理网卡,建议关闭此项并手动指定 interface-name
# auto-detect-interface: true
# [优化] 严格路由模式
# 开启后可防止 DNS 泄露,并解决在某些系统上 DNS 劫持不生效的问题。
# 注意:此设置会使局域网内的其他设备无法访问本机。如果不需要共享代理,建议开启。
# strict-route: true
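# [验证示意] TUN 启用后,系统路由表中应出现指向虚拟网卡的路由,Linux 下可粗略确认(网卡名因平台而异):
#   ip route show table all | grep -iE 'tun|utun'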
#------------------------------------------------------------------#
# 代理节点 (Proxies) 和策略组 (Proxy Groups) - 用户需自行填充 #
#------------------------------------------------------------------#
#
# 请将您的订阅链接转换后,将 proxies 和 proxy-groups 的内容粘贴到此处
proxies:
proxy-groups:
- name: 🚀 节点选择
type: select
proxies:
- TC-HongKong
- BFC-LosAngles
- FV-HongKong
- Care-DEU-Dusseldorf-R-TCHK
- Oracle-KOR-Seoul-R-TCHK
- Oracle-JPN-Tokyo-R-TCHK
- Oracle-USA-Phoenix-R-TCHK
- Care-DEU-Dusseldorf
- Oracle-KOR-Seoul
- FV-DEU-Frankfurt
- FV-KOR-Seoul
- FV-JPN-Tokyo
- FV-GBR-London
- FV-USA-LosAngles
- CF-HongKong-R-TCHK
- FV-SGP
- CF_VIDEO_1
- CF_VIDEO_2
- Oracle-JPN-Tokyo-R-OSel
- Oracle-JPN-Osaka-R-OSel
- Oracle-USA-Phoneix-R-OSel
- TC-CHN-Shanghai
- ♻️ 自动选择
- DIRECT
- name: ♻️ 自动选择
type: url-test
url: https://www.gstatic.com/generate_204
interval: 300
tolerance: 50
proxies:
- BFC-LosAngles
- TC-HongKong
- Oracle-JPN-Tokyo-R-TCHK
- Oracle-USA-Phoenix-R-TCHK
- Oracle-KOR-Seoul
- Care-DEU-Dusseldorf
- Oracle-JPN-Tokyo-R-OSel
- Oracle-JPN-Osaka-R-OSel
- Oracle-USA-Phoneix-R-OSel
- name: 🌍 国外媒体
type: select
proxies:
- 🚀 节点选择
- ♻️ 自动选择
- 🎯 全球直连
- name: 📲 电报信息
type: select
proxies:
- 🚀 节点选择
- ♻️ 自动选择
- 🎯 全球直连
- name: Ⓜ️ 微软服务
type: select
proxies:
- 🎯 全球直连
- 🚀 节点选择
- name: 🍎 苹果服务
type: select
proxies:
- 🎯 全球直连
- 🚀 节点选择
- name: 💩 工作直连
type: select
proxies:
- DIRECT
- onetools-35-71
- name: 💩 工作代理
type: select
proxies:
- onetools-35-71
- DIRECT
- name: 🎯 全球直连
type: select
proxies:
- DIRECT
- 🚀 节点选择
- ♻️ 自动选择
- name: 🛑 全球拦截
type: select
proxies:
- REJECT
- DIRECT
- name: 🍃 应用净化
type: select
proxies:
- REJECT
- DIRECT
- name: 🐟 漏网之鱼
type: select
proxies:
- 🚀 节点选择
- 🎯 全球直连
- ♻️ 自动选择
- TC-HongKong
- Oracle-KOR-Seoul
#----------------#
# 规则集定义 #
#----------------#
# Rule Providers 用于从网络动态加载规则列表,实现规则的自动更新
rule-providers:
  # 广告、追踪器、恶意域名规则集
  reject:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/reject.txt"
path: ./ruleset/reject.yaml
interval: 604800 # 更新间隔: 7天
# iCloud 服务规则集
icloud:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/icloud.txt"
path: ./ruleset/icloud.yaml
interval: 604800
# 苹果服务规则集
apple:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/apple.txt"
path: ./ruleset/apple.yaml
interval: 604800
# 谷歌服务规则集
google:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/google.txt"
path: ./ruleset/google.yaml
interval: 604800
# 需要代理的域名规则集
proxy:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/proxy.txt"
path: ./ruleset/proxy.yaml
interval: 604800
# 需要直连的域名规则集
direct:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/direct.txt"
path: ./ruleset/direct.yaml
interval: 604800
# 私有网络域名规则集
private:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/private.txt"
path: ./ruleset/private.yaml
interval: 604800
# 中国大陆 IP 段规则集
cncidr:
type: http
behavior: ipcidr
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/cncidr.txt"
path: ./ruleset/cncidr.yaml
interval: 604800
# 局域网 IP 段规则集
lancidr:
type: http
behavior: ipcidr
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/lancidr.txt"
path: ./ruleset/lancidr.yaml
interval: 604800
# Telegram 服务器 IP 段规则集
telegramcidr:
type: http
behavior: ipcidr
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/telegramcidr.txt"
path: ./ruleset/telegramcidr.yaml
interval: 604800
#----------------#
# 分流规则 #
#----------------#
# 规则按从上到下的顺序进行匹配,一旦匹配成功,后续规则将不再执行
rules:
  # 1. 广告、追踪器拦截规则 (最高优先级)
  # 直接拒绝连接,提升网页加载速度和隐私保护
  - RULE-SET,reject,REJECT
# [新增] DNS服务器IP直连防止DNS请求走代理
- IP-CIDR,8.8.8.8/32,DIRECT,no-resolve
- IP-CIDR,8.8.4.4/32,DIRECT,no-resolve
- IP-CIDR,1.1.1.1/32,DIRECT,no-resolve
- IP-CIDR,1.0.0.1/32,DIRECT,no-resolve
- IP-CIDR,223.5.5.5/32,DIRECT,no-resolve
- IP-CIDR,119.29.29.29/32,DIRECT,no-resolve
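  # [说明] 规则尾部的 no-resolve 表示:当连接目标是域名时直接跳过该条 IP 规则,
  # 不为匹配而额外触发一次 DNS 解析;仅当目标本身就是 IP 时才参与比对。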
# [优化] 核心国内流量直连规则 (IP 维度)
# 将中国大陆的 IP 地址段置于高优先级。这是解决国内网站访问缓慢和超时的关键。
# 任何目标地址在此列表内的连接都会被立即直连,无需进行 DNS 查询和 GEOIP 判断。
- RULE-SET,cncidr,DIRECT
# 工作代理模式
- DOMAIN-SUFFIX,cdcyy.cn,💩 工作直连
- DOMAIN-SUFFIX,hq.cmcc,💩 工作直连
- DOMAIN-SUFFIX,wdd.io,💩 工作直连
- DOMAIN-SUFFIX,harbor.cdcyy.com.cn,💩 工作直连
- DOMAIN-SUFFIX,ecs.io,💩 工作直连
- DOMAIN-SUFFIX,uavcmlc.com,💩 工作直连
# 2. 本地/内网流量直连规则
# 确保局域网设备和服务的访问不受代理影响
- RULE-SET,lancidr,DIRECT
- RULE-SET,private,DIRECT
# 3. 明确的国内服务直连规则
# 优先匹配已知需要直连的域名和服务 (Apple, iCloud 等)
- RULE-SET,icloud,🍎 苹果服务
- RULE-SET,apple,🍎 苹果服务
- RULE-SET,direct,🎯 全球直连
# 4. 明确的代理规则
# 匹配已知需要代理的服务 (Google, Telegram, 以及其他国际服务)
- RULE-SET,google,🌍 国外媒体
- RULE-SET,telegramcidr,📲 电报信息
- RULE-SET,proxy,🌍 国外媒体
  # 5. 基于地理位置的补充规则
  # 所有目标 IP 位于中国大陆的流量都直连
  # 这条规则作为对域名规则的补充,确保国内 IP 流量的直连
  - GEOIP,CN,DIRECT
# 6. 最终的兜底规则 (最低优先级)
# 所有未匹配到以上任何规则的流量,都走代理
# 这是确保未知的新网站或国外服务能正常访问的关键
- MATCH,🐟 漏网之鱼

File diff suppressed because it is too large

View File

@@ -0,0 +1,33 @@
请查阅clash的配置规格修改上述的配置文件要求对配置进行详细的中文注释说明。
## 参考配置
- https://en.clash.wiki/configuration/getting-started.html
## 修改功能说明,配置最终需要满足如下条件
- 开启TUN模式规则模式下在GFW之内使用
- 访问CN的IP及网址是直连状态,无需任何代理,使用223.5.5.5 119.29.29.29作为DNS
- 访问特殊网址规则,按照clash中规则定义进行访问
- 访问规则之外的CN之外的IP及网址,走代理访问,使用8.8.8.8 1.1.1.1作为DNS
- 规则模式下,在GFW之内使用
- 同上述规则
请查阅clash的配置规格修改上述的配置文件要求对配置进行详细的中文注释说明。
## 参考配置
- https://en.clash.wiki/configuration/getting-started.html
请分析上述的配置文件,无论是否开启TUN模式,在规则模式之后
访问国内的网址出现大量错误,典型的错误如下
[TCP] dial DIRECT (match RuleSet/cncidr) 127.0.0.1:50173 --> www.zhihu.com:443 error: dns resolve failed: context deadline exceeded
[TCP] dial 🎯 全球直连 (match RuleSet/direct) 127.0.0.1:56064 --> static.zhihu.com:443 error: dns resolve failed: context deadline exceeded
访问网址非常卡顿,请给出解决方案,并分析自建 DNS 服务器是否能够解决问题。
请查阅clash的配置规格修改上述的配置文件要求对配置进行详细的中文注释说明。
请帮我分析上述的配置文件开启TUN模式开启DNS之后
日志出现大量的[UDP] dial 🐟 漏网之鱼 (match Match/) mihomo --> 1.1.1.1:53 error: new vless client error: context canceled错误
请分析出现的原因,给出解决办法

View File

@@ -0,0 +1,31 @@
你是一名精通LINUX服务器内核及网络参数调优的顶级专家熟练掌握XrayV2rayTrojan等各种前沿的代理软件代理协议
我现在中国境内需要避开GFW阻拦访问全球互联网现在有两台主机
1. 主机A位于香港
1. 可能存在审查等
2. 带宽为30Mbps
3. 直接访问其的网络质量良好
2. 主机B位于日本
1. 不存在审查等风险
2. 带宽为50Mbps
3. 直接访问的网络质量堪忧,丢包率很严重
4. 从主机A访问主机B的网络质量目测还行
请实现如下的内容
1. 我直接访问主机A采用vless协议请分析vless协议是否实现速度和安全性的平衡。国内-主机A-主机B如何实现安全且高效的链式代理请寻找合适的代理协议
2.请实现脚本判定主机A到主机B之间往返之间的网络质量连接速度丢包率等需要在每天的不同时段执行然后输出一份综合性的报告。使用go或者shell实现自行寻找最合适的语言
3.请给出内核及网络优化参数使得主机A和主机B能够满足极致的转发性能减少代理的性能及时延损耗
你是一名优秀的go编程大师使用go语言实现一个主机A到主机B之间网络质量检测程序
1 真实代理延迟模拟真实TCP请求从主机A到主机B然后数据从主机B返回主机A的真实时延或者丢包情况
2 测试主机A到主机B的延迟测试主机B到主机A的延迟
3 测试主机A到主机B的丢包情况测试主机B到主机A的丢包情况,需要测试TCP及UDP丢包
4 [不强制]-针对上述延迟测试, 实现形式为类似tracerooute样式的路由追踪
5 需要定时执行,测试不同时间周期的网络质量,需要形成一份测试报告

View File

@@ -0,0 +1,277 @@
# AdGuard Home 配置文件
# 官方文档参考: https://github.com/AdguardTeam/AdGuardHome/wiki/Configuration
# HTTP/Web 界面相关设置
#http:
# # pprof (性能分析) 相关设置
# pprof:
# port: 6060 # pprof 服务的端口
# enabled: false # 是否启用 pprof默认为 false建议保持禁用
# address: https://xx.tc.hk.go.107421.xyz # Web 界面的监听地址和端口
# session_ttl: 720h # Web 界面登录会话的有效时间 (720小时 = 30天)
# 用户认证设置
users:
# 在这里添加您的用户。您必须手动生成密码的 bcrypt 哈希值。
# 例如,在 Linux 系统中,您可以使用 htpasswd 工具生成:
# htpasswd -nb your_username your_password
# 然后将输出的整行 (例如: your_username:$apr1$....) 替换掉下面的内容。
- name: zeaslity # 您的用户名
password: "$2y$05$b8Vbq3FrGqFNDceFTPFf.eRVYznIineyqtVr60hURTmFKLwdvadCi" # 将这里替换为您生成的密码哈希值
# 认证尝试次数与锁定时间
auth_attempts: 5 # 允许的最大登录失败次数
block_auth_min: 15 # 登录失败次数过多后,锁定登录的分钟数
http_proxy: "" # HTTP 代理地址,通常留空
language: "zh-cn" # Web 界面语言,留空则自动检测浏览器语言
theme: auto # Web 界面主题 (auto, light, dark)
# DNS 服务器相关设置
dns:
bind_hosts:
- 0.0.0.0 # DNS 服务器监听的 IP 地址127.0.0.1 表示只允许本机访问
port: 53 # DNS 服务器监听的端口53是标准DNS端口
anonymize_client_ip: false # 是否在将 EDNS Client Subnet (ECS) 信息转发给上游时匿名化客户端 IP
# DNS 请求速率限制
ratelimit: 40 # 每个客户端每秒允许的最大 DNS 请求数
ratelimit_subnet_len_ipv4: 24 # 用于速率限制的 IPv4 子网掩码长度 (24表示C类地址)
ratelimit_subnet_len_ipv6: 56 # 用于速率限制的 IPv6 子网掩码长度
ratelimit_whitelist: [] # 不受速率限制的 IP 地址列表
refuse_any: true # 是否拒绝类型为 ANY 的 DNS 请求,以防止被用于 DNS 放大攻击
# 上游 DNS 服务器设置
upstream_dns:
- https://dns.google/dns-query # Google DNS (DoH)
- https://1.1.1.1/dns-query # Cloudflare DNS (DoH)
- tls://8.8.4.4:853 # Google DNS (DoT)
upstream_dns_file: "" # 从文件中加载上游 DNS 服务器列表,留空则不使用
# 引导 DNS 服务器 (用于解析上游 DoH/DoT/DoQ 的域名)
bootstrap_dns:
- 1.1.1.1
- 8.8.8.8
# 备用 DNS 服务器,当所有上游服务器都不可用时使用,可以留空
fallback_dns:
- 1.1.1.1
- 8.8.8.8
# 上游服务器查询模式
upstream_mode: load_balance # "load_balance": 负载均衡, "parallel": 并行请求, "fastest_ip": 最快IP模式
fastest_timeout: 1s # 在 "fastest_ip" 模式下,等待响应的超时时间
# 访问控制
allowed_clients: [] # 允许访问的客户端列表,留空表示允许所有
disallowed_clients: [] # 禁止访问的客户端列表
# 默认拦截的域名
blocked_hosts:
- version.bind
- id.server
- hostname.bind
# 信任的反向代理 IP 地址范围
trusted_proxies:
- 127.0.0.0/8
- ::1/128
# DNS 缓存设置
cache_enabled: true # 是否启用 DNS 缓存
cache_size: 419430400 # 缓存大小 (字节, 这里是 400MB)
cache_ttl_min: 0 # 覆盖 DNS 记录的最小 TTL (秒)0 表示不覆盖
cache_ttl_max: 0 # 覆盖 DNS 记录的最大 TTL (秒)0 表示不覆盖
cache_optimistic: false # 是否启用乐观缓存 (返回过期的缓存记录并异步刷新)
bogus_nxdomain: [] # 将指定的 IP 地址的 NXDOMAIN 响应视为伪造响应
aaaa_disabled: false # 是否禁用对 IPv6 (AAAA) 记录的解析
enable_dnssec: false # 是否启用 DNSSEC 支持
# EDNS Client Subnet (ECS) 设置
edns_client_subnet:
custom_ip: "" # 自定义发送给上游的 IP 地址
enabled: false # 是否启用 ECS
use_custom: false # 是否使用上面定义的 custom_ip
max_goroutines: 300 # 处理 DNS 请求的最大并发协程数
handle_ddr: true # 是否处理 Discovery of Designated Resolvers (DDR)
# IPSet 设置 (需要内核支持)
ipset: []
ipset_file: ""
bootstrap_prefer_ipv6: false # 引导DNS是否优先使用 IPv6
upstream_timeout: 10s # 上游 DNS 请求的超时时间
private_networks: [] # 自定义的私有网络范围
use_private_ptr_resolvers: true # 是否为私有地址使用私有反向DNS解析器
local_ptr_upstreams: [] # 用于PTR请求的本地上游DNS
# DNS64 设置 (用于 NAT64)
use_dns64: false
dns64_prefixes: []
# HTTP/3 相关
serve_http3: false # 是否通过 HTTP/3 提供 DoH 服务
use_http3_upstreams: true # 是否使用 HTTP/3 连接到上游 DoH 服务器
serve_plain_dns: false # 是否为 DoH 和 DoT 客户端提供普通DNS (53端口)
hostsfile_enabled: true # 是否使用操作系统的 hosts 文件
# 待处理请求队列 (防止重复向上游请求)
pending_requests:
enabled: true
# TLS (加密) 相关设置
tls:
enabled: true # 是否启用 TLS (HTTPS, DoH, DoT, DoQ)
server_name: xx.tc.hk.go.107421.xyz # 您的服务器域名
force_https: true # 是否强制将 HTTP 请求重定向到 HTTPS
port_https: 443 # HTTPS 端口
port_dns_over_tls: 253 # DNS-over-TLS (DoT) 端口
port_dns_over_quic: 253 # DNS-over-QUIC (DoQ) 端口
port_dnscrypt: 0 # DNSCrypt 端口0表示禁用
dnscrypt_config_file: "" # DNSCrypt 配置文件路径
allow_unencrypted_doh: false # 是否允许通过未加密的 HTTP 接收 DoH 请求
# 证书和私钥设置
certificate_chain: "" # 证书链内容 (如果直接粘贴内容)
private_key: "" # 私钥内容 (如果直接粘贴内容)
certificate_path: /root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/fullchain.cer # 证书文件路径
private_key_path: /root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/xx.tc.hk.go.107421.xyz.key # 私钥文件路径
strict_sni_check: true # 是否为 DoT 和 DoH 启用严格的 SNI 检查
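  # [验证示意] TLS 部署完成后,可用 AdGuard 的 dnslookup 工具确认 DoH 是否可用(域名以实际签发证书为准):
  #   dnslookup example.com https://xx.tc.hk.go.107421.xyz/dns-query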
# 查询日志设置
querylog:
dir_path: "" # 日志文件存储目录,留空为 AdGuard Home 工作目录
ignored: [] # 不记录日志的域名列表
interval: 2160h # 日志轮转周期 (90天)
size_memory: 1000 # 在内存中保留的最新日志条数
enabled: true # 是否启用查询日志
file_enabled: false # 是否将日志写入文件
# 统计信息设置
statistics:
dir_path: "" # 统计数据存储目录
ignored: [] # 不计入统计的域名列表
interval: 24h # 统计信息保留时长
enabled: true # 是否启用统计功能
# 过滤规则列表设置
filters:
# AdGuard DNS 过滤器
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_1.txt
name: AdGuard DNS filter
id: 1
# AdAway 默认黑名单 (已禁用)
- enabled: false
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_2.txt
name: AdAway Default Blocklist
id: 2
whitelist_filters: [] # 白名单过滤列表
user_rules: [] # 用户自定义过滤规则
# DHCP 服务器设置 (当前禁用)
dhcp:
enabled: false
interface_name: ""
local_domain_name: lan
dhcpv4:
gateway_ip: ""
subnet_mask: ""
range_start: ""
range_end: ""
lease_duration: 86400
icmp_timeout_msec: 1000
options: []
dhcpv6:
range_start: ""
lease_duration: 86400
ra_slaac_only: false
ra_allow_slaac: false
# 内容过滤总设置
filtering:
blocking_ipv4: "" # 当域名被拦截时,返回的 IPv4 地址 (留空为默认)
blocking_ipv6: "" # 当域名被拦截时,返回的 IPv6 地址 (留空为默认)
# 按服务拦截
blocked_services:
schedule:
time_zone: Local # 时间表使用的时区
ids: [] # 要拦截的服务 ID 列表
protection_disabled_until: null # 临时禁用保护直至指定时间
# 安全搜索设置
safe_search:
enabled: false # 是否为搜索引擎强制启用安全搜索
bing: true
duckduckgo: true
ecosia: true
google: true
pixabay: true
yandex: true
youtube: true
# 拦截模式
blocking_mode: default # default: 默认模式; nxdomain: 返回 NXDOMAIN; null_ip: 返回 0.0.0.0; custom_ip: 返回自定义 IP
parental_block_host: family-block.dns.adguard.com # 家长控制拦截主机
safebrowsing_block_host: standard-block.dns.adguard.com # 安全浏览拦截主机
rewrites: [] # DNS 重写规则
safe_fs_patterns: [] # 文件系统安全模式
# 各种功能的缓存大小 (字节)
safebrowsing_cache_size: 104857600 # 安全浏览 (100MB)
safesearch_cache_size: 104857600 # 安全搜索 (100MB)
parental_cache_size: 1048576 # 家长控制 (1MB)
cache_time: 180 # 缓存时间 (秒)
filters_update_interval: 24 # 过滤器自动更新间隔 (小时)
blocked_response_ttl: 10 # 被拦截域名的 DNS 响应 TTL (秒)
# 总开关
filtering_enabled: true # 是否启用广告过滤
parental_enabled: false # 是否启用家长控制
safebrowsing_enabled: false # 是否启用安全浏览
protection_enabled: true # AdGuard 总保护开关
# 客户端设置
clients:
runtime_sources:
whois: true
arp: true
rdns: false
dhcp: true
hosts: true
persistent: [] # 持久化客户端设置
# 一般日志设置
log:
enabled: true # 是否启用 AdGuard Home 本身的日志记录
file: "" # 日志文件路径,留空为 stdout
max_backups: 0 # 保留的旧日志文件数量
max_size: 100 # 每个日志文件的最大大小 (MB)
max_age: 3 # 旧日志文件保留天数
compress: false # 是否压缩旧日志文件
local_time: false # 是否使用本地时间记录日志
verbose: false # 是否启用详细日志模式
# 操作系统相关设置
os:
group: ""
user: ""
rlimit_nofile: 0
schema_version: 30 # 配置文件架构版本,请勿手动修改

View File

@@ -0,0 +1,23 @@
请详细参考AdGuard官方的教程,
- https://github.com/AdguardTeam/AdGuardHome/wiki/Configuration
基于上文给出的配置文件,做出如下的修改
- 针对每一行的配置,给出中文的注释说明
- 实现用户名密码访问控制台,禁止非登录访问
htpasswd -nb -B zeaslity MSuper@123.IO9
zeaslity:$2y$05$b8Vbq3FrGqFNDceFTPFf.eRVYznIineyqtVr60hURTmFKLwdvadCi
dnslookup www.youtube.com https://xx.tc.hk.go.107421.xyz/dns-query
bitsflowcx1@outlook.com
urh!ude9zdf5njy0ZJN
aaa20250822

View File

@@ -18,7 +18,32 @@
"accounts": [
{
"user": "zeaslity",
"pass": "lovemm.23"
"pass": "a1f090ea-e39c-49e7-a3be-9af26b6ce563"
}
],
"udp": true,
"allowTransparent": false
}
},
{
"tag": "proxy-germany",
"port": 22889,
"listen": "0.0.0.0",
"protocol": "socks",
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
],
"routeOnly": false
},
"settings": {
"auth": "password",
"accounts": [
{
"user": "zeaslity",
"pass": "a1f090ea-e39c-49e7-a3be-9af26b6ce563"
}
],
"udp": true,
@@ -43,7 +68,7 @@
"accounts": [
{
"user": "zeaslity",
"pass": "lovemm.23"
"pass": "a1f090ea-e39c-49e7-a3be-9af26b6ce563"
}
],
"udp": true,
@@ -62,7 +87,7 @@
"port": 443,
"users": [
{
"id": "717c40e7-efeb-45bc-8f5e-4e6e7d9eea18",
"id": "0c5741d0-76a9-4945-9c1d-14647afcce24",
"email": "t@t.tt",
"security": "auto",
"encryption": "none",
@@ -89,6 +114,43 @@
"concurrency": -1
}
},
{
"tag": "proxy-germany",
"protocol": "vless",
"settings": {
"vnext": [
{
"address": "45.134.50.233",
"port": 443,
"users": [
{
"id": "b1417d92-998d-410b-a5f3-cf144b6f043e",
"email": "t@t.tt",
"security": "auto",
"encryption": "none",
"flow": "xtls-rprx-vision"
}
]
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"allowInsecure": false,
"serverName": "bingo.107421.xyz",
"alpn": [
"h2"
],
"fingerprint": "firefox"
}
},
"mux": {
"enabled": false,
"concurrency": -1
}
},
{
"tag": "direct",
"protocol": "freedom",
@@ -123,6 +185,13 @@
"proxy-socks"
]
},
{
"type": "field",
"outboundTag": "proxy-germany",
"inboundTag": [
"proxy-germany"
]
},
{
"type": "field",
"outboundTag": "direct",

View File

@@ -0,0 +1,4 @@
export all_proxy=socks5://zeaslity:a1f090ea-e39c-49e7-a3be-9af26b6ce563@42.192.52.227:22888

View File

@@ -0,0 +1,70 @@
{
"log": {
"loglevel": "error"
},
"inbounds": [
{
"port": 443,
"protocol": "vless",
"tag": "proxy",
"settings": {
"clients": [
{
"id": "302fbcb8-e096-46a1-906f-e879ec5ab0c5",
"flow": "xtls-rprx-vision",
"email": "bfc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.l4.ca.bg.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.l4.ca.bg.107421.xyz_ecc/xx.l4.ca.bg.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "freedom"
},
{
"protocol": "freedom",
"tag": "proxy"
}
]
}

View File

@@ -0,0 +1,42 @@
#!/bin/bash
# UFW 防火墙规则清除脚本
# 适用于 Ubuntu 22.04
# 检查是否以root权限运行
if [ "$EUID" -ne 0 ]; then
echo "请使用 sudo 运行此脚本"
exit 1
fi
echo "========================================="
echo "开始清除 UFW 防火墙规则"
echo "========================================="
# 1. 禁用UFW
echo ">>> 禁用 UFW 防火墙"
ufw disable
echo "执行: ufw disable"
# 2. 重置UFW到出厂默认状态删除所有规则
echo ">>> 重置 UFW 到出厂默认状态"
echo "y" | ufw reset
echo "执行: ufw reset"
# 3. 恢复默认策略
echo ">>> 恢复默认策略"
ufw default deny incoming
echo "执行: ufw default deny incoming"
ufw default allow outgoing
echo "执行: ufw default allow outgoing"
# 4. 显示当前状态
echo "========================================="
echo "UFW 防火墙规则已全部清除"
echo "当前状态:"
echo "========================================="
ufw status verbose
echo ""
echo "清除完成UFW 已禁用,所有自定义规则已删除"
echo "如需重新启用,请运行: sudo ufw enable"

View File

@@ -0,0 +1,90 @@
#!/bin/bash
# UFW 防火墙配置脚本
# 适用于 Ubuntu 22.04
# 检查是否以root权限运行
if [ "$EUID" -ne 0 ]; then
echo "请使用 sudo 运行此脚本"
exit 1
fi
echo "========================================="
echo "开始配置 UFW 防火墙规则"
echo "========================================="
# 1. 禁用UFW确保配置过程中不会被锁定
echo ">>> 临时禁用 UFW"
ufw disable
# 2. 重置UFW到默认状态清除所有现有规则
echo ">>> 重置 UFW 到默认状态"
echo "y" | ufw reset
# 3. 设置默认策略:允许所有出站流量,拒绝所有入站流量
echo ">>> 设置默认策略:允许出站,拒绝入站"
ufw default allow outgoing
echo "执行: ufw default allow outgoing"
ufw default deny incoming
echo "执行: ufw default deny incoming"
# 4. 允许白名单IP的所有流量入站方向
echo ">>> 添加白名单 IP 规则(允许所有端口和协议)"
echo "执行: ufw allow from 42.192.52.227/32"
ufw allow from 42.192.52.227/32
echo "执行: ufw allow from 43.154.83.213/32"
ufw allow from 43.154.83.213/32
echo "执行: ufw allow from 144.24.164.121/32"
ufw allow from 144.24.164.121/32
echo "执行: ufw allow from 132.145.87.10/32"
ufw allow from 132.145.87.10/32
echo "执行: ufw allow from 140.238.0.0/16"
ufw allow from 140.238.0.0/16
# 5. 允许公网访问指定端口TCP 和 UDP
echo ">>> 开放公网端口0.0.0.0/0"
echo "执行: ufw allow from 0.0.0.0/0 to any port 443 proto tcp"
ufw allow from 0.0.0.0/0 to any port 443 proto tcp
echo "执行: ufw allow from 0.0.0.0/0 to any port 443 proto udp"
ufw allow from 0.0.0.0/0 to any port 443 proto udp
echo "执行: ufw allow from 0.0.0.0/0 to any port 22333 proto tcp"
ufw allow from 0.0.0.0/0 to any port 22333 proto tcp
echo "执行: ufw allow from 0.0.0.0/0 to any port 22333 proto udp"
ufw allow from 0.0.0.0/0 to any port 22333 proto udp
echo "执行: ufw allow from 0.0.0.0/0 to any port 25000:26000 proto tcp"
ufw allow from 0.0.0.0/0 to any port 25000:26000 proto tcp
echo "执行: ufw allow from 0.0.0.0/0 to any port 25000:26000 proto udp"
ufw allow from 0.0.0.0/0 to any port 25000:26000 proto udp
# 6. 禁止非白名单IP的ICMP请求ping
echo ">>> 配置 ICMP 规则仅允许白名单IP"
echo "注意默认拒绝策略已经阻止非白名单的ICMP白名单IP可以ping"
# 7. 启用UFW
echo ">>> 启用 UFW 防火墙"
echo "y" | ufw enable
# 8. 显示当前规则
echo "========================================="
echo "UFW 防火墙配置完成!当前规则如下:"
echo "========================================="
ufw status verbose
echo ""
echo "配置总结:"
echo "- 出站流量:全部允许"
echo "- 入站流量:默认拒绝"
echo "- 开放端口443, 22333, 25000-26000 (TCP/UDP)"
echo "- 白名单IP42.192.52.227, 43.154.83.213, 144.24.164.121, 132.145.87.10, 140.238.0.0/16"
echo "- ICMP仅白名单IP可访问"

View File

@@ -0,0 +1,16 @@
你是一个精通ubuntu22.04系统下ufw使用的计算机高手请实现一个shell脚本实现如下的功能
- 所有的命令均有清晰的中文注释
- 所有执行的命令均使用echo进行打印输出
- 允许全部的流出方向流量
- 开放来源为0.0.0.0/0 流入本机的端口为443 22333 25000-26000的tcp udp流量
- 允许以下IP流入本机方向流量的全部协议及端口
- 42.192.52.227/32
- 43.154.83.213/32
- 144.24.164.121/32
- 132.145.87.10/32
- 140.238.0.0/16
- 禁止其他端口的流入流量
- 禁止非白名单IP的ICMP请求
请同步写出清除上述所有规则的脚本

View File

@@ -9,7 +9,7 @@
"settings": {
"clients": [
{
"id": "717c40e7-efeb-45bc-8f5e-4e6e7d9eea18",
"id": "0c5741d0-76a9-4945-9c1d-14647afcce24",
"flow": "xtls-rprx-vision",
"email": "cc@gg.com",
"level": 0
@@ -72,7 +72,7 @@
"clients": [
{
"email": "ice@qq.com",
"password": "Vad3.123a)asd@1234-as.dasd.asdazzS.123",
"password": "Vad3.123acasd-1234-as.dAsd.asdazzS.123",
"level": 0
}
]

View File

@@ -0,0 +1,72 @@
{
"inbounds": [
{
"protocol": "http",
"port": 2234,
"listen": "0.0.0.0",
"tag": "proxy-http"
},
{
"tag": "proxy",
"protocol": "socks",
"listen": "0.0.0.0",
"port": 1234,
"settings": {
"auth": "noauth",
"udp": true,
"ip": "127.0.0.1",
"userLevel": 0
}
},
{
"protocol": "socks",
"tag": "cloudflare",
"listen": "0.0.0.0",
"port": 1235,
"settings": {
"auth": "noauth",
"udp": true,
"userLevel": 0
}
}
],
"outbounds": [
{
"tag": "cloudflare",
"protocol": "socks",
"settings": {
"servers": [
{
"address": "127.0.0.1",
"port": 40000,
"level": 0
}
]
}
},
{
"tag": "proxy",
"protocol": "freedom"
}
],
"routing": {
"domainStrategy": "IPIfNonMatch",
"rules": [
{
"type": "field",
"inboundTag": [
"cloudflare",
"proxy-http"
],
"outboundTag": "cloudflare"
},
{
"type": "field",
"inboundTag": [
"proxy"
],
"outboundTag": "proxy"
}
]
}
}

View File

@@ -9,7 +9,7 @@
"settings": {
"clients": [
{
"id": "12491d80-745c-4e26-a58b-edf584afb208",
"id": "f1335f03-8c67-43c4-ac47-88697e917cc0",
"flow": "xtls-rprx-vision",
"email": "cc@Phoenix-arm02.com",
"level": 0

View File

@@ -0,0 +1,28 @@
{
"log": {
"loglevel": "warning"
},
"inbounds": [
{
"listen": "0.0.0.0",
"port": 31234,
"protocol": "vmess",
"settings": {
"clients": [
{
"id": "7d390fdf-0a48-4a3e-b18c-b18db36c6f23"
}
]
},
"streamSettings": {
"network": "tcp"
}
}
],
"outbounds": [
{
"protocol": "freedom",
"tag": "direct"
}
]
}

View File

@@ -0,0 +1,64 @@
{
"log": {
"loglevel": "warning"
},
"inbounds": [
{
"port": 443,
"protocol": "vless",
"settings": {
"clients": [
{
"id": "b1417d92-998d-410b-a5f3-cf144b6f043e",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/bingo.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/bingo.107421.xyz_ecc/bingo.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "freedom"
}
]
}

View File

@@ -0,0 +1 @@
当前 IP45.134.50.233 来自于:罗马尼亚 蒂米什县 蒂米什瓦拉 bunea.eu

View File

@@ -9,7 +9,7 @@
"settings": {
"clients": [
{
"id": "1dde748d-32ee-4ed7-b70b-f2376d34e7e5",
"id": "1089cc14-557e-47ac-ac85-c07957b3cce3",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
@@ -82,7 +82,7 @@
"clients": [
{
"email": "general@trojan-h2-tokyo2",
"password": "ADasfsaad12.21312@113.adsaddasds.112321",
"password": "ADaSfsaad12.21312-.1Ac13.adsCCddasds.112321",
"level": 0
}
]

View File

@@ -0,0 +1,47 @@
{
"log": {
"loglevel": "warning"
},
"inbounds": [
{
"protocol": "socks",
"listen": "0.0.0.0",
"port": 1234,
"settings": {
"auth": "noauth",
"udp": true,
"ip": "127.0.0.1",
"userLevel": 0
}
},
{
"listen": "0.0.0.0",
"port": 1235,
"protocol": "http",
"tag": "http-no-auth",
"settings": {
"timeout": 300
}
},
{
"listen": "0.0.0.0",
"port": 1236,
"protocol": "http",
"tag": "http-with-auth",
"settings": {
"accounts": [
{
"user": "zeaslity",
"pass": "loveff.22"
}
],
"timeout": 300
}
}
],
"outbounds": [
{
"protocol": "freedom"
}
]
}

Binary file not shown.

View File

@@ -0,0 +1,58 @@
# Restrict access to the website (by IP or wrong domain name) and return 400
server {
listen unix:/dev/shm/h2c.sock proxy_protocol default_server;
# listen 5000;
http2 on;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name _;
return 400 "not allowed";
}
server {
# listen 5001;
listen unix:/dev/shm/h1.sock proxy_protocol default_server;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name _;
return 400 "not allowed";
}
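# [说明] 上述 UDS 监听需与 Xray fallbacks 中的 "dest"(/dev/shm/h1.sock、/dev/shm/h2c.sock)
# 及 "xver": 2(即 PROXY protocol v2)一一对应;listen 指令上的 proxy_protocol 正是用来
# 接收该协议头,任何一侧缺失都会导致请求失败或 real_ip 还原错误。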
# HTTP1 UDS listener
server {
listen unix:/dev/shm/h1.sock proxy_protocol;
# listen 5001;
server_name xx.l4.ca.bg.107421.xyz;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
location / {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; # enable HSTS
root /var/www/html/;
index index.html index.htm;
}
}
# HTTP2 UDS listener
server {
listen unix:/dev/shm/h2c.sock proxy_protocol;
http2 on;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name xx.l4.ca.bg.107421.xyz;
# grpc settings
# grpc_read_timeout 1h;
# grpc_send_timeout 1h;
# grpc_set_header X-Real-IP $remote_addr;
# Decoy website
location / {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; # enable HSTS
root /var/www/html;
index index.html index.htm;
}
}

View File

@@ -0,0 +1,57 @@
# Restrict access to the website (by IP or wrong domain name) and return 400
server {
listen unix:/dev/shm/h2c.sock http2 proxy_protocol default_server;
# listen 5000;
# http2 on;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name _;
return 400 "not allowed";
}
server {
# listen 5001;
listen unix:/dev/shm/h1.sock proxy_protocol default_server;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name _;
return 400 "not allowed";
}
# HTTP1 UDS listener
server {
listen unix:/dev/shm/h1.sock proxy_protocol;
# listen 5001;
server_name bingo.107421.xyz;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
location / {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; # enable HSTS
root /var/www/html/;
index index.html index.htm;
}
}
# HTTP2 UDS listener
server {
listen unix:/dev/shm/h2c.sock http2 proxy_protocol;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name bingo.107421.xyz;
# grpc settings
# grpc_read_timeout 1h;
# grpc_send_timeout 1h;
# grpc_set_header X-Real-IP $remote_addr;
# Decoy website
location / {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; # enable HSTS
root /var/www/html;
index index.html index.htm;
}
}

View File

@@ -17,3 +17,26 @@ echo -e "Package: *\nPin: origin nginx.org\nPin: release o=nginx\nPin-Priority:
sudo apt update
sudo apt install -y nginx
mkdir -p /var/www/html/
# 上传文件
mv dist.zip /var/www/html/
cd /var/www/html/
unzip dist.zip
chown -R www-data:www-data /var/www/html/
chmod -R 755 /var/www/html/
# nginx配置
systemctl restart nginx
systemctl enable nginx
systemctl restart xray
systemctl enable xray
journalctl -u nginx -n 100 -f
journalctl -u xray -n 100 -f

File diff suppressed because it is too large

View File

@@ -1,11 +1,28 @@
vmess://eyJ2IjoiMiIsInBzIjoidXMtY2VudGUtZnJlZSIsImFkZCI6Im5vcnRoZmxhbmsuMTA3NDIxLnh5eiIsInBvcnQiOjQ0MywiaWQiOiJkZTA0YWRkOS01YzY4LThiYWItOTUwYy0wOGNkNTMyMGRmMTgiLCJhaWQiOjAsInNjeSI6ImF1dG8iLCJuZXQiOiJ3cyIsInBhdGgiOiIvdm1lc3MiLCJ0bHMiOiJ0bHMifQ==
trojan://Vad3.123a%29asd1234-asdasd.asdazzS.123@43.154.83.213:443?flow=xtls-rprx-vision&security=tls&sni=xx.tc.hk.go.107421.xyz&alpn=h2&fp=firefox&type=http&path=trh2#TC-HK-Trojan
vless://b4bdf874-8c03-5bd8-8fd7-5e409dfd82c0@43.154.83.213:443?encryption=none&flow=xtls-rprx-vision&security=tls&sni=book.107421.xyz&alpn=h2%2Chttp%2F1.1&fp=firefox&type=tcp&headerType=none#TC-HK-Vless
vless://1dde748d-32ee-4ed7-b70b-f2376d34e7e5@132.145.87.10:443?encryption=none&flow=xtls-rprx-vision&security=tls&sni=xx.s0.yy.ac.107421.xyz&alpn=h2&fp=firefox&type=tcp&headerType=none&host=xx.s0.yy.ac.107421.xyz#Oracle-Seoul-ARM01-Vless
vless://1dde748d-32ee-4ed7-b70b-f2376d34e7e5@140.238.14.103:443?encryption=none&flow=xtls-rprx-vision&security=tls&sni=xx.s4.cc.hh.107421.xyz&alpn=h2&fp=firefox&type=tcp&headerType=none&host=xx.s4.cc.hh.107421.xyz#Oracle-Seoul-Vless
socks://emVhc2xpdHk6bG92ZW1tLjIz@42.192.52.227:22888#TC-SH-LosA-BanH
vless://717c40e7-efeb-45bc-8f5e-4e6e7d9eea18@89.208.251.209:443?encryption=none&flow=xtls-rprx-vision&security=tls&sni=octopus.107421.xyz&alpn=h2&fp=firefox&type=tcp&headerType=none#BanH-LosA-Vless
trojan://Vad3.123a%29asd%401234-as.dasd.asdazzS.123@89.208.251.209:443?flow=xtls-rprx-vision&security=tls&sni=xx.l4.cc.nn.107421.xyz&alpn=h2&fp=firefox&type=http&host=xx.l4.cc.nn.107421.xyz&path=status#BanH-LosA-Trojan
trojan://ADasfsaad12.21312%40113.adsaddasds.112321@140.238.14.103:443?flow=xtls-rprx-vision&security=tls&sni=xx.t2.ll.c0.107421.xyz&alpn=h2&fp=firefox&type=http&host=xx.t2.ll.c0.107421.xyz&path=vlh2tokyo2#Oracle-Tokyo-Trojan
vless://12491d80-745c-4e26-a58b-edf584afb208@129.146.57.94:443?encryption=none&flow=xtls-rprx-vision&security=tls&sni=zc.p4.cc.xx.107421.xyz&alpn=h2&fp=firefox&type=tcp&headerType=none#Oracle-Pheonix-ARM02-Vless
vless://f8702759-f402-4e85-92a6-8540d577de22@43.154.83.213:24443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#TC-HongKong
vless://302fbcb8-e096-46a1-906f-e879ec5ab0c5@45.143.128.143:443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=xx.l4.ca.bg.107421.xyz#BFC-LosAngles
vless://9fa9b4e7-d76d-4890-92cf-ce9251a76f59@43.154.83.213:24451?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#Care-DEU-Dusseldorf-R-TCHK
vless://7e27da0c-3013-4ed4-817b-50cc76a0bf81@43.154.83.213:24449?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#Oracle-KOR-Seoul-R-TCHK
vless://c751811a-404f-4a05-bc41-5d572e741398@43.154.83.213:25000?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#Oracle-JPN-Tokyo-R-TCHK
vless://fce2a9c6-1380-4ffa-ba84-6b9ec9ee2eea@43.154.83.213:25001?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#Oracle-USA-Phoenix-R-TCHK
vless://93be1d17-8e02-449d-bb99-683ed46fbe50@43.154.83.213:24453?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#CF-HongKong-R-TCHK
vless://cdf0b19a-9524-48d5-b697-5f10bb567734@43.154.83.213:24452?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-HongKong
vless://b1417d92-998d-410b-a5f3-cf144b6f043e@45.134.50.233:443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=bingo.107421.xyz#Care-DEU-Dusseldorf
vless://1089cc14-557e-47ac-ac85-c07957b3cce3@140.238.14.103:443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=xx.s4.cc.hh.107421.xyz#Oracle-KOR-Seoul
vless://6055eac4-dee7-463b-b575-d30ea94bb768@43.154.83.213:24444?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-DEU-Frankfurt
vless://1cd284b2-d3d8-4165-b773-893f836c2b51@43.154.83.213:24445?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-KOR-Seoul
vless://bf0e9c35-84a9-460e-b5bf-2fa9f2fb3bca@43.154.83.213:24446?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-JPN-Tokyo
vless://adc19390-373d-4dfc-b0f6-19fab1b6fbf6@43.154.83.213:24447?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-GBR-London
vless://e31bc28e-8ebd-4d72-a98e-9227f26dfac3@43.154.83.213:24448?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-SGP
vless://56fb312c-bdb0-48ca-bf66-4a2dd34040c6@43.154.83.213:24450?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-USA-LosAngles
vless://86c50e3a-5b87-49dd-bd20-03c7f2735e40@bingo.pp.icederce.ip-ddns.com:8443?encryption=none&security=tls&type=ws&sni=pp.icederce.ip-ddns.com&host=pp.icederce.ip-ddns.com&path=/?ed=2560#CF_VIDEO_1
vless://86c50e3a-5b87-49dd-bd20-03c7f2735e40@bingo.icederce.ip-ddns.com:8443?encryption=none&security=tls&type=ws&sni=pp.icederce.ip-ddns.com&host=pp.icederce.ip-ddns.com&path=/?ed=2560#CF_VIDEO_2
vless://21dab95b-088e-47bd-8351-609fd23cb33c@140.238.14.103:20443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=xx.t2.ll.c0.107421.xyz#Oracle-JPN-Tokyo-R-OSel
vless://4c2dd763-56e5-408f-bc8f-dbf4c1fe41f9@140.238.14.103:21443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=xx.o1.vl.s4.107421.xyz#Oracle-JPN-Osaka-R-OSel
vless://de576486-e254-4d9d-949a-37088358ec23@140.238.14.103:22443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=xx.p2.vl.s4.107421.xyz#Oracle-USA-Phoenix-R-OSel

View File

@@ -0,0 +1,55 @@
# ==========================================================
# 核心拥塞控制与队列管理 (解决丢包与延迟)
# ==========================================================
# 使用 FQ (Fair Queueing) 队列调度算法,是 BBR 官方推荐的搭配
net.core.default_qdisc = fq
# 开启 BBR 拥塞控制算法
net.ipv4.tcp_congestion_control = bbr
# ==========================================================
# TCP 缓冲区调优 (针对 30-50Mbps 带宽优化)
# ==========================================================
# 格式: min default max
# 核心策略:限制 max 值,防止 Bufferbloat。
# 2MB (2097152) 的缓冲区足以应对 50Mbps 下 300ms 的抖动,再大就会导致高延迟。
# 接收缓冲区
net.ipv4.tcp_rmem = 4096 87380 2097152
# 发送缓冲区
net.ipv4.tcp_wmem = 4096 65536 2097152
# 开启窗口缩放,允许窗口超过 64KB
net.ipv4.tcp_window_scaling = 1
# ==========================================================
# 连接追踪与并发优化 (Xray 高并发转发需求)
# ==========================================================
# 增加系统级文件描述符限制
fs.file-max = 1048576
# 增加入站连接队列长度,防止突发流量导致握手失败
net.core.somaxconn = 4096
net.core.netdev_max_backlog = 4096
# TCP Fast Open (TFO):减少握手延迟
# 值 3 表示同时开启客户端和服务端的 TFO 支持
# Xray 需要在配置中显式开启 "tcpFastOpen": true 才能生效
net.ipv4.tcp_fastopen = 3
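# [配置示意] 对应地,Xray 可在出入站的 streamSettings 中按如下 JSON 片段开启(仅为示意):
#   "sockopt": { "tcpFastOpen": true }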
# ==========================================================
# 抗丢包与快速恢复
# ==========================================================
# 开启 SACK (选择性确认),对丢包环境极其重要
net.ipv4.tcp_sack = 1
net.ipv4.tcp_dsack = 1
net.ipv4.tcp_fack = 1
# 缩短保活探测时间快速剔除死连接GFW常导致连接“假死”
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_keepalive_probes = 5
net.ipv4.tcp_keepalive_intvl = 15
# ==========================================================
# 转发功能开启 (主机A必须)
# ==========================================================
net.ipv4.ip_forward = 1
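# ==========================================================
# 应用与验证 (以下命令仅为示意,文件路径以实际保存位置为准)
# ==========================================================
# sysctl -p /etc/sysctl.d/99-proxy-tuning.conf
# sysctl net.ipv4.tcp_congestion_control   # 预期输出: net.ipv4.tcp_congestion_control = bbr
# sysctl net.core.default_qdisc            # 预期输出: net.core.default_qdisc = fq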

View File

@@ -0,0 +1,7 @@
module wdd.io/net-monitor
go 1.25.3
require golang.org/x/net v0.47.0
require golang.org/x/sys v0.38.0 // indirect

View File

@@ -0,0 +1,4 @@
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=

View File

@@ -0,0 +1,730 @@
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"net"
"os"
"strings"
"sync"
"time"
"golang.org/x/net/icmp"
"golang.org/x/net/ipv4"
)
// ============ 通用数据结构 ============
type TestPacket struct {
SeqNum uint64
Timestamp int64
Type string // "tcp", "udp", "ping"
Data []byte
}
type Stats struct {
PacketsReceived uint64
PacketsLost uint64
LastSeqNum uint64
RTTSamples []time.Duration
}
type Metrics struct {
PacketsSent uint64
PacketsReceived uint64
PacketsLost uint64
RTTSamples []time.Duration
MinRTT time.Duration
MaxRTT time.Duration
AvgRTT time.Duration
Jitter time.Duration
}
type TestReport struct {
Timestamp time.Time
TestDuration time.Duration
TargetHost string
TCPMetrics *Metrics
UDPMetrics *Metrics
TracerouteHops []HopInfo
}
type HopInfo struct {
TTL int
Address string
RTT time.Duration
}
// ============ 服务端实现 ============
type NetworkServer struct {
tcpAddr string
udpAddr string
tcpStats *Stats
udpStats *Stats
statsLock sync.RWMutex
}
func NewNetworkServer(tcpPort, udpPort int) *NetworkServer {
return &NetworkServer{
tcpAddr: fmt.Sprintf(":%d", tcpPort),
udpAddr: fmt.Sprintf(":%d", udpPort),
tcpStats: &Stats{},
udpStats: &Stats{},
}
}
func (ns *NetworkServer) Start() {
log.Printf("========== 网络质量检测服务端 ==========")
log.Printf("TCP监听端口: %s", ns.tcpAddr)
log.Printf("UDP监听端口: %s", ns.udpAddr)
log.Printf("服务器已启动,等待客户端连接...")
log.Printf("========================================\n")
// 启动TCP服务器
go ns.serveTCP()
// 启动UDP服务器
go ns.serveUDP()
select {}
}
func (ns *NetworkServer) serveTCP() {
listener, err := net.Listen("tcp", ns.tcpAddr)
if err != nil {
log.Fatalf("TCP监听失败: %v", err)
}
defer listener.Close()
for {
conn, err := listener.Accept()
if err != nil {
log.Printf("TCP连接接受错误: %v", err)
continue
}
go ns.handleTCPConnection(conn)
}
}
func (ns *NetworkServer) handleTCPConnection(conn net.Conn) {
defer conn.Close()
log.Printf("[TCP] 新连接来自 %s", conn.RemoteAddr())
buf := make([]byte, 8192)
for {
n, err := conn.Read(buf)
if err != nil {
log.Printf("[TCP] 连接 %s 断开", conn.RemoteAddr())
return
}
var packet TestPacket
if err := json.Unmarshal(buf[:n], &packet); err != nil {
continue
}
receiveTime := time.Now().UnixNano()
ns.updateStats(ns.tcpStats, packet.SeqNum)
// 立即回显数据包
response := TestPacket{
SeqNum: packet.SeqNum,
Timestamp: receiveTime,
Type: "tcp_response",
Data: packet.Data,
}
data, _ := json.Marshal(response)
conn.Write(data)
}
}
func (ns *NetworkServer) serveUDP() {
addr, err := net.ResolveUDPAddr("udp", ns.udpAddr)
if err != nil {
log.Fatalf("UDP地址解析失败: %v", err)
}
conn, err := net.ListenUDP("udp", addr)
if err != nil {
log.Fatalf("UDP监听失败: %v", err)
}
defer conn.Close()
log.Printf("[UDP] 监听在 %s", ns.udpAddr)
buf := make([]byte, 8192)
for {
n, remoteAddr, err := conn.ReadFromUDP(buf)
if err != nil {
log.Printf("UDP读取错误: %v", err)
continue
}
var packet TestPacket
if err := json.Unmarshal(buf[:n], &packet); err != nil {
continue
}
receiveTime := time.Now().UnixNano()
ns.updateStats(ns.udpStats, packet.SeqNum)
// 回显UDP数据包
response := TestPacket{
SeqNum: packet.SeqNum,
Timestamp: receiveTime,
Type: "udp_response",
Data: packet.Data,
}
data, _ := json.Marshal(response)
conn.WriteToUDP(data, remoteAddr)
}
}
func (ns *NetworkServer) updateStats(stats *Stats, seqNum uint64) {
ns.statsLock.Lock()
defer ns.statsLock.Unlock()
stats.PacketsReceived++
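	// 简化假设: 探测包按序到达; 若出现乱序(序号回退), 此前计入的丢包数不会被回收,
	// 因此丢包统计可能被略微高估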
if seqNum > stats.LastSeqNum+1 {
stats.PacketsLost += seqNum - stats.LastSeqNum - 1
}
stats.LastSeqNum = seqNum
}
// ============ 客户端实现 ============
type NetworkClient struct {
targetHost string
tcpPort int
udpPort int
testDuration time.Duration
packetSize int
reportFile string
tcpMetrics *Metrics
udpMetrics *Metrics
mu sync.Mutex
}
func NewNetworkClient(host string, tcpPort, udpPort int, duration time.Duration) *NetworkClient {
return &NetworkClient{
targetHost: host,
tcpPort: tcpPort,
udpPort: udpPort,
testDuration: duration,
packetSize: 1024,
reportFile: "network_quality_report.json",
tcpMetrics: &Metrics{MinRTT: time.Hour},
udpMetrics: &Metrics{MinRTT: time.Hour},
}
}
func (nc *NetworkClient) testTCPLatency() error {
addr := fmt.Sprintf("%s:%d", nc.targetHost, nc.tcpPort)
conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
if err != nil {
return fmt.Errorf("TCP连接失败: %v", err)
}
defer conn.Close()
log.Printf("[TCP] 开始延迟测试 -> %s", addr)
var seqNum uint64
deadline := time.Now().Add(nc.testDuration)
for time.Now().Before(deadline) {
seqNum++
packet := TestPacket{
SeqNum: seqNum,
Timestamp: time.Now().UnixNano(),
Type: "tcp_probe",
Data: make([]byte, nc.packetSize),
}
		sendTime := time.Now()
		data, _ := json.Marshal(packet)
		nc.mu.Lock()
		nc.tcpMetrics.PacketsSent++ // 先计入发送数,保证丢包率 Lost/Sent 不会超过 100%
		nc.mu.Unlock()
		if _, err := conn.Write(data); err != nil {
			nc.mu.Lock()
			nc.tcpMetrics.PacketsLost++
			nc.mu.Unlock()
			continue
		}
buf := make([]byte, 8192)
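		// 简化假设: 一次 Read 恰好对应一个完整的 JSON 回包;
		// TCP 是字节流, 高频发送时可能出现粘包/半包, 严格实现应使用长度前缀等方式分帧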
conn.SetReadDeadline(time.Now().Add(2 * time.Second))
_, err := conn.Read(buf)
if err != nil {
nc.mu.Lock()
nc.tcpMetrics.PacketsLost++
nc.mu.Unlock()
continue
}
rtt := time.Since(sendTime)
nc.updateTCPMetrics(rtt)
time.Sleep(100 * time.Millisecond)
}
log.Printf("[TCP] 测试完成")
return nil
}
func (nc *NetworkClient) testUDPLatency() error {
addr := fmt.Sprintf("%s:%d", nc.targetHost, nc.udpPort)
raddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return fmt.Errorf("UDP地址解析失败: %v", err)
}
conn, err := net.DialUDP("udp", nil, raddr)
if err != nil {
return fmt.Errorf("UDP连接失败: %v", err)
}
defer conn.Close()
log.Printf("[UDP] 开始延迟测试 -> %s", addr)
var seqNum uint64
deadline := time.Now().Add(nc.testDuration)
sentPackets := make(map[uint64]time.Time)
var wg sync.WaitGroup
// 发送协程
wg.Add(1)
go func() {
defer wg.Done()
for time.Now().Before(deadline) {
seqNum++
packet := TestPacket{
SeqNum: seqNum,
Timestamp: time.Now().UnixNano(),
Type: "udp_probe",
Data: make([]byte, nc.packetSize),
}
sendTime := time.Now()
nc.mu.Lock()
sentPackets[seqNum] = sendTime
nc.mu.Unlock()
data, _ := json.Marshal(packet)
conn.Write(data)
nc.mu.Lock()
nc.udpMetrics.PacketsSent++
nc.mu.Unlock()
time.Sleep(100 * time.Millisecond)
}
}()
// 接收协程
buf := make([]byte, 8192)
for time.Now().Before(deadline.Add(3 * time.Second)) {
conn.SetReadDeadline(time.Now().Add(1 * time.Second))
n, err := conn.Read(buf)
if err != nil {
continue
}
var response TestPacket
if err := json.Unmarshal(buf[:n], &response); err != nil {
continue
}
nc.mu.Lock()
if sendTime, ok := sentPackets[response.SeqNum]; ok {
rtt := time.Since(sendTime)
nc.updateUDPMetrics(rtt)
delete(sentPackets, response.SeqNum)
}
nc.mu.Unlock()
}
wg.Wait()
nc.mu.Lock()
nc.udpMetrics.PacketsLost = uint64(len(sentPackets))
nc.mu.Unlock()
log.Printf("[UDP] 测试完成")
return nil
}
func (nc *NetworkClient) performTraceroute() ([]HopInfo, error) {
log.Printf("[Traceroute] 路由追踪到 %s", nc.targetHost)
hops := make([]HopInfo, 0, 30)
maxTTL := 30
timeout := 2 * time.Second
for ttl := 1; ttl <= maxTTL; ttl++ {
hopInfo, reached, err := nc.probeHop(ttl, timeout)
if err != nil {
continue
}
hops = append(hops, hopInfo)
if hopInfo.Address != "*" {
log.Printf(" %2d %-15s %.2fms", ttl, hopInfo.Address,
float64(hopInfo.RTT.Microseconds())/1000.0)
} else {
log.Printf(" %2d *", ttl)
}
if reached {
break
}
}
return hops, nil
}
func (nc *NetworkClient) probeHop(ttl int, timeout time.Duration) (HopInfo, bool, error) {
conn, err := icmp.ListenPacket("ip4:icmp", "0.0.0.0")
if err != nil {
return HopInfo{}, false, err
}
defer conn.Close()
if err := conn.IPv4PacketConn().SetTTL(ttl); err != nil {
return HopInfo{}, false, err
}
msg := icmp.Message{
Type: ipv4.ICMPTypeEcho,
Code: 0,
Body: &icmp.Echo{
ID: os.Getpid() & 0xffff,
Seq: ttl,
Data: []byte("TRACEROUTE"),
},
}
msgBytes, err := msg.Marshal(nil)
if err != nil {
return HopInfo{}, false, err
}
dst, err := net.ResolveIPAddr("ip4", nc.targetHost)
if err != nil {
return HopInfo{}, false, err
}
start := time.Now()
if _, err := conn.WriteTo(msgBytes, dst); err != nil {
return HopInfo{}, false, err
}
reply := make([]byte, 1500)
conn.SetReadDeadline(time.Now().Add(timeout))
_, peer, err := conn.ReadFrom(reply)
rtt := time.Since(start)
if err != nil {
return HopInfo{TTL: ttl, Address: "*", RTT: 0}, false, nil
}
hopAddr := peer.String()
reachedTarget := (hopAddr == dst.String())
return HopInfo{
TTL: ttl,
Address: hopAddr,
RTT: rtt,
}, reachedTarget, nil
}
func (nc *NetworkClient) updateTCPMetrics(rtt time.Duration) {
nc.mu.Lock()
defer nc.mu.Unlock()
nc.tcpMetrics.PacketsReceived++
nc.tcpMetrics.RTTSamples = append(nc.tcpMetrics.RTTSamples, rtt)
if rtt < nc.tcpMetrics.MinRTT {
nc.tcpMetrics.MinRTT = rtt
}
if rtt > nc.tcpMetrics.MaxRTT {
nc.tcpMetrics.MaxRTT = rtt
}
}
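// updateUDPMetrics 由 testUDPLatency 在已持有 nc.mu 的情况下调用,因此内部不再加锁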
func (nc *NetworkClient) updateUDPMetrics(rtt time.Duration) {
nc.udpMetrics.PacketsReceived++
nc.udpMetrics.RTTSamples = append(nc.udpMetrics.RTTSamples, rtt)
if rtt < nc.udpMetrics.MinRTT {
nc.udpMetrics.MinRTT = rtt
}
if rtt > nc.udpMetrics.MaxRTT {
nc.udpMetrics.MaxRTT = rtt
}
}
func (nc *NetworkClient) calculateMetrics() {
// 计算TCP平均RTT和抖动
if len(nc.tcpMetrics.RTTSamples) > 0 {
var sum time.Duration
for _, rtt := range nc.tcpMetrics.RTTSamples {
sum += rtt
}
nc.tcpMetrics.AvgRTT = sum / time.Duration(len(nc.tcpMetrics.RTTSamples))
var jitterSum time.Duration
for i := 1; i < len(nc.tcpMetrics.RTTSamples); i++ {
diff := nc.tcpMetrics.RTTSamples[i] - nc.tcpMetrics.RTTSamples[i-1]
if diff < 0 {
diff = -diff
}
jitterSum += diff
}
if len(nc.tcpMetrics.RTTSamples) > 1 {
nc.tcpMetrics.Jitter = jitterSum / time.Duration(len(nc.tcpMetrics.RTTSamples)-1)
}
}
// 计算UDP平均RTT和抖动
if len(nc.udpMetrics.RTTSamples) > 0 {
var sum time.Duration
for _, rtt := range nc.udpMetrics.RTTSamples {
sum += rtt
}
nc.udpMetrics.AvgRTT = sum / time.Duration(len(nc.udpMetrics.RTTSamples))
var jitterSum time.Duration
for i := 1; i < len(nc.udpMetrics.RTTSamples); i++ {
diff := nc.udpMetrics.RTTSamples[i] - nc.udpMetrics.RTTSamples[i-1]
if diff < 0 {
diff = -diff
}
jitterSum += diff
}
if len(nc.udpMetrics.RTTSamples) > 1 {
nc.udpMetrics.Jitter = jitterSum / time.Duration(len(nc.udpMetrics.RTTSamples)-1)
}
}
}
func (nc *NetworkClient) generateReport(hops []HopInfo) error {
nc.calculateMetrics()
report := TestReport{
Timestamp: time.Now(),
TestDuration: nc.testDuration,
TargetHost: nc.targetHost,
TCPMetrics: nc.tcpMetrics,
UDPMetrics: nc.udpMetrics,
TracerouteHops: hops,
}
// 将报告序列化为JSON单行不格式化
data, err := json.Marshal(report)
if err != nil {
return err
}
// 以追加模式打开文件,如果不存在则创建
file, err := os.OpenFile(nc.reportFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
return fmt.Errorf("打开报告文件失败: %v", err)
}
defer file.Close()
// 写入JSON数据并添加换行符JSON Lines格式
if _, err := file.Write(data); err != nil {
return fmt.Errorf("写入报告数据失败: %v", err)
}
if _, err := file.WriteString("\n"); err != nil {
return fmt.Errorf("写入换行符失败: %v", err)
}
log.Printf("测试报告已追加至: %s", nc.reportFile)
nc.printReport(&report)
return nil
}
func (nc *NetworkClient) printReport(report *TestReport) {
fmt.Println("\n" + strings.Repeat("=", 50))
fmt.Println(" 网络质量检测报告")
fmt.Println(strings.Repeat("=", 50))
fmt.Printf("测试时间: %s\n", report.Timestamp.Format("2006-01-02 15:04:05"))
fmt.Printf("目标主机: %s\n", report.TargetHost)
fmt.Printf("测试时长: %v\n", report.TestDuration)
fmt.Println(strings.Repeat("-", 50))
fmt.Println("\n【TCP 测试结果】")
fmt.Printf(" 发送包数: %d\n", report.TCPMetrics.PacketsSent)
fmt.Printf(" 接收包数: %d\n", report.TCPMetrics.PacketsReceived)
if report.TCPMetrics.PacketsSent > 0 {
lossRate := float64(report.TCPMetrics.PacketsLost) / float64(report.TCPMetrics.PacketsSent) * 100
fmt.Printf(" 丢包数量: %d (丢包率: %.2f%%)\n", report.TCPMetrics.PacketsLost, lossRate)
}
if report.TCPMetrics.MinRTT < time.Hour {
fmt.Printf(" 最小RTT: %v\n", report.TCPMetrics.MinRTT)
fmt.Printf(" 平均RTT: %v\n", report.TCPMetrics.AvgRTT)
fmt.Printf(" 最大RTT: %v\n", report.TCPMetrics.MaxRTT)
fmt.Printf(" 抖动: %v\n", report.TCPMetrics.Jitter)
}
fmt.Println("\n【UDP 测试结果】")
fmt.Printf(" 发送包数: %d\n", report.UDPMetrics.PacketsSent)
fmt.Printf(" 接收包数: %d\n", report.UDPMetrics.PacketsReceived)
if report.UDPMetrics.PacketsSent > 0 {
lossRate := float64(report.UDPMetrics.PacketsLost) / float64(report.UDPMetrics.PacketsSent) * 100
fmt.Printf(" 丢包数量: %d (丢包率: %.2f%%)\n", report.UDPMetrics.PacketsLost, lossRate)
}
if report.UDPMetrics.MinRTT < time.Hour {
fmt.Printf(" 最小RTT: %v\n", report.UDPMetrics.MinRTT)
fmt.Printf(" 平均RTT: %v\n", report.UDPMetrics.AvgRTT)
fmt.Printf(" 最大RTT: %v\n", report.UDPMetrics.MaxRTT)
fmt.Printf(" 抖动: %v\n", report.UDPMetrics.Jitter)
}
fmt.Println(strings.Repeat("=", 50))
fmt.Printf("报告已保存至: %s\n\n", nc.reportFile)
}
func (nc *NetworkClient) RunScheduledTests(interval time.Duration) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
log.Printf("\n========== 开始定时测试 [%s] ==========",
time.Now().Format("2006-01-02 15:04:05"))
// 执行TCP测试
if err := nc.testTCPLatency(); err != nil {
log.Printf("TCP测试错误: %v", err)
}
// 重置UDP指标
nc.mu.Lock()
nc.udpMetrics = &Metrics{MinRTT: time.Hour}
nc.mu.Unlock()
// 执行UDP测试
if err := nc.testUDPLatency(); err != nil {
log.Printf("UDP测试错误: %v", err)
}
// 执行Traceroute
hops, err := nc.performTraceroute()
if err != nil {
log.Printf("Traceroute错误: %v", err)
}
// 生成报告
if err := nc.generateReport(hops); err != nil {
log.Printf("报告生成错误: %v", err)
}
// 重置TCP指标准备下一轮
nc.mu.Lock()
nc.tcpMetrics = &Metrics{MinRTT: time.Hour}
nc.mu.Unlock()
log.Printf("========== 测试完成,等待下一轮 ==========\n")
<-ticker.C
}
}
// ============ 主程序 ============
func main() {
// 定义命令行参数
var (
mode = flag.String("mode", "", "运行模式: server(服务端) 或 client(客户端)")
tcpPort = flag.Int("tcp", 9001, "TCP测试端口")
udpPort = flag.Int("udp", 9002, "UDP测试端口")
targetHost = flag.String("target", "", "目标主机地址(客户端模式必需)")
testDuration = flag.Int("duration", 60, "单次测试时长(秒)")
interval = flag.Int("interval", 3600, "定时测试间隔(秒), 0表示只执行一次")
)
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "\n网络质量检测工具\n\n")
fmt.Fprintf(os.Stderr, "用法:\n")
fmt.Fprintf(os.Stderr, " 服务端模式: %s -mode server [-tcp 端口] [-udp 端口]\n", os.Args[0])
fmt.Fprintf(os.Stderr, " 客户端模式: %s -mode client -target 目标IP [-tcp 端口] [-udp 端口] [-duration 秒] [-interval 秒]\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "参数说明:\n")
flag.PrintDefaults()
fmt.Fprintf(os.Stderr, "\n示例:\n")
fmt.Fprintf(os.Stderr, " # 启动服务端,使用默认端口\n")
fmt.Fprintf(os.Stderr, " %s -mode server\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # 启动服务端,自定义端口\n")
fmt.Fprintf(os.Stderr, " %s -mode server -tcp 8001 -udp 8002\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # 启动客户端单次测试60秒\n")
fmt.Fprintf(os.Stderr, " %s -mode client -target 192.168.1.100 -duration 60 -interval 0\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, " # 启动客户端,每小时测试一次\n")
fmt.Fprintf(os.Stderr, " %s -mode client -target 192.168.1.100 -interval 3600\n\n", os.Args[0])
}
flag.Parse()
// 验证参数
if *mode == "" {
fmt.Fprintf(os.Stderr, "错误: 必须指定运行模式 -mode server 或 -mode client\n\n")
flag.Usage()
os.Exit(1)
}
switch *mode {
case "server":
// 服务端模式
server := NewNetworkServer(*tcpPort, *udpPort)
server.Start()
case "client":
// 客户端模式
if *targetHost == "" {
fmt.Fprintf(os.Stderr, "错误: 客户端模式必须指定 -target 参数\n\n")
flag.Usage()
os.Exit(1)
}
client := NewNetworkClient(*targetHost, *tcpPort, *udpPort, time.Duration(*testDuration)*time.Second)
if *interval == 0 {
// 单次执行
log.Printf("开始单次网络质量测试...")
if err := client.testTCPLatency(); err != nil {
log.Printf("TCP测试错误: %v", err)
}
if err := client.testUDPLatency(); err != nil {
log.Printf("UDP测试错误: %v", err)
}
hops, err := client.performTraceroute()
if err != nil {
log.Printf("Traceroute错误: %v", err)
}
if err := client.generateReport(hops); err != nil {
log.Printf("报告生成错误: %v", err)
}
} else {
// 定时执行
log.Printf("开始定时网络质量监控,间隔: %d秒", *interval)
client.RunScheduledTests(time.Duration(*interval) * time.Second)
}
default:
fmt.Fprintf(os.Stderr, "错误: 无效的运行模式 '%s',必须是 'server' 或 'client'\n\n", *mode)
flag.Usage()
os.Exit(1)
}
}
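// [构建与运行示意] 使用标准 go 工具链(输出文件名为假设值):
//   go build -o net-monitor main.go
//   ./net-monitor -mode server
//   ./net-monitor -mode client -target <服务端IP> -duration 60 -interval 0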

View File

@@ -0,0 +1,407 @@
package main
//
//import (
// "encoding/binary"
// "encoding/json"
// "flag"
// "fmt"
// "io"
// "log"
// "net"
// "os"
// "sync"
// "time"
//)
//
//// 配置常量
//const (
// ProtocolTCP = "tcp"
// ProtocolUDP = "udp"
// PacketSize = 64 // 探测包大小
// LossTestCount = 20 // 每次丢包测试发送的数据包数量
// TraceMaxTTL = 30 // 路由追踪最大跳数
// ReportFileName = "network_quality_report.log"
//)
//
//// 命令行参数
//var (
// mode = flag.String("mode", "client", "运行模式: server 或 client")
// targetIP = flag.String("target", "127.0.0.1", "目标IP地址 (客户端模式填写服务端的IP)")
// tcpPort = flag.String("tcp-port", "8080", "TCP监听/连接端口")
// udpPort = flag.String("udp-port", "8081", "UDP监听/连接端口")
// interval = flag.Int("interval", 10, "测试间隔时间(秒)")
// doTrace = flag.Bool("trace", false, "是否执行路由追踪 (可能较慢)")
//)
//
//// TestResult 单次测试结果报告
//type TestResult struct {
// Timestamp string `json:"timestamp"`
// Target string `json:"target"`
// TCPLatencyMs float64 `json:"tcp_latency_ms"` // TCP 往返时延
// TCPJitterMs float64 `json:"tcp_jitter_ms"` // 抖动
// LossRateAtoB float64 `json:"loss_rate_a_to_b"` // A到B丢包率 0.0 - 1.0
// LossRateBtoA float64 `json:"loss_rate_b_to_a"` // B到A丢包率 0.0 - 1.0 (模拟)
// TraceRoute []string `json:"traceroute,omitempty"` // 路由路径
//}
//
//// Global logger
//var logger *log.Logger
//
//// C:\Users\wddsh\go\bin\gox.exe -osarch="linux/amd64" -output "build/agent-wdd_{{.OS}}_{{.Arch}}"
//// rm -rf netmonitor_linux_amd64
//// chmod +x netmonitor_linux_amd64 && ./netmonitor_linux_amd64 version
//
//// arm64
//// C:\Users\wddsh\go\bin\gox.exe -osarch="linux/arm64" -output "build/netmonitor_{{.OS}}_{{.Arch}}"
//func main() {
// flag.Parse()
//
// // 初始化日志
// file, err := os.OpenFile(ReportFileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
// if err != nil {
// fmt.Printf("无法创建日志文件: %v\n", err)
// return
// }
// defer file.Close()
//
// // 同时输出到控制台和文件
// multiWriter := io.MultiWriter(os.Stdout, file)
// logger = log.New(multiWriter, "", log.LstdFlags)
//
// if *mode == "server" {
// runServer()
// } else {
// runClient()
// }
//}
//
//// ======================= 服务端逻辑 (Host B) =======================
//
//func runServer() {
// logger.Println("=== 启动主机 B (Server Mode) ===")
// var wg sync.WaitGroup
// wg.Add(2)
//
// // 1. 启动 TCP Echo Server (用于延迟测试和信令)
// go func() {
// defer wg.Done()
// addr := fmt.Sprintf("0.0.0.0:%s", *tcpPort)
// listener, err := net.Listen(ProtocolTCP, addr)
// if err != nil {
// logger.Fatalf("TCP 监听失败: %v", err)
// }
// logger.Printf("TCP 服务监听中: %s", addr)
//
// for {
// conn, err := listener.Accept()
// if err != nil {
// continue
// }
// go handleTCPConnection(conn)
// }
// }()
//
// // 2. 启动 UDP Server (用于丢包测试)
// go func() {
// defer wg.Done()
// addr := fmt.Sprintf("0.0.0.0:%s", *udpPort)
// udpAddr, err := net.ResolveUDPAddr(ProtocolUDP, addr)
// if err != nil {
// logger.Fatalf("UDP 地址解析失败: %v", err)
// }
// conn, err := net.ListenUDP(ProtocolUDP, udpAddr)
// if err != nil {
// logger.Fatalf("UDP 监听失败: %v", err)
// }
// logger.Printf("UDP 服务监听中: %s", addr)
// handleUDPConnection(conn)
// }()
//
// wg.Wait()
//}
//
//func handleTCPConnection(conn net.Conn) {
// defer conn.Close()
// // 简单的 Echo 服务:收到什么发回什么
// // 这样客户端可以通过计算发送时间和接收时间的差值来算出 RTT
// buf := make([]byte, 1024)
// for {
// conn.SetReadDeadline(time.Now().Add(5 * time.Second))
// n, err := conn.Read(buf)
// if err != nil {
// return
// }
// // 原样写回
// conn.Write(buf[:n])
// }
//}
//
//func handleUDPConnection(conn *net.UDPConn) {
// // UDP 处理逻辑:
// // 1. 接收客户端发来的包(统计 A->B 丢包)
// // 2. 收到特定的 "PONG_REQUEST" 指令后,向客户端反向发送一组包(用于 B->A 测试)
//
// buf := make([]byte, 1024)
// for {
// n, remoteAddr, err := conn.ReadFromUDP(buf)
// if err != nil {
// continue
// }
//
// data := string(buf[:n])
//
// // 如果收到的是反向测试请求
// if data == "TEST_B_TO_A" {
// go sendUDPBurst(conn, remoteAddr)
// }
// // 否则只是接收用于客户端计算发送成功率或者服务端不做处理完全由客户端通过TCP信令协调
// // 在本简化模型中我们采用Echo模式来计算UDP RTT丢包或者单向接收
// // 为了实现"B到A丢包",我们使用上面的 TEST_B_TO_A 触发器
// }
//}
//
//func sendUDPBurst(conn *net.UDPConn, addr *net.UDPAddr) {
// // 向客户端反向发送数据包
// for i := 0; i < LossTestCount; i++ {
// msg := []byte(fmt.Sprintf("SEQ:%d", i))
// conn.WriteToUDP(msg, addr)
// time.Sleep(10 * time.Millisecond) // 发送间隔,防止本地拥塞
// }
//}
//
//// ======================= 客户端逻辑 (Host A) =======================
//
//func runClient() {
// logger.Println("=== 启动主机 A (Client Mode) ===")
// logger.Printf("目标主机: %s, 周期: %d秒", *targetIP, *interval)
//
// ticker := time.NewTicker(time.Duration(*interval) * time.Second)
// defer ticker.Stop()
//
// // 立即执行一次
// performTest()
//
// for range ticker.C {
// performTest()
// }
//}
//
//func performTest() {
//    result := TestResult{
//        Timestamp: time.Now().Format("2006-01-02 15:04:05"),
//        Target:    *targetIP,
//    }
//
//    logger.Println("------------------------------------------------")
//    logger.Printf("[%s] starting a new test round...", result.Timestamp)
//
//    // 1. TCP latency (RTT)
//    latency, jitter, err := testTCPLatency(*targetIP + ":" + *tcpPort)
//    if err != nil {
//        logger.Printf("TCP connection failed: %v", err)
//    } else {
//        result.TCPLatencyMs = latency
//        result.TCPJitterMs = jitter
//        logger.Printf("TCP latency: %.2f ms, jitter: %.2f ms", latency, jitter)
//    }
//
//    // 2. Packet loss (both directions)
//    // Note: a precise B->A measurement would require B's cooperation.
//    lossA2B, lossB2A, err := testPacketLoss(*targetIP + ":" + *udpPort)
//    if err != nil {
//        logger.Printf("UDP test failed: %v", err)
//    } else {
//        result.LossRateAtoB = lossA2B
//        result.LossRateBtoA = lossB2A
//        logger.Printf("loss rate A->B: %.1f%%, B->A: %.1f%%", lossA2B*100, lossB2A*100)
//    }
//
//    // 3. (Optional) route tracing
//    if *doTrace {
//        route := performTraceRoute(*targetIP, *tcpPort)
//        result.TraceRoute = route
//        logger.Println("traceroute finished")
//    }
//
//    // 4. Persist the report
//    saveReport(result)
//}
//
//// testTCPLatency sends several TCP round trips and computes the average RTT and jitter
//func testTCPLatency(address string) (float64, float64, error) {
//    conn, err := net.DialTimeout(ProtocolTCP, address, 3*time.Second)
//    if err != nil {
//        return 0, 0, err
//    }
//    defer conn.Close()
//
//    var rtts []float64
//    count := 5 // measure 5 round trips and average them
//
//    payload := []byte("PING_PAYLOAD_DATA_CHECK_LATENCY")
//    buf := make([]byte, 1024)
//
//    for i := 0; i < count; i++ {
//        start := time.Now()
//
//        _, err := conn.Write(payload)
//        if err != nil {
//            return 0, 0, err
//        }
//
//        conn.SetReadDeadline(time.Now().Add(2 * time.Second))
//        _, err = conn.Read(buf)
//        if err != nil {
//            return 0, 0, err
//        }
//
//        rtt := float64(time.Since(start).Microseconds()) / 1000.0 // ms
//        rtts = append(rtts, rtt)
//        time.Sleep(100 * time.Millisecond)
//    }
//
//    // Average RTT
//    var sum float64
//    for _, v := range rtts {
//        sum += v
//    }
//    avg := sum / float64(len(rtts))
//
//    // Jitter as the mean absolute deviation from the average. This stays in
//    // milliseconds; the raw variance used previously would have been in ms².
//    var deviationSum float64
//    for _, v := range rtts {
//        diff := v - avg
//        if diff < 0 {
//            diff = -diff
//        }
//        deviationSum += diff
//    }
//    jitter := deviationSum / float64(len(rtts))
//
//    return avg, jitter, nil
//}
//
//// testPacketLoss measures UDP packet loss via echo round trips
//func testPacketLoss(address string) (float64, float64, error) {
//    udpAddr, err := net.ResolveUDPAddr(ProtocolUDP, address)
//    if err != nil {
//        return 0, 0, err
//    }
//    conn, err := net.DialUDP(ProtocolUDP, nil, udpAddr)
//    if err != nil {
//        return 0, 0, err
//    }
//    defer conn.Close()
//
//    // Design note: cleanly separating A->B loss from B->A loss would require
//    // the server to count received packets and report back over TCP, and the
//    // reverse burst (TEST_B_TO_A) would additionally need NAT hole punching
//    // in many deployments. To keep this a simple single-file tool, echo mode
//    // is used instead: the client sends LossTestCount sequenced packets, the
//    // server echoes each one, and any packet without a timely echo counts as
//    // lost. The result is the combined A->B->A loss, which is a reasonable
//    // proxy for overall link quality.
//
//    readBuf := make([]byte, 1024)
//    successCount := 0
//    for i := 0; i < LossTestCount; i++ {
//        seqMsg := []byte(fmt.Sprintf("SEQ:%d", i))
//        _, err := conn.Write(seqMsg)
//        if err == nil {
//            conn.SetReadDeadline(time.Now().Add(200 * time.Millisecond))
//            n, _, err := conn.ReadFromUDP(readBuf)
//            if err == nil && n > 0 {
//                successCount++
//            }
//        }
//        time.Sleep(10 * time.Millisecond)
//    }
//
//    lossRate := 1.0 - (float64(successCount) / float64(LossTestCount))
//
//    // Under this simple echo model the two directions cannot be separated,
//    // so the same round-trip rate is reported for both A->B and B->A.
//    return lossRate, lossRate, nil
//}
//
//// performTraceRoute is a simple application-layer route-trace placeholder
//func performTraceRoute(target string, port string) []string {
//    var hops []string
//
//    // A real traceroute must craft IP packets with increasing TTLs, which
//    // requires raw sockets (root privileges) and e.g. golang.org/x/net/ipv4.
//    // To keep this tool a self-contained single file that runs without root,
//    // only an end-to-end connectivity record is produced here.
//    hops = append(hops, "Traceroute requires raw-socket privileges; only the end-to-end hop is shown in this mode")
//    hops = append(hops, fmt.Sprintf("1. Local -> %s:%s (Direct/NAT)", target, port))
//
//    return hops
//}
//
//func saveReport(result TestResult) {
//    data, err := json.Marshal(result)
//    if err != nil {
//        logger.Printf("JSON marshalling failed: %v", err)
//        return
//    }
//    logger.Println("saving report...")
//    // The human-readable log already went to the MultiWriter above; the raw
//    // JSON additionally goes to a dedicated data file (JSON Lines format) so
//    // that machines can parse it without wading through log noise.
//    f, err := os.OpenFile("net_report_data.jsonl", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
//    if err == nil {
//        defer f.Close()
//        f.Write(data)
//        f.WriteString("\n")
//    }
//}
//
//// Helper: convert an int to big-endian bytes
//func itob(v int) []byte {
// b := make([]byte, 8)
// binary.BigEndian.PutUint64(b, uint64(v))
// return b
//}

View File

@@ -0,0 +1,50 @@
#### 1. Build
```bash
go build -o netmonitor net_quality_monitor.go
```
#### 2. Deploy and Run
**Step 1: start the server on Host B**
The server listens on TCP port 28080 and UDP port 28081, waiting for connections from A.
```bash
# Linux/Mac
./netmonitor -mode server -tcp 28080 -udp 28081
```
**Step 2: start the client on Host A**
The client tests the network quality to 140.238.52.228 once per interval (60 seconds in the first example below).
```bash
# Start the test
./netmonitor -mode client -target 140.238.52.228 -tcp 28080 -udp 28081 -duration 10 -interval 60
# Germany
./netmonitor -mode client -target 43.154.83.213 -tcp 28080 -udp 28081 -duration 15 -interval 600
```
### Feature Details
1. **Real TCP interaction**
   * The `testTCPLatency` function does not use ICMP ping; it completes a full three-way handshake via `net.DialTimeout` and sends payload data, which the server receives and echoes back.
   * The measured time includes the TCP handshake + data transfer + ACK, so it reflects what applications (HTTP/RPC) actually experience better than an ordinary ping does.
2. **UDP packet-loss monitoring**
   * `testPacketLoss` uses UDP, which is connectionless and makes no delivery guarantee.
   * The client sends `LossTestCount` packets (20 by default) in quick succession; any packet whose echo does not return in time is counted as lost. For example, 18 echoes for 20 probes means a 10% loss rate.
   * This effectively detects path congestion and firewall rate limiting.
3. **Scheduling and reporting**
   * The program uses `time.Ticker` to keep a precise test cadence.
   * Results are written both to the **console** and to **net_quality_report.log**.
   * In addition, **net_report_data.jsonl** is produced with one JSON object per line, convenient for later charting via scripts (Python, ELK, etc.).
4. **A note on traceroute**
   * A `TraceRoute` field is reserved in the code.
   * *Note*: a real traceroute in Go (one that manipulates the TTL) needs the `golang.org/x/net/ipv4` package and a raw socket, which forces the program to run as **root/administrator**. To keep this a compact single-file tool that also works for unprivileged users, the raw-socket code was left out; the current implementation is an application-layer connectivity check. A hedged sketch of the raw-socket approach follows this list.
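For reference, here is a minimal, hypothetical sketch of that raw-socket approach using `golang.org/x/net/ipv4` and `golang.org/x/net/icmp` (neither is used by the tool itself). It must run as root, the target comes from a made-up CLI argument, and it omits the probe/reply matching a production traceroute would need:
```go
package main

import (
	"fmt"
	"net"
	"os"
	"time"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

func main() {
	target := os.Args[1] // hypothetical: pass the probe target as the first argument

	// Raw ICMP listener for the routers' replies (requires root privileges).
	icmpConn, err := icmp.ListenPacket("ip4:icmp", "0.0.0.0")
	if err != nil {
		fmt.Println("raw ICMP socket needs root:", err)
		return
	}
	defer icmpConn.Close()

	buf := make([]byte, 1500)
	for ttl := 1; ttl <= 30; ttl++ {
		// A fresh UDP socket per hop so its TTL can be set individually.
		udpConn, err := net.Dial("udp4", net.JoinHostPort(target, "33434")) // conventional traceroute base port
		if err != nil {
			fmt.Println(err)
			return
		}
		ipv4.NewConn(udpConn).SetTTL(ttl) // probe expires after `ttl` hops
		udpConn.Write([]byte("probe"))
		udpConn.Close()

		// Wait briefly for ICMP "time exceeded" (a router) or "destination
		// unreachable" (the target itself). A real tool would match replies
		// to probes; this sketch just takes the next ICMP packet it sees.
		icmpConn.SetReadDeadline(time.Now().Add(2 * time.Second))
		n, peer, err := icmpConn.ReadFrom(buf)
		if err != nil {
			fmt.Printf("%2d. *\n", ttl) // timeout, hop did not answer
			continue
		}
		msg, err := icmp.ParseMessage(1, buf[:n]) // 1 = IPv4 ICMP protocol number
		if err != nil {
			continue
		}
		fmt.Printf("%2d. %v (%v)\n", ttl, peer, msg.Type)
		if msg.Type == ipv4.ICMPTypeDestinationUnreachable {
			break // the target replied; trace complete
		}
	}
}
```
Routers answer expired probes with ICMP time-exceeded, while the destination answers the high UDP port with port-unreachable, which is what terminates the loop.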
### Sample Report
Example contents of the data file (`net_report_data.jsonl`):
```json
{"timestamp":"2023-10-27 10:00:00","target":"192.168.1.200","tcp_latency_ms":12.5,"tcp_jitter_ms":1.2,"loss_rate_a_to_b":0.0,"loss_rate_b_to_a":0.0}
{"timestamp":"2023-10-27 10:00:10","target":"192.168.1.200","tcp_latency_ms":12.8,"tcp_jitter_ms":0.9,"loss_rate_a_to_b":0.05,"loss_rate_b_to_a":0.05}
```
You can read this file directly with Excel or Python to chart network-quality fluctuations.
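If Go is preferred over Excel/Python, a minimal sketch along the same lines might look like this (the field names are taken from the sample above; the chosen aggregation, average latency plus worst-case loss, is only an illustration):
```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
)

// record matches the fields shown in the sample report above.
type record struct {
	Timestamp    string  `json:"timestamp"`
	TCPLatencyMs float64 `json:"tcp_latency_ms"`
	LossRateAtoB float64 `json:"loss_rate_a_to_b"`
}

func main() {
	f, err := os.Open("net_report_data.jsonl")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	var sum, worstLoss float64
	var count int
	scanner := bufio.NewScanner(f)
	for scanner.Scan() { // one JSON object per line
		var r record
		if err := json.Unmarshal(scanner.Bytes(), &r); err != nil {
			continue // skip malformed lines
		}
		sum += r.TCPLatencyMs
		if r.LossRateAtoB > worstLoss {
			worstLoss = r.LossRateAtoB
		}
		count++
	}
	if count > 0 {
		fmt.Printf("samples: %d, avg latency: %.2f ms, worst A->B loss: %.1f%%\n",
			count, sum/float64(count), worstLoss*100)
	}
}
```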

View File

@@ -1,896 +0,0 @@
#---------------------------------------------------#
## Updated: 2022-09-28 09:02:50
## Thanks to: https://github.com/Hackl0us/SS-Rule-Snippet
## Link: https://link.oness.xyz/link/0fHPiayjsMIn6BUC?clash=1
#---------------------------------------------------#
# HTTP proxy port
port: 7890
# SOCKS5 proxy port
socks-port: 7891
# redir proxy port on Linux and macOS
redir-port: 7892
# Allow LAN connections
allow-lan: true
# Routing mode: Rule (rule-based) / Global (proxy everything) / Direct (direct everything)
mode: Rule
# Log level. Default is silent, i.e. no output at all, to keep an oversized log
# from exhausting memory. Five levels: silent / info / warning / error / debug;
# higher levels log more and are meant for debugging, so enable them as needed.
log-level: info
# Clash RESTful API
external-controller: '0.0.0.0:9090'
# RESTful API secret
secret: ''
# Static web assets (e.g. clash-dashboard) can be placed in a directory and
# clash will serve them at `RESTful API/ui`; the value should be a relative
# or absolute path to that directory.
# external-ui: folder
proxies:
# vmess
# cipher support auto/aes-128-gcm/chacha20-poly1305/none
- name: "Tencent-Shanghai-Relay"
type: vmess
server: 42.192.52.227
port: 19999
uuid: 7318178c-5583-40dd-996c-a0add1f8fc1e
alterId: 0
cipher: auto
# udp: true
tls: false
skip-cert-verify: true
# servername: example.com # priority over wss host
network: http
http-opts:
host:
path:
- /v2ice-vmess-tcp-seoul
# headers:
# Host: v2ray.com
# max-early-data: 2048
# early-data-header-name: Sec-WebSocket-Protocol
proxy-groups:
- name: Proxy
type: select
# disable-udp: true
proxies:
- Tencent-Shanghai-Relay
- name: Direct
type: select
proxies:
- DIRECT
- name: Domestic
type: select
proxies:
- DIRECT
- Proxy
- name: Others
type: select
proxies:
- Proxy
- DIRECT
# Rules
rules:
# anti-ads
- DOMAIN-KEYWORD,adservice,REJECT
- DOMAIN-SUFFIX,adcolony.com,REJECT
- DOMAIN-SUFFIX,adinall.com,REJECT
- DOMAIN-SUFFIX,admaster.com.cn,REJECT
- DOMAIN-SUFFIX,admob.com,REJECT
- DOMAIN-SUFFIX,adnxs.com,REJECT
- DOMAIN-SUFFIX,adnyg.com,REJECT
- DOMAIN-SUFFIX,adsensor.org,REJECT
- DOMAIN-SUFFIX,adsymptotic.com,REJECT
- DOMAIN-SUFFIX,adthor.com,REJECT
- DOMAIN-SUFFIX,adwhirl.com,REJECT
- DOMAIN-SUFFIX,amazon-adsystem.com,REJECT
- DOMAIN-SUFFIX,amobee.com,REJECT
- DOMAIN-SUFFIX,app-adforce.jp,REJECT
- DOMAIN-SUFFIX,appads.com,REJECT
- DOMAIN-SUFFIX,appcpi.net,REJECT
- DOMAIN-SUFFIX,appier.net,REJECT
- DOMAIN-SUFFIX,applift.com,REJECT
- DOMAIN-SUFFIX,applovin.com,REJECT
- DOMAIN-SUFFIX,applvn.com,REJECT
- DOMAIN-SUFFIX,apsalar.com,REJECT
- DOMAIN-SUFFIX,apxadtracking.net,REJECT
- DOMAIN-SUFFIX,axonix.com,REJECT
- DOMAIN-SUFFIX,bayimob.com,REJECT
- DOMAIN-SUFFIX,bjvvqu.cn,REJECT
- DOMAIN-SUFFIX,bulldogcpi.com,REJECT
- DOMAIN-SUFFIX,clotfun.mobi,REJECT
- DOMAIN-SUFFIX,clotfun.online,REJECT
- DOMAIN-SUFFIX,cloudmobi.net,REJECT
- DOMAIN-SUFFIX,crwdcntrl.net,REJECT
- DOMAIN-SUFFIX,ctrmi.com,REJECT
- DOMAIN-SUFFIX,exosrv.com,REJECT
- DOMAIN-SUFFIX,go2cloud.org,REJECT
- DOMAIN-SUFFIX,growingio.com,REJECT
- DOMAIN-SUFFIX,haloapps.com,REJECT
- DOMAIN-SUFFIX,hypers.com,REJECT
- DOMAIN-SUFFIX,idealads.net,REJECT
- DOMAIN-SUFFIX,inmobi.cn,REJECT
- DOMAIN-SUFFIX,inmobi.com,REJECT
- DOMAIN-SUFFIX,inmobi.net,REJECT
- DOMAIN-SUFFIX,inmobicdn.cn,REJECT
- DOMAIN-SUFFIX,inmobicdn.net,REJECT
- DOMAIN-SUFFIX,inner-active.mobi,REJECT
- DOMAIN-SUFFIX,insurads.com,REJECT
- DOMAIN-SUFFIX,ironsrc.com,REJECT
- DOMAIN-SUFFIX,irs01.com,REJECT
- DOMAIN-SUFFIX,iskyworker.com,REJECT
- DOMAIN-SUFFIX,juicyads.com,REJECT
- DOMAIN-SUFFIX,kochava.com,REJECT
- DOMAIN-SUFFIX,leadboltmobile.net,REJECT
- DOMAIN-SUFFIX,lenzmx.com,REJECT
- DOMAIN-SUFFIX,liveadvert.com,REJECT
- DOMAIN-SUFFIX,lnk0.com,REJECT
- DOMAIN-SUFFIX,lnk8.cn,REJECT
- DOMAIN-SUFFIX,localytics.com,REJECT
- DOMAIN-SUFFIX,mdfull.com,REJECT
- DOMAIN-SUFFIX,measurementapi.com,REJECT
- DOMAIN-SUFFIX,medialytics.com,REJECT
- DOMAIN-SUFFIX,meetrics.com,REJECT
- DOMAIN-SUFFIX,meetrics.net,REJECT
- DOMAIN-SUFFIX,miaozhen.com,REJECT
- DOMAIN-SUFFIX,mmstat.com,REJECT
- DOMAIN-SUFFIX,moatads.com,REJECT
- DOMAIN-SUFFIX,mobclix.com,REJECT
- DOMAIN-SUFFIX,mopub.com,REJECT
- DOMAIN-SUFFIX,okjhb.xyz,REJECT
- DOMAIN-SUFFIX,openx.net,REJECT
- DOMAIN-SUFFIX,outbrain.com,REJECT
- DOMAIN-SUFFIX,pubmatic.com,REJECT
- DOMAIN-SUFFIX,qchannel01.cn,REJECT
- DOMAIN-SUFFIX,rayjump.com,REJECT
- DOMAIN-SUFFIX,rtbasia.com,REJECT
- DOMAIN-SUFFIX,rubiconproject.com,REJECT
- DOMAIN-SUFFIX,scorecardresearch.com,REJECT
- DOMAIN-SUFFIX,sdkclick.com,REJECT
- DOMAIN-SUFFIX,shuzilm.cn,REJECT
- DOMAIN-SUFFIX,smaato.net,REJECT
- DOMAIN-SUFFIX,smartadserver.com,REJECT
- DOMAIN-SUFFIX,smartnews-ads.com,REJECT
- DOMAIN-SUFFIX,supersonic.com,REJECT
- DOMAIN-SUFFIX,supersonicads.com,REJECT
- DOMAIN-SUFFIX,tagtic.cn,REJECT
- DOMAIN-SUFFIX,tanv.com,REJECT
- DOMAIN-SUFFIX,tanx.com,REJECT
- DOMAIN-SUFFIX,tapjoy.com,REJECT
- DOMAIN-SUFFIX,trafficjunky.net,REJECT
- DOMAIN-SUFFIX,turn.com,REJECT
- DOMAIN-SUFFIX,uri6.com,REJECT
- DOMAIN-SUFFIX,vidoomy.com,REJECT
- DOMAIN-SUFFIX,vungle.com,REJECT
- DOMAIN-SUFFIX,wedolook.com,REJECT
- DOMAIN-SUFFIX,xdrig.com,REJECT
- DOMAIN-SUFFIX,yumimobi.com,REJECT
- DOMAIN-SUFFIX,zu08e.cn,REJECT
- DOMAIN-SUFFIX,ad.cmvideo.cn,REJECT
- DOMAIN-SUFFIX,ad.daum.net,REJECT
- DOMAIN,abema-adx.ameba.jp,REJECT
- DOMAIN,ad.12306.cn,REJECT
- DOMAIN,ad.360in.com,REJECT
- DOMAIN,ad.51wnl-cq.com,REJECT
- DOMAIN,ad.caiyunapp.com,REJECT
- DOMAIN,ad.huajiao.com,REJECT
- DOMAIN,ad.hzyoka.com,REJECT
- DOMAIN,ad.jiemian.com,REJECT
- DOMAIN,ad.qingting.fm,REJECT
- DOMAIN,ad.wappalyzer.com,REJECT
- DOMAIN,ad-cn.jovcloud.com,REJECT
- DOMAIN,adextra.51wnl-cq.com,REJECT
- DOMAIN,api.adnet.mob.com,REJECT
- DOMAIN,ads.adadapted.com,REJECT
- DOMAIN,ads.chinadaily.com.cn,REJECT
- DOMAIN,ads.daydaycook.com.cn,REJECT
- DOMAIN,ads.weilitoutiao.net,REJECT
- DOMAIN,adsapi.manhuaren.com,REJECT
- DOMAIN,adsdk.dmzj.com,REJECT
- DOMAIN,adserver.pandora.com,REJECT
- DOMAIN,adshow.58.com,REJECT
- DOMAIN,adui.tg.meitu.com,REJECT
- DOMAIN,adv.bandi.so,REJECT
- DOMAIN,app-ad.variflight.com,REJECT
- DOMAIN,appnext.hs.llnwd.net,REJECT
- DOMAIN,appnext-a.akamaihd.net,REJECT
- DOMAIN,ggs.myzaker.com,REJECT
- DOMAIN,itad.linetv.tw,REJECT
- DOMAIN,ja.chushou.tv,REJECT
- DOMAIN,mads.suning.com,REJECT
- DOMAIN,mobileads.msn.com,REJECT
- DOMAIN,mopnativeadv.037201.com,REJECT
- DOMAIN,nativeadv.dftoutiao.com,REJECT
- DOMAIN-SUFFIX,iadsdk.apple.com,REJECT
- DOMAIN-SUFFIX,ads.internal.unity3d.com,REJECT
- DOMAIN-SUFFIX,ads.prd.ie.internal.unity3d.com,REJECT
- DOMAIN-SUFFIX,unityads.unity3d.com,REJECT
- DOMAIN,optimus-ads.amap.com,REJECT
- DOMAIN,optimus-ads.amap.com.w.alikunlun.com,REJECT
- DOMAIN,tunion-api.m.taobao.com,REJECT
- DOMAIN,adproxy.autohome.com.cn,REJECT
- DOMAIN,rd.autohome.com.cn,REJECT
- DOMAIN,al.autohome.com.cn,REJECT
- DOMAIN,applogapi.autohome.com.cn,REJECT
- DOMAIN-SUFFIX,cpro.baidu.com,REJECT
- DOMAIN-SUFFIX,pos.baidu.com,REJECT
- DOMAIN,afd.baidu.com,REJECT
- DOMAIN,als.baidu.com,REJECT
- DOMAIN,duclick.baidu.com,REJECT
- DOMAIN,mobads.baidu.com,REJECT
- DOMAIN,mobads-logs.baidu.com,REJECT
- DOMAIN,nsclick.baidu.com,REJECT
- DOMAIN,ad.toutiao.com,REJECT
- DOMAIN,adx.yiche.com,REJECT
- DOMAIN,log.ycapp.yiche.com,REJECT
- DOMAIN,advertise.baicizhan.com,REJECT
- DOMAIN,advertise.baicizhan.org,REJECT
- DOMAIN,galaxy.bjcathay.com,REJECT
- DOMAIN,mdrecv.app.cntvwb.cn,REJECT
- DOMAIN,sdapprecv.app.cntvwb.cn,REJECT
- DOMAIN,vdapprecv.app.cntvwb.cn,REJECT
- DOMAIN,ad.21cn.com,REJECT
- DOMAIN,ad.k.21cn.com,REJECT
- DOMAIN,admarket.21cn.com,REJECT
- DOMAIN,adshows.21cn.com,REJECT
- DOMAIN,atrace.chelaile.net.cn,REJECT
- DOMAIN,logs.chelaile.net.cn,REJECT
- DOMAIN-SUFFIX,doubleclick.net,REJECT
- DOMAIN-SUFFIX,googleadservices.com,REJECT
- DOMAIN-SUFFIX,googleadsserving.cn,REJECT
- DOMAIN-SUFFIX,googlesyndication.com,REJECT
- DOMAIN-SUFFIX,da.mgtv.com,REJECT
- DOMAIN-SUFFIX,da.hunantv.com,REJECT
- DOMAIN,adx.hupu.com,REJECT
- DOMAIN,adx-api.hupu.com,REJECT
- DOMAIN,goblin.hupu.com,REJECT
- DOMAIN,t7z.cupid.iqiyi.com,REJECT
- IP-CIDR,101.227.97.240/32,REJECT,no-resolve
- IP-CIDR,101.227.200.11/32,REJECT,no-resolve
- IP-CIDR,101.227.200.28/32,REJECT,no-resolve
- IP-CIDR,124.192.153.42/32,REJECT,no-resolve
- DOMAIN-SUFFIX,deliver.ifeng.com,REJECT
- DOMAIN,api.newad.ifeng.com,REJECT
- DOMAIN,ifengad.3g.ifeng.com,REJECT
- DOMAIN,adserviceretry.kugou.com,REJECT
- DOMAIN,ads.service.kugou.com,REJECT
- DOMAIN,adsfile.bssdlbig.kugou.com,REJECT
- DOMAIN,g.koowo.com,REJECT
- DOMAIN,kgmobilestat.kugou.com,REJECT
- DOMAIN,kgmobilestatbak.kugou.com,REJECT
- DOMAIN,mobilelog.kugou.com,REJECT
- DOMAIN,mobilead.kuwo.cn,REJECT
- DOMAIN,rich.kuwo.cn,REJECT
- DOMAIN,ad-stat.ksosoft.com,REJECT
- DOMAIN,img.auction-ads.wpscdn.cn,REJECT
- DOMAIN,counter.kingsoft.com,REJECT
- DOMAIN,counter.ksosoft.com,REJECT
- DOMAIN,minfo.wps.cn,REJECT
- DOMAIN,mobad.ijinshan.com,REJECT
- DOMAIN,ups.ksmobile.net,REJECT
- DOMAIN,ws.ksmobile.net,REJECT
- DOMAIN-SUFFIX,webp2p.letv.com,REJECT
- DOMAIN,ark.letv.com,REJECT
- DOMAIN,emma-414870e223.huodonghezi.com,REJECT
- DOMAIN,g3.letv.com,REJECT
- DOMAIN,n.mark.letv.com,REJECT
- DOMAIN,ad.hpplay.cn,REJECT
- DOMAIN,adcdn.hpplay.cn,REJECT
- DOMAIN,adeng.hpplay.cn,REJECT
- DOMAIN,rp.hpplay.cn,REJECT
- DOMAIN-SUFFIX,ad.intl.xiaomi.com,REJECT
- DOMAIN-SUFFIX,ad.xiaomi.com,REJECT
- DOMAIN-SUFFIX,admob.xiaomi.com,REJECT
- DOMAIN,adv.sec.intl.miui.com,REJECT
- DOMAIN,adv.sec.miui.com,REJECT
- DOMAIN,ad.api.moji.com,REJECT
- DOMAIN,adlaunch.moji.com,REJECT
- DOMAIN,ads.mojicdn.com,REJECT
- DOMAIN,v1.log.moji.com,REJECT
- DOMAIN,ad.bn.netease.com,REJECT
- DOMAIN,ad.yixin.im,REJECT
- DOMAIN,admusicpic.music.126.net,REJECT
- DOMAIN,gorgon.youdao.com,REJECT
- DOMAIN,iadmat.nosdn.127.net,REJECT
- DOMAIN,iadmusicmat.music.126.net,REJECT
- DOMAIN,iadmusicmatvideo.music.126.net,REJECT
- DOMAIN,impservice.dictapp.youdao.com,REJECT
- DOMAIN,impservice.youdao.com,REJECT
- DOMAIN,log.yex.youdao.com,REJECT
- DOMAIN,log-yex.youdao.com,REJECT
- DOMAIN,n.3g.163.com,REJECT
- DOMAIN,nex.163.com,REJECT
- DOMAIN,yt-adp.nosdn.127.net,REJECT
- DOMAIN,yt-adp.ws.126.net,REJECT
- DOMAIN,ads.aplus.pptv.com,REJECT
- DOMAIN,ads.aplusapi.pptv.com,REJECT
- DOMAIN,asimgs.pplive.cn,REJECT
- DOMAIN,de.as.pptv.com,REJECT
- DOMAIN,regist.fotoable.com,REJECT
- DOMAIN,cdn.adapi.fotoable.com,REJECT
- DOMAIN,adnew.wifi8.com,REJECT
- DOMAIN,adfile.wifi8.com,REJECT
- DOMAIN-SUFFIX,beacon.sina.com.cn,REJECT
- DOMAIN,adimg.vue.weibo.com,REJECT
- DOMAIN,u1.img.mobile.sina.cn,REJECT
- DOMAIN,sax.sina.com.cn,REJECT
- DOMAIN,saxs.sina.com.cn,REJECT
- DOMAIN,saxn.sina.com.cn,REJECT
- DOMAIN-SUFFIX,ad.sohu.com,REJECT
- DOMAIN-SUFFIX,ads.sohu.com,REJECT
- DOMAIN-SUFFIX,aty.sohu.com,REJECT
- DOMAIN,imp.optaim.com,REJECT
- DOMAIN,v2.reachmax.cn,REJECT
- DOMAIN,track.sohu.com,REJECT
- DOMAIN,hui.sohu.com,REJECT
- DOMAIN-SUFFIX,e.qq.com,REJECT
- DOMAIN-SUFFIX,gdt.qq.com,REJECT
- DOMAIN-SUFFIX,l.qq.com,REJECT
- DOMAIN,adsmind.apdcdn.tc.qq.com,REJECT
- DOMAIN,adsmind.gdtimg.com,REJECT
- DOMAIN,adsmind.tc.qq.com,REJECT
- DOMAIN,pgdt.gtimg.cn,REJECT
- DOMAIN,pgdt.gtimg.com,REJECT
- DOMAIN,pgdt.ugdtimg.com,REJECT
- DOMAIN,splashqqlive.gtimg.com,REJECT
- DOMAIN,wa.gtimg.com,REJECT
- DOMAIN,wxsnsdy.wxs.qq.com,REJECT
- DOMAIN,wxsnsdythumb.wxs.qq.com,REJECT
- DOMAIN,admonitor.thepaper.cn,REJECT
- DOMAIN,adpai.thepaper.cn,REJECT
- DOMAIN,imgadpai.thepaper.cn,REJECT
- DOMAIN,adsp.xunlei.com,REJECT
- DOMAIN,etl.xlmc.sandai.net,REJECT
- DOMAIN,adm.10jqka.com.cn,REJECT
- DOMAIN,stat.10jqka.com.cn,REJECT
- DOMAIN,ad-analysis.pconline.com.cn,REJECT
- DOMAIN,iad0ssl.pcauto.com.cn,REJECT
- DOMAIN,iad0ssl.pconline.com.cn,REJECT
- DOMAIN,imgad0.pcauto.com.cn,REJECT
- DOMAIN,imgad0.pconline.com.cn,REJECT
- DOMAIN,ivy.pchouse.com.cn,REJECT
- DOMAIN,a.wkanx.com,REJECT
- DOMAIN,cwx.lianwangtech.com,REJECT
- DOMAIN,c1wx.lianwangtech.com,REJECT
- DOMAIN,ad.ximalaya.com,REJECT
- DOMAIN,adbs.ximalaya.com,REJECT
- DOMAIN,adse.ximalaya.com,REJECT
- DOMAIN,adse.wsa.ximalaya.com,REJECT
- DOMAIN,adbehavior.wsa.ximalaya.com,REJECT
- DOMAIN,adsebs.ximalaya.com,REJECT
- DOMAIN,ads-img-qc.xhscdn.com,REJECT
- DOMAIN,ads-video-qc.xhscdn.com,REJECT
- DOMAIN,t-ads.xiaohongshu.com,REJECT
- DOMAIN-SUFFIX,atm.youku.com,REJECT
- DOMAIN,ad.mobile.youku.com,REJECT
- DOMAIN,iyes.youku.com,REJECT
- DOMAIN,apppv.zol.com.cn,REJECT
- DOMAIN,pvnapp.zol.com.cn,REJECT
# (DNS Cache Pollution Protection)
# > Google
- DOMAIN-SUFFIX,appspot.com,Proxy
- DOMAIN-SUFFIX,blogger.com,Proxy
- DOMAIN-SUFFIX,getoutline.org,Proxy
- DOMAIN-SUFFIX,gvt0.com,Proxy
- DOMAIN-SUFFIX,gvt1.com,Proxy
- DOMAIN-SUFFIX,gvt3.com,Proxy
- DOMAIN-SUFFIX,xn--ngstr-lra8j.com,Proxy
- DOMAIN-KEYWORD,google,Proxy
- DOMAIN-KEYWORD,blogspot,Proxy
# > Facebook
- DOMAIN-SUFFIX,cdninstagram.com,Proxy
- DOMAIN-SUFFIX,fb.com,Proxy
- DOMAIN-SUFFIX,fb.me,Proxy
- DOMAIN-SUFFIX,fbaddins.com,Proxy
- DOMAIN-SUFFIX,fbcdn.net,Proxy
- DOMAIN-SUFFIX,fbsbx.com,Proxy
- DOMAIN-SUFFIX,fbworkmail.com,Proxy
- DOMAIN-SUFFIX,instagram.com,Proxy
- DOMAIN-SUFFIX,m.me,Proxy
- DOMAIN-SUFFIX,messenger.com,Proxy
- DOMAIN-SUFFIX,oculus.com,Proxy
- DOMAIN-SUFFIX,oculuscdn.com,Proxy
- DOMAIN-SUFFIX,rocksdb.org,Proxy
- DOMAIN-SUFFIX,whatsapp.com,Proxy
- DOMAIN-SUFFIX,whatsapp.net,Proxy
- DOMAIN-KEYWORD,facebook,Proxy
# > Twitter
- DOMAIN-SUFFIX,pscp.tv,Proxy
- DOMAIN-SUFFIX,periscope.tv,Proxy
- DOMAIN-SUFFIX,t.co,Proxy
- DOMAIN-SUFFIX,twimg.co,Proxy
- DOMAIN-SUFFIX,twimg.com,Proxy
- DOMAIN-SUFFIX,twitpic.com,Proxy
- DOMAIN-SUFFIX,vine.co,Proxy
- DOMAIN-KEYWORD,twitter,Proxy
# > Telegram
- DOMAIN-SUFFIX,t.me,Proxy
- DOMAIN-SUFFIX,tdesktop.com,Proxy
- DOMAIN-SUFFIX,telegra.ph,Proxy
- DOMAIN-SUFFIX,telegram.me,Proxy
- DOMAIN-SUFFIX,telegram.org,Proxy
# > Line
- DOMAIN-SUFFIX,line.me,Proxy
- DOMAIN-SUFFIX,line-apps.com,Proxy
- DOMAIN-SUFFIX,line-scdn.net,Proxy
- DOMAIN-SUFFIX,naver.jp,Proxy
# > Other
- DOMAIN-SUFFIX,4shared.com,Proxy
- DOMAIN-SUFFIX,881903.com,Proxy
- DOMAIN-SUFFIX,abc.net.au,Proxy
- DOMAIN-SUFFIX,abebooks.com,Proxy
- DOMAIN-SUFFIX,amazon.co.jp,Proxy
- DOMAIN-SUFFIX,apigee.com,Proxy
- DOMAIN-SUFFIX,apk-dl.com,Proxy
- DOMAIN-SUFFIX,apkmirror.com,Proxy
- DOMAIN-SUFFIX,apkmonk.com,Proxy
- DOMAIN-SUFFIX,apkpure.com,Proxy
- DOMAIN-SUFFIX,aptoide.com,Proxy
- DOMAIN-SUFFIX,archive.is,Proxy
- DOMAIN-SUFFIX,archive.org,Proxy
- DOMAIN-SUFFIX,arte.tv,Proxy
- DOMAIN-SUFFIX,ask.com,Proxy
- DOMAIN-SUFFIX,avgle.com,Proxy
- DOMAIN-SUFFIX,badoo.com,Proxy
- DOMAIN-SUFFIX,bandwagonhost.com,Proxy
- DOMAIN-SUFFIX,bbc.com,Proxy
- DOMAIN-SUFFIX,behance.net,Proxy
- DOMAIN-SUFFIX,bibox.com,Proxy
- DOMAIN-SUFFIX,biggo.com.tw,Proxy
- DOMAIN-SUFFIX,binance.com,Proxy
- DOMAIN-SUFFIX,bitcointalk.org,Proxy
- DOMAIN-SUFFIX,bitfinex.com,Proxy
- DOMAIN-SUFFIX,bitmex.com,Proxy
- DOMAIN-SUFFIX,bit-z.com,Proxy
- DOMAIN-SUFFIX,bloglovin.com,Proxy
- DOMAIN-SUFFIX,bloomberg.cn,Proxy
- DOMAIN-SUFFIX,bloomberg.com,Proxy
- DOMAIN-SUFFIX,book.com.tw,Proxy
- DOMAIN-SUFFIX,booklive.jp,Proxy
- DOMAIN-SUFFIX,books.com.tw,Proxy
- DOMAIN-SUFFIX,box.com,Proxy
- DOMAIN-SUFFIX,brookings.edu,Proxy
- DOMAIN-SUFFIX,businessinsider.com,Proxy
- DOMAIN-SUFFIX,bwh1.net,Proxy
- DOMAIN-SUFFIX,castbox.fm,Proxy
- DOMAIN-SUFFIX,cbc.ca,Proxy
- DOMAIN-SUFFIX,cdw.com,Proxy
- DOMAIN-SUFFIX,change.org,Proxy
- DOMAIN-SUFFIX,ck101.com,Proxy
- DOMAIN-SUFFIX,clarionproject.org,Proxy
- DOMAIN-SUFFIX,clyp.it,Proxy
- DOMAIN-SUFFIX,cna.com.tw,Proxy
- DOMAIN-SUFFIX,comparitech.com,Proxy
- DOMAIN-SUFFIX,conoha.jp,Proxy
- DOMAIN-SUFFIX,crucial.com,Proxy
- DOMAIN-SUFFIX,cts.com.tw,Proxy
- DOMAIN-SUFFIX,cw.com.tw,Proxy
- DOMAIN-SUFFIX,cyberctm.com,Proxy
- DOMAIN-SUFFIX,dailymotion.com,Proxy
- DOMAIN-SUFFIX,dailyview.tw,Proxy
- DOMAIN-SUFFIX,daum.net,Proxy
- DOMAIN-SUFFIX,daumcdn.net,Proxy
- DOMAIN-SUFFIX,dcard.tw,Proxy
- DOMAIN-SUFFIX,deepdiscount.com,Proxy
- DOMAIN-SUFFIX,deezer.com,Proxy
- DOMAIN-SUFFIX,depositphotos.com,Proxy
- DOMAIN-SUFFIX,disconnect.me,Proxy
- DOMAIN-SUFFIX,discordapp.com,Proxy
- DOMAIN-SUFFIX,discordapp.net,Proxy
- DOMAIN-SUFFIX,disqus.com,Proxy
- DOMAIN-SUFFIX,dns2go.com,Proxy
- DOMAIN-SUFFIX,dropbox.com,Proxy
- DOMAIN-SUFFIX,dropboxusercontent.com,Proxy
- DOMAIN-SUFFIX,duckduckgo.com,Proxy
- DOMAIN-SUFFIX,dw.com,Proxy
- DOMAIN-SUFFIX,dynu.com,Proxy
- DOMAIN-SUFFIX,earthcam.com,Proxy
- DOMAIN-SUFFIX,ebookservice.tw,Proxy
- DOMAIN-SUFFIX,economist.com,Proxy
- DOMAIN-SUFFIX,edgecastcdn.net,Proxy
- DOMAIN-SUFFIX,edu,Proxy
- DOMAIN-SUFFIX,elpais.com,Proxy
- DOMAIN-SUFFIX,enanyang.my,Proxy
- DOMAIN-SUFFIX,euronews.com,Proxy
- DOMAIN-SUFFIX,feedly.com,Proxy
- DOMAIN-SUFFIX,files.wordpress.com,Proxy
- DOMAIN-SUFFIX,flickr.com,Proxy
- DOMAIN-SUFFIX,flitto.com,Proxy
- DOMAIN-SUFFIX,foreignpolicy.com,Proxy
- DOMAIN-SUFFIX,friday.tw,Proxy
- DOMAIN-SUFFIX,gate.io,Proxy
- DOMAIN-SUFFIX,getlantern.org,Proxy
- DOMAIN-SUFFIX,getsync.com,Proxy
- DOMAIN-SUFFIX,globalvoices.org,Proxy
- DOMAIN-SUFFIX,goo.ne.jp,Proxy
- DOMAIN-SUFFIX,goodreads.com,Proxy
- DOMAIN-SUFFIX,gov.tw,Proxy
- DOMAIN-SUFFIX,gumroad.com,Proxy
- DOMAIN-SUFFIX,hbg.com,Proxy
- DOMAIN-SUFFIX,hightail.com,Proxy
- DOMAIN-SUFFIX,hk01.com,Proxy
- DOMAIN-SUFFIX,hkbf.org,Proxy
- DOMAIN-SUFFIX,hkbookcity.com,Proxy
- DOMAIN-SUFFIX,hkej.com,Proxy
- DOMAIN-SUFFIX,hket.com,Proxy
- DOMAIN-SUFFIX,hkgolden.com,Proxy
- DOMAIN-SUFFIX,hootsuite.com,Proxy
- DOMAIN-SUFFIX,hudson.org,Proxy
- DOMAIN-SUFFIX,huobi.pro,Proxy
- DOMAIN-SUFFIX,initiummall.com,Proxy
- DOMAIN-SUFFIX,ipfs.io,Proxy
- DOMAIN-SUFFIX,issuu.com,Proxy
- DOMAIN-SUFFIX,japantimes.co.jp,Proxy
- DOMAIN-SUFFIX,jiji.com,Proxy
- DOMAIN-SUFFIX,jinx.com,Proxy
- DOMAIN-SUFFIX,jkforum.net,Proxy
- DOMAIN-SUFFIX,joinmastodon.org,Proxy
- DOMAIN-SUFFIX,kakao.com,Proxy
- DOMAIN-SUFFIX,lihkg.com,Proxy
- DOMAIN-SUFFIX,live.com,Proxy
- DOMAIN-SUFFIX,mail.ru,Proxy
- DOMAIN-SUFFIX,matters.news,Proxy
- DOMAIN-SUFFIX,medium.com,Proxy
- DOMAIN-SUFFIX,mega.nz,Proxy
- DOMAIN-SUFFIX,mil,Proxy
- DOMAIN-SUFFIX,mobile01.com,Proxy
- DOMAIN-SUFFIX,naver.com,Proxy
- DOMAIN-SUFFIX,nikkei.com,Proxy
- DOMAIN-SUFFIX,nofile.io,Proxy
- DOMAIN-SUFFIX,now.com,Proxy
- DOMAIN-SUFFIX,nyt.com,Proxy
- DOMAIN-SUFFIX,nytchina.com,Proxy
- DOMAIN-SUFFIX,nytcn.me,Proxy
- DOMAIN-SUFFIX,nytco.com,Proxy
- DOMAIN-SUFFIX,nytimes.com,Proxy
- DOMAIN-SUFFIX,nytimg.com,Proxy
- DOMAIN-SUFFIX,nytlog.com,Proxy
- DOMAIN-SUFFIX,nytstyle.com,Proxy
- DOMAIN-SUFFIX,ok.ru,Proxy
- DOMAIN-SUFFIX,okex.com,Proxy
- DOMAIN-SUFFIX,pcloud.com,Proxy
- DOMAIN-SUFFIX,pinimg.com,Proxy
- DOMAIN-SUFFIX,pixiv.net,Proxy
- DOMAIN-SUFFIX,pornhub.com,Proxy
- DOMAIN-SUFFIX,pureapk.com,Proxy
- DOMAIN-SUFFIX,quora.com,Proxy
- DOMAIN-SUFFIX,quoracdn.net,Proxy
- DOMAIN-SUFFIX,rakuten.co.jp,Proxy
- DOMAIN-SUFFIX,reddit.com,Proxy
- DOMAIN-SUFFIX,redditmedia.com,Proxy
- DOMAIN-SUFFIX,resilio.com,Proxy
- DOMAIN-SUFFIX,reuters.com,Proxy
- DOMAIN-SUFFIX,scmp.com,Proxy
- DOMAIN-SUFFIX,scribd.com,Proxy
- DOMAIN-SUFFIX,seatguru.com,Proxy
- DOMAIN-SUFFIX,shadowsocks.org,Proxy
- DOMAIN-SUFFIX,slideshare.net,Proxy
- DOMAIN-SUFFIX,soundcloud.com,Proxy
- DOMAIN-SUFFIX,startpage.com,Proxy
- DOMAIN-SUFFIX,steamcommunity.com,Proxy
- DOMAIN-SUFFIX,steemit.com,Proxy
- DOMAIN-SUFFIX,t66y.com,Proxy
- DOMAIN-SUFFIX,teco-hk.org,Proxy
- DOMAIN-SUFFIX,teco-mo.org,Proxy
- DOMAIN-SUFFIX,teddysun.com,Proxy
- DOMAIN-SUFFIX,theinitium.com,Proxy
- DOMAIN-SUFFIX,tineye.com,Proxy
- DOMAIN-SUFFIX,torproject.org,Proxy
- DOMAIN-SUFFIX,tumblr.com,Proxy
- DOMAIN-SUFFIX,turbobit.net,Proxy
- DOMAIN-SUFFIX,twitch.tv,Proxy
- DOMAIN-SUFFIX,udn.com,Proxy
- DOMAIN-SUFFIX,unseen.is,Proxy
- DOMAIN-SUFFIX,upmedia.mg,Proxy
- DOMAIN-SUFFIX,uptodown.com,Proxy
- DOMAIN-SUFFIX,ustream.tv,Proxy
- DOMAIN-SUFFIX,uwants.com,Proxy
- DOMAIN-SUFFIX,v2ray.com,Proxy
- DOMAIN-SUFFIX,viber.com,Proxy
- DOMAIN-SUFFIX,videopress.com,Proxy
- DOMAIN-SUFFIX,vimeo.com,Proxy
- DOMAIN-SUFFIX,voxer.com,Proxy
- DOMAIN-SUFFIX,vzw.com,Proxy
- DOMAIN-SUFFIX,w3schools.com,Proxy
- DOMAIN-SUFFIX,wattpad.com,Proxy
- DOMAIN-SUFFIX,whoer.net,Proxy
- DOMAIN-SUFFIX,wikimapia.org,Proxy
- DOMAIN-SUFFIX,wikipedia.org,Proxy
- DOMAIN-SUFFIX,wire.com,Proxy
- DOMAIN-SUFFIX,worldcat.org,Proxy
- DOMAIN-SUFFIX,wsj.com,Proxy
- DOMAIN-SUFFIX,wsj.net,Proxy
- DOMAIN-SUFFIX,xboxlive.com,Proxy
- DOMAIN-SUFFIX,xvideos.com,Proxy
- DOMAIN-SUFFIX,yahoo.com,Proxy
- DOMAIN-SUFFIX,yesasia.com,Proxy
- DOMAIN-SUFFIX,yes-news.com,Proxy
- DOMAIN-SUFFIX,yomiuri.co.jp,Proxy
- DOMAIN-SUFFIX,you-get.org,Proxy
- DOMAIN-SUFFIX,zb.com,Proxy
- DOMAIN-SUFFIX,zello.com,Proxy
- DOMAIN-SUFFIX,zeronet.io,Proxy
- DOMAIN,cdn-images.mailchimp.com,Proxy
- DOMAIN,id.heroku.com,Proxy
- DOMAIN-KEYWORD,github,Proxy
- DOMAIN-KEYWORD,jav,Proxy
- DOMAIN-KEYWORD,pinterest,Proxy
- DOMAIN-KEYWORD,porn,Proxy
- DOMAIN-KEYWORD,wikileaks,Proxy
# (Region-Restricted Access Denied)
- DOMAIN-SUFFIX,apartmentratings.com,Proxy
- DOMAIN-SUFFIX,apartments.com,Proxy
- DOMAIN-SUFFIX,bankmobilevibe.com,Proxy
- DOMAIN-SUFFIX,bing.com,Proxy
- DOMAIN-SUFFIX,booktopia.com.au,Proxy
- DOMAIN-SUFFIX,centauro.com.br,Proxy
- DOMAIN-SUFFIX,clearsurance.com,Proxy
- DOMAIN-SUFFIX,costco.com,Proxy
- DOMAIN-SUFFIX,crackle.com,Proxy
- DOMAIN-SUFFIX,depositphotos.cn,Proxy
- DOMAIN-SUFFIX,dish.com,Proxy
- DOMAIN-SUFFIX,dmm.co.jp,Proxy
- DOMAIN-SUFFIX,dmm.com,Proxy
- DOMAIN-SUFFIX,dnvod.tv,Proxy
- DOMAIN-SUFFIX,esurance.com,Proxy
- DOMAIN-SUFFIX,extmatrix.com,Proxy
- DOMAIN-SUFFIX,fastpic.ru,Proxy
- DOMAIN-SUFFIX,flipboard.com,Proxy
- DOMAIN-SUFFIX,fnac.be,Proxy
- DOMAIN-SUFFIX,fnac.com,Proxy
- DOMAIN-SUFFIX,funkyimg.com,Proxy
- DOMAIN-SUFFIX,fxnetworks.com,Proxy
- DOMAIN-SUFFIX,gettyimages.com,Proxy
- DOMAIN-SUFFIX,jcpenney.com,Proxy
- DOMAIN-SUFFIX,kknews.cc,Proxy
- DOMAIN-SUFFIX,nationwide.com,Proxy
- DOMAIN-SUFFIX,nbc.com,Proxy
- DOMAIN-SUFFIX,nordstrom.com,Proxy
- DOMAIN-SUFFIX,nordstromimage.com,Proxy
- DOMAIN-SUFFIX,nordstromrack.com,Proxy
- DOMAIN-SUFFIX,read01.com,Proxy
- DOMAIN-SUFFIX,superpages.com,Proxy
- DOMAIN-SUFFIX,target.com,Proxy
- DOMAIN-SUFFIX,thinkgeek.com,Proxy
- DOMAIN-SUFFIX,tracfone.com,Proxy
- DOMAIN-SUFFIX,uploader.jp,Proxy
- DOMAIN-SUFFIX,vevo.com,Proxy
- DOMAIN-SUFFIX,viu.tv,Proxy
- DOMAIN-SUFFIX,vk.com,Proxy
- DOMAIN-SUFFIX,vsco.co,Proxy
- DOMAIN-SUFFIX,xfinity.com,Proxy
- DOMAIN-SUFFIX,zattoo.com,Proxy
- DOMAIN,abc.com,Proxy
- DOMAIN,abc.go.com,Proxy
- DOMAIN,abc.net.au,Proxy
- DOMAIN,wego.here.com,Proxy
# > Telegram
- DOMAIN-SUFFIX,t.me,Proxy
- DOMAIN-SUFFIX,tdesktop.com,Proxy
- DOMAIN-SUFFIX,telegra.ph,Proxy
- DOMAIN-SUFFIX,telegram.me,Proxy
- DOMAIN-SUFFIX,telegram.org,Proxy
- IP-CIDR,91.108.0.0/16,Proxy,no-resolve
- IP-CIDR,109.239.140.0/24,Proxy,no-resolve
- IP-CIDR,149.154.160.0/20,Proxy,no-resolve
- IP-CIDR6,2001:67c:4e8::/48,Proxy,no-resolve
- IP-CIDR6,2001:b28:f23d::/48,Proxy,no-resolve
- IP-CIDR6,2001:b28:f23f::/48,Proxy,no-resolve
#USER-AGENT,Roam*,Proxy
# (The Most Popular Sites)
# > Apple
# > Apple URL Shortener
- DOMAIN-SUFFIX,appsto.re,Proxy
# > TestFlight
- DOMAIN,beta.itunes.apple.com,Proxy
# > iBooks Store download
- DOMAIN,books.itunes.apple.com,Proxy
# > iTunes Store Movies Trailers
- DOMAIN,hls.itunes.apple.com,Proxy
# App Store Preview
- DOMAIN,itunes.apple.com,Proxy
# > Spotlight
- DOMAIN,api-glb-sea.smoot.apple.com,Proxy
# > Dictionary
- DOMAIN,lookup-api.apple.com,Proxy
#PROCESS-NAME,LookupViewService,Proxy
# > Google
- DOMAIN-SUFFIX,abc.xyz,Proxy
- DOMAIN-SUFFIX,android.com,Proxy
- DOMAIN-SUFFIX,androidify.com,Proxy
- DOMAIN-SUFFIX,dialogflow.com,Proxy
- DOMAIN-SUFFIX,autodraw.com,Proxy
- DOMAIN-SUFFIX,capitalg.com,Proxy
- DOMAIN-SUFFIX,certificate-transparency.org,Proxy
- DOMAIN-SUFFIX,chrome.com,Proxy
- DOMAIN-SUFFIX,chromeexperiments.com,Proxy
- DOMAIN-SUFFIX,chromestatus.com,Proxy
- DOMAIN-SUFFIX,chromium.org,Proxy
- DOMAIN-SUFFIX,creativelab5.com,Proxy
- DOMAIN-SUFFIX,debug.com,Proxy
- DOMAIN-SUFFIX,deepmind.com,Proxy
- DOMAIN-SUFFIX,firebaseio.com,Proxy
- DOMAIN-SUFFIX,getmdl.io,Proxy
- DOMAIN-SUFFIX,ggpht.com,Proxy
- DOMAIN-SUFFIX,gmail.com,Proxy
- DOMAIN-SUFFIX,gmodules.com,Proxy
- DOMAIN-SUFFIX,godoc.org,Proxy
- DOMAIN-SUFFIX,golang.org,Proxy
- DOMAIN-SUFFIX,gstatic.com,Proxy
- DOMAIN-SUFFIX,gv.com,Proxy
- DOMAIN-SUFFIX,gwtproject.org,Proxy
- DOMAIN-SUFFIX,itasoftware.com,Proxy
- DOMAIN-SUFFIX,madewithcode.com,Proxy
- DOMAIN-SUFFIX,material.io,Proxy
- DOMAIN-SUFFIX,polymer-project.org,Proxy
- DOMAIN-SUFFIX,admin.recaptcha.net,Proxy
- DOMAIN-SUFFIX,recaptcha.net,Proxy
- DOMAIN-SUFFIX,shattered.io,Proxy
- DOMAIN-SUFFIX,synergyse.com,Proxy
- DOMAIN-SUFFIX,tensorflow.org,Proxy
- DOMAIN-SUFFIX,tiltbrush.com,Proxy
- DOMAIN-SUFFIX,waveprotocol.org,Proxy
- DOMAIN-SUFFIX,waymo.com,Proxy
- DOMAIN-SUFFIX,webmproject.org,Proxy
- DOMAIN-SUFFIX,webrtc.org,Proxy
- DOMAIN-SUFFIX,whatbrowser.org,Proxy
- DOMAIN-SUFFIX,widevine.com,Proxy
- DOMAIN-SUFFIX,x.company,Proxy
- DOMAIN-SUFFIX,youtu.be,Proxy
- DOMAIN-SUFFIX,yt.be,Proxy
- DOMAIN-SUFFIX,ytimg.com,Proxy
# > Steam
- DOMAIN,media.steampowered.com,Proxy
- DOMAIN,store.steampowered.com,Proxy
# > Other
- DOMAIN-SUFFIX,0rz.tw,Proxy
- DOMAIN-SUFFIX,4bluestones.biz,Proxy
- DOMAIN-SUFFIX,9bis.net,Proxy
- DOMAIN-SUFFIX,allconnected.co,Proxy
- DOMAIN-SUFFIX,amazonaws.com,Proxy
- DOMAIN-SUFFIX,aol.com,Proxy
- DOMAIN-SUFFIX,bcc.com.tw,Proxy
- DOMAIN-SUFFIX,bit.ly,Proxy
- DOMAIN-SUFFIX,bitshare.com,Proxy
- DOMAIN-SUFFIX,blog.jp,Proxy
- DOMAIN-SUFFIX,blogimg.jp,Proxy
- DOMAIN-SUFFIX,blogtd.org,Proxy
- DOMAIN-SUFFIX,broadcast.co.nz,Proxy
- DOMAIN-SUFFIX,camfrog.com,Proxy
- DOMAIN-SUFFIX,cfos.de,Proxy
- DOMAIN-SUFFIX,citypopulation.de,Proxy
- DOMAIN-SUFFIX,cloudfront.net,Proxy
- DOMAIN-SUFFIX,ctitv.com.tw,Proxy
- DOMAIN-SUFFIX,cuhk.edu.hk,Proxy
- DOMAIN-SUFFIX,cusu.hk,Proxy
- DOMAIN-SUFFIX,discuss.com.hk,Proxy
- DOMAIN-SUFFIX,dropboxapi.com,Proxy
- DOMAIN-SUFFIX,edditstatic.com,Proxy
- DOMAIN-SUFFIX,flickriver.com,Proxy
- DOMAIN-SUFFIX,focustaiwan.tw,Proxy
- DOMAIN-SUFFIX,free.fr,Proxy
- DOMAIN-SUFFIX,ftchinese.com,Proxy
- DOMAIN-SUFFIX,gigacircle.com,Proxy
- DOMAIN-SUFFIX,gov,Proxy
- DOMAIN-SUFFIX,hk-pub.com,Proxy
- DOMAIN-SUFFIX,hosting.co.uk,Proxy
- DOMAIN-SUFFIX,hwcdn.net,Proxy
- DOMAIN-SUFFIX,jtvnw.net,Proxy
- DOMAIN-SUFFIX,linksalpha.com,Proxy
- DOMAIN-SUFFIX,manyvids.com,Proxy
- DOMAIN-SUFFIX,myactimes.com,Proxy
- DOMAIN-SUFFIX,newsblur.com,Proxy
- DOMAIN-SUFFIX,now.im,Proxy
- DOMAIN-SUFFIX,redditlist.com,Proxy
- DOMAIN-SUFFIX,signal.org,Proxy
- DOMAIN-SUFFIX,sparknotes.com,Proxy
- DOMAIN-SUFFIX,streetvoice.com,Proxy
- DOMAIN-SUFFIX,ttvnw.net,Proxy
- DOMAIN-SUFFIX,tv.com,Proxy
- DOMAIN-SUFFIX,twitchcdn.net,Proxy
- DOMAIN-SUFFIX,typepad.com,Proxy
- DOMAIN-SUFFIX,udnbkk.com,Proxy
- DOMAIN-SUFFIX,whispersystems.org,Proxy
- DOMAIN-SUFFIX,wikia.com,Proxy
- DOMAIN-SUFFIX,wn.com,Proxy
- DOMAIN-SUFFIX,wolframalpha.com,Proxy
- DOMAIN-SUFFIX,x-art.com,Proxy
- DOMAIN-SUFFIX,yimg.com,Proxy
- DOMAIN-KEYWORD,dlercloud,Proxy
- DOMAIN-SUFFIX,dler.cloud,Proxy
# Local Area Network
- DOMAIN-KEYWORD,announce,DIRECT
- DOMAIN-KEYWORD,torrent,DIRECT
- DOMAIN-KEYWORD,tracker,DIRECT
- DOMAIN-SUFFIX,smtp,DIRECT
- DOMAIN-SUFFIX,local,DIRECT
- IP-CIDR,192.168.0.0/16,DIRECT
- IP-CIDR,10.0.0.0/8,DIRECT
- IP-CIDR,172.16.0.0/12,DIRECT
- IP-CIDR,127.0.0.0/8,DIRECT
- IP-CIDR,100.64.0.0/10,DIRECT
# # > IQIYI
# - IP-CIDR,101.227.0.0/16,Bilibili|iQIYI|NeteaseMusic|TencentVideo
# - IP-CIDR,101.224.0.0/13,Bilibili|iQIYI|NeteaseMusic|TencentVideo
# - IP-CIDR,119.176.0.0/12,Bilibili|iQIYI|NeteaseMusic|TencentVideo
# # > Youku
# - IP-CIDR,106.11.0.0/16,Bilibili|iQIYI|NeteaseMusic|TencentVideo
# > Telegram
- IP-CIDR,67.198.55.0/24,Proxy
- IP-CIDR,91.108.4.0/22,Proxy
- IP-CIDR,91.108.8.0/22,Proxy
- IP-CIDR,91.108.12.0/22,Proxy
- IP-CIDR,91.108.16.0/22,Proxy
- IP-CIDR,91.108.56.0/22,Proxy
- IP-CIDR,109.239.140.0/24,Proxy
- IP-CIDR,149.154.160.0/20,Proxy
- IP-CIDR,205.172.60.0/22,Proxy
# (Extra IP-CIDR)
# > Google
- IP-CIDR,35.190.247.0/24,Proxy
- IP-CIDR,64.233.160.0/19,Proxy
- IP-CIDR,66.102.0.0/20,Proxy
- IP-CIDR,66.249.80.0/20,Proxy
- IP-CIDR,72.14.192.0/18,Proxy
- IP-CIDR,74.125.0.0/16,Proxy
- IP-CIDR,108.177.8.0/21,Proxy
- IP-CIDR,172.217.0.0/16,Proxy
- IP-CIDR,173.194.0.0/16,Proxy
- IP-CIDR,209.85.128.0/17,Proxy
- IP-CIDR,216.58.192.0/19,Proxy
- IP-CIDR,216.239.32.0/19,Proxy
# > Facebook
- IP-CIDR,31.13.24.0/21,Proxy
- IP-CIDR,31.13.64.0/18,Proxy
- IP-CIDR,45.64.40.0/22,Proxy
- IP-CIDR,66.220.144.0/20,Proxy
- IP-CIDR,69.63.176.0/20,Proxy
- IP-CIDR,69.171.224.0/19,Proxy
- IP-CIDR,74.119.76.0/22,Proxy
- IP-CIDR,103.4.96.0/22,Proxy
- IP-CIDR,129.134.0.0/17,Proxy
- IP-CIDR,157.240.0.0/17,Proxy
- IP-CIDR,173.252.64.0/19,Proxy
- IP-CIDR,173.252.96.0/19,Proxy
- IP-CIDR,179.60.192.0/22,Proxy
- IP-CIDR,185.60.216.0/22,Proxy
- IP-CIDR,204.15.20.0/22,Proxy
# > Twitter
- IP-CIDR,69.195.160.0/19,Proxy
- IP-CIDR,104.244.42.0/21,Proxy
- IP-CIDR,192.133.76.0/22,Proxy
- IP-CIDR,199.16.156.0/22,Proxy
- IP-CIDR,199.59.148.0/22,Proxy
- IP-CIDR,199.96.56.0/21,Proxy
- IP-CIDR,202.160.128.0/22,Proxy
- IP-CIDR,209.237.192.0/19,Proxy
# GeoIP China
- GEOIP,CN,Domestic
- MATCH,Others

View File

@@ -1,28 +0,0 @@
mixed-port: 7890
allow-lan: false
external-controller: 127.0.0.1:61889
secret: 5c090877-21bb-4006-a97c-0bd4bfbb9be9
log-level: info
ipv6: false
proxy-groups:
- name: PROXY-ALL
type: select
proxies:
- tc-sh
proxies:
- name: proxy-server
type: socks5
server: 192.168.11.19
port: 22999
username: zeaslity
password: password
- name: tc-sh
type: socks5
server: 42.192.52.227
port: 28888
username: zeaslity
password: lovemm.23
- name: tc-sh
type: socks5
server: 42.192.52.227
port: 28889

View File

@@ -1,2 +1,14 @@
bash -c "$(curl -L https://github.com/XTLS/Xray-install/raw/main/install-release.sh)" @ install --without-geodata
sed -i "s/nobody/root/g" /etc/systemd/system/xray.service
systemctl daemon-reload
systemctl restart xray
systemctl enable xray
bash -c "$(curl -L https://github.com/XTLS/Xray-install/raw/main/install-release.sh)" @ upgrade
journalctl -u xray -n 100 -f
xx.l4.ca.bg.107421.xyz

Some files were not shown because too many files have changed in this diff.