Compare commits

...

10 Commits

Author SHA1 Message Date
zeaslity
b5e802ebc3 新增大量内容 2025-09-01 16:52:17 +08:00
zeaslity
49803fa5ac clash新规则-完美 2025-08-24 19:14:26 +08:00
zeaslity
b9be57adfc clash新规则 新增CN2GIA节点 2025-08-22 18:00:42 +08:00
zeaslity
4313a200c0 香港节点-新增转发 2025-08-21 14:45:37 +08:00
zeaslity
1dc9314773 新增日本节点 2025-08-20 17:00:46 +08:00
zeaslity
d6025287a0 大量更新 2025-07-10 16:49:54 +08:00
zeaslity
ee93d8dc8c 大量更新 2025-03-07 17:14:52 +08:00
zeaslity
21ff6a711d 大量的修改 2025-02-18 09:49:51 +08:00
zeaslity
7db1e0f565 大量更新 2025-01-20 16:38:08 +08:00
zeaslity
ed9e0947e6 1 2024-11-28 20:17:04 +08:00
137 changed files with 14979 additions and 6414 deletions

View File

@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<project version="4"> <project version="4">
<component name="dataSourceStorageLocal" created-in="IU-242.23726.103"> <component name="dataSourceStorageLocal" created-in="IU-252.25557.131">
<data-source name="腾讯云-成都" uuid="79c9466f-d8a3-418a-b54a-f6e314306a0c"> <data-source name="腾讯云-成都" uuid="79c9466f-d8a3-418a-b54a-f6e314306a0c">
<database-info product="MySQL" version="8.0.27" jdbc-version="4.2" driver-name="MySQL Connector/J" driver-version="mysql-connector-java-8.0.25 (Revision: 08be9e9b4cba6aa115f9b27b215887af40b159e0)" dbms="MYSQL" exact-version="8.0.27" exact-driver-version="8.0"> <database-info product="MySQL" version="8.0.27" jdbc-version="4.2" driver-name="MySQL Connector/J" driver-version="mysql-connector-java-8.0.25 (Revision: 08be9e9b4cba6aa115f9b27b215887af40b159e0)" dbms="MYSQL" exact-version="8.0.27" exact-driver-version="8.0">
<extra-name-characters>#@</extra-name-characters> <extra-name-characters>#@</extra-name-characters>

2
.idea/modules.xml generated
View File

@@ -2,7 +2,7 @@
<project version="4"> <project version="4">
<component name="ProjectModuleManager"> <component name="ProjectModuleManager">
<modules> <modules>
<module fileurl="file://$PROJECT_DIR$/.idea/Shell.iml" filepath="$PROJECT_DIR$/.idea/Shell.iml" /> <module fileurl="file://$PROJECT_DIR$/.idea/shell-script.iml" filepath="$PROJECT_DIR$/.idea/shell-script.iml" />
</modules> </modules>
</component> </component>
</project> </project>

View File

@@ -0,0 +1,342 @@
#!/bin/bash
# =============================================================================
# b-vault-warden backup script
# Purpose: run the Vaultwarden backup on a remote host, pull the backup files
#          locally, encrypt+compress them, then clean up on both ends.
# Version: 1.0.0
# Author: Shell script engineer
# License: MIT License
# Dependencies: ssh, rsync, 7zip, docker (on the remote host)
# =============================================================================
set -euo pipefail
IFS=$'\n\t'

# > Global constants.
# Declaration is split from assignment so a failing command substitution is
# not masked by `readonly` (ShellCheck SC2155).
SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_NAME
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
readonly SCRIPT_DIR
readonly LOCK_FILE="/tmp/${SCRIPT_NAME}.lock"

# > Configuration parameters.
readonly REMOTE_HOST="s5"
readonly REMOTE_PORT="22333"
readonly REMOTE_USER="root"
readonly REMOTE_BACKUP_CMD="docker exec vault-warden /vaultwarden backup"
readonly REMOTE_DATA_DIR="/data/vault-warden/persist-data"
readonly LOCAL_STAGE_DIR="/tmp/vault_warden_backup_stage"
readonly LOCAL_BACKUP_DIR="${SCRIPT_DIR}/backups"
readonly BACKUP_PATTERNS=(
  "config.json"
  "rsa_key*"
  "attachments"
  "sends"
  "db_*.sqlite3"
)
# SECURITY FIX: allow the passphrase to come from the environment instead of
# being hard-coded in the script; the literal default is kept only for
# backward compatibility and should be overridden in production.
readonly ENCRYPTION_PASSWORD="${VAULT_BACKUP_PASSWORD:-your_encryption_password_here}"

# > Logging configuration.
readonly LOG_DIR="${SCRIPT_DIR}/logs"
readonly LOG_FILE="${LOG_DIR}/backup_$(date +%Y%m%d).log"

# > ANSI color codes for console output.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'
# =============================================================================
# Logging
# =============================================================================
###
# Set up the log directory and log file.
# @require LOG_DIR, LOG_FILE
# @return 0 on success | >0 on failure
###
init_log_system() {
  if ! mkdir -p "${LOG_DIR}"; then
    return 1
  fi
  if ! touch "${LOG_FILE}"; then
    return 1
  fi
  return 0
}
###
# Emit a timestamped, colorized log line and append it to LOG_FILE.
# WARN/ERROR go to stderr, everything else to stdout; unknown levels are
# written as a plain uncolored line.
# @param level string  DEBUG/INFO/WARN/ERROR
# @param message string  text to log
# @require LOG_FILE (plus RED/GREEN/YELLOW/BLUE/NC color codes)
# @return 0 always
###
log_message() {
  local lvl="$1"
  local msg="$2"
  local ts
  ts=$(date '+%Y-%m-%d %H:%M:%S')
  case "${lvl}" in
    "DEBUG")
      echo -e "${BLUE}[DEBUG]${NC} ${ts} - ${msg}" | tee -a "${LOG_FILE}"
      ;;
    "INFO")
      echo -e "${GREEN}[INFO]${NC} ${ts} - ${msg}" | tee -a "${LOG_FILE}"
      ;;
    "WARN")
      echo -e "${YELLOW}[WARN]${NC} ${ts} - ${msg}" | tee -a "${LOG_FILE}" >&2
      ;;
    "ERROR")
      echo -e "${RED}[ERROR]${NC} ${ts} - ${msg}" | tee -a "${LOG_FILE}" >&2
      ;;
    *)
      echo "${ts} - ${msg}" | tee -a "${LOG_FILE}"
      ;;
  esac
  return 0
}
# =============================================================================
# Utilities
# =============================================================================
###
# Verify that a command is available on PATH.
# @param command_name string  command to look up
# @return 0 if found | 1 otherwise (logs an ERROR)
###
check_command() {
  local cmd="$1"
  if command -v "${cmd}" >/dev/null 2>&1; then
    return 0
  fi
  log_message "ERROR" "命令不存在: ${cmd}"
  return 1
}
###
# Run a command line on the configured remote host over SSH.
# @param command string  command to execute remotely
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER
# @return exit status of the remote command (function returns the status of
#         its last command; an explicit `return $?` is redundant)
###
execute_remote_command() {
  local remote_cmd="$1"
  ssh -p "${REMOTE_PORT}" "${REMOTE_USER}@${REMOTE_HOST}" "${remote_cmd}"
}
###
# Take the run lock, refusing to start while another instance is alive.
# Fixes two defects in the original: a stale lock left by a crashed process
# was never cleared, and the exists-check followed by a separate write was a
# check/create race (TOCTOU). The create now uses noclobber, which is atomic.
# @require LOCK_FILE
# @return 0 lock acquired | 1 another live instance holds the lock
###
acquire_lock() {
  if [ -e "${LOCK_FILE}" ]; then
    local old_pid
    old_pid=$(cat "${LOCK_FILE}" 2>/dev/null || true)
    if [ -n "${old_pid}" ] && kill -0 "${old_pid}" 2>/dev/null; then
      log_message "ERROR" "备份任务正在运行或异常退出,请检查锁文件: ${LOCK_FILE}"
      return 1
    fi
    # Holder is gone: the lock is stale, reclaim it.
    rm -f "${LOCK_FILE}"
  fi
  # noclobber makes the redirection fail if someone else created the file
  # between the check above and this write.
  if ! (set -o noclobber; echo "$$" > "${LOCK_FILE}") 2>/dev/null; then
    log_message "ERROR" "备份任务正在运行或异常退出,请检查锁文件: ${LOCK_FILE}"
    return 1
  fi
  trap 'release_lock' EXIT
  return 0
}
###
# Remove the run-lock file if it is present.
# @require LOCK_FILE
# @return 0 always
###
release_lock() {
  if [ -e "${LOCK_FILE}" ]; then
    rm -f "${LOCK_FILE}"
  fi
  return 0
}
# =============================================================================
# Core backup steps
# =============================================================================
###
# Trigger the Vaultwarden backup command on the remote host.
# @require execute_remote_command, REMOTE_BACKUP_CMD
# @return 0 on success | >0 on failure
###
remote_execute_backup() {
  log_message "INFO" "开始在远程主机执行Vaultwarden备份..."
  if execute_remote_command "${REMOTE_BACKUP_CMD}"; then
    log_message "INFO" "远程备份命令执行成功"
    return 0
  fi
  log_message "ERROR" "远程备份命令执行失败"
  return 1
}
###
# Pull the Vaultwarden backup files from the remote host into LOCAL_STAGE_DIR.
# FIX: the original concatenated one big command string and ran it through
# `eval`, which breaks on spaces/special characters and is an injection
# hazard; this version passes an argument array straight to rsync. The glob
# patterns are still expanded on the remote side by rsync over ssh.
# @require REMOTE_HOST, REMOTE_PORT, REMOTE_USER, REMOTE_DATA_DIR,
#          LOCAL_STAGE_DIR, BACKUP_PATTERNS
# @return 0 on success | >0 on failure
###
sync_backup_files() {
  log_message "INFO" "开始同步备份文件到本地..."
  # > Create the local staging directory.
  mkdir -p "${LOCAL_STAGE_DIR}" || {
    log_message "ERROR" "创建本地暂存目录失败: ${LOCAL_STAGE_DIR}"
    return 1
  }
  # > One remote source argument per backup pattern.
  local -a sources=()
  local pattern
  for pattern in "${BACKUP_PATTERNS[@]}"; do
    sources+=("${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_DATA_DIR}/${pattern}")
  done
  # > Run the transfer; flags match the original command.
  if ! rsync -avz --progress -e "ssh -p ${REMOTE_PORT}" \
      "${sources[@]}" "${LOCAL_STAGE_DIR}/"; then
    log_message "ERROR" "文件同步失败"
    return 1
  fi
  log_message "INFO" "文件同步完成"
  return 0
}
###
# Encrypt and compress the staged backup into a timestamped 7z archive.
# FIX: declaration and command substitution are split so a failure of the
# substitution cannot be masked by `local` (ShellCheck SC2155).
# @require LOCAL_STAGE_DIR, LOCAL_BACKUP_DIR, ENCRYPTION_PASSWORD
# @return 0 on success | >0 on failure
###
encrypt_and_compress() {
  log_message "INFO" "开始加密压缩备份文件..."
  # > Ensure 7zip is available (check_command already logs the miss).
  if ! check_command "7z"; then
    log_message "ERROR" "7zip命令不存在请安装p7zip-full包"
    return 1
  fi
  # > Create the archive directory.
  mkdir -p "${LOCAL_BACKUP_DIR}" || {
    log_message "ERROR" "创建备份目录失败: ${LOCAL_BACKUP_DIR}"
    return 1
  }
  local backup_file
  backup_file="${LOCAL_BACKUP_DIR}/vaultwarden-backup-$(date +%Y%m%d-%H%M%S).7z"
  # > -mhe=on also encrypts archive headers (file names are hidden too).
  # NOTE(review): -p"..." exposes the password in the process list while 7z
  # runs; consider a wrapper that feeds it interactively if that matters here.
  if ! (cd "${LOCAL_STAGE_DIR}" && 7z a -p"${ENCRYPTION_PASSWORD}" -mhe=on "${backup_file}" . >/dev/null); then
    log_message "ERROR" "加密压缩失败"
    return 1
  fi
  log_message "INFO" "加密压缩完成: ${backup_file}"
  return 0
}
###
# Delete the generated database backup files on the remote host.
# @require execute_remote_command, REMOTE_DATA_DIR
# @return 0 on success | >0 on failure
###
remote_cleanup_backup() {
  log_message "INFO" "开始清理远程备份文件..."
  local remote_rm="rm -rf ${REMOTE_DATA_DIR}/db_*.sqlite3"
  if ! execute_remote_command "${remote_rm}"; then
    log_message "ERROR" "远程清理失败"
    return 1
  fi
  log_message "INFO" "远程清理完成"
  return 0
}
###
# Remove the local staging directory.
# @require LOCAL_STAGE_DIR
# @return 0 always
###
local_cleanup() {
  log_message "INFO" "清理本地暂存目录..."
  if [ -d "${LOCAL_STAGE_DIR}" ]; then
    rm -rf "${LOCAL_STAGE_DIR}"
  fi
  return 0
}
# =============================================================================
# Main backup pipeline
# =============================================================================
###
# Run dependency checks, then every backup step in order, stopping at the
# first failure.
# BUG FIX: the failure log used ${#steps[@]} (always the total number of
# steps, 5), so it never said which step failed; it now logs the step name.
# @require all step functions above
# @return 0 on success | >0 on failure
###
main_backup_process() {
  log_message "INFO" "=== 开始Vaultwarden备份任务 ==="
  # > Verify required external tools are present.
  local required_commands=("ssh" "rsync" "7z")
  local cmd
  for cmd in "${required_commands[@]}"; do
    if ! check_command "${cmd}"; then
      return 1
    fi
  done
  # > Execute the pipeline; order matters (backup -> sync -> encrypt -> clean).
  local steps=(
    remote_execute_backup
    sync_backup_files
    encrypt_and_compress
    remote_cleanup_backup
    local_cleanup
  )
  local step
  for step in "${steps[@]}"; do
    if ! "${step}"; then
      log_message "ERROR" "备份任务在步骤 ${step} 失败"
      return 1
    fi
  done
  log_message "INFO" "=== Vaultwarden备份任务完成 ==="
  return 0
}
# =============================================================================
# Script entry point
# =============================================================================
# > Error handling: if any command fails under `set -e`, log the abort,
# > release the run lock, and exit non-zero.
trap 'log_message "ERROR" "脚本异常退出"; release_lock; exit 1' ERR
# > Top-level driver: serialize via the lock, bring up logging, then run the
# > full backup pipeline; exits the process directly on any failure.
main() {
  acquire_lock || exit 1
  if ! init_log_system; then
    log_message "ERROR" "日志系统初始化失败"
    exit 1
  fi
  if ! main_backup_process; then
    log_message "ERROR" "备份任务执行失败"
    exit 1
  fi
  release_lock
  exit 0
}
# > Script entry: forward all CLI arguments to main.
main "$@"
# =============================================================================
# 函数调用关系图
# =============================================================================
# main
# ├── acquire_lock
# ├── init_log_system
# └── main_backup_process
# ├── check_command (多次调用)
# ├── remote_execute_backup
# │ └── execute_remote_command
# ├── sync_backup_files
# ├── encrypt_and_compress
# │ └── check_command
# ├── remote_cleanup_backup
# │ └── execute_remote_command
# └── local_cleanup

View File

@@ -0,0 +1,32 @@
#!/bin/bash
# Cron job: runs daily at 02:00 (via crontab).
# FIX: abort on any failed step instead of ploughing on (e.g. a failed rsync
# previously still led to the staging dir being deleted).
set -euo pipefail

# > Environment
vault_warden_host_ip=s5

###
# Run the official Vaultwarden backup on the remote host, then delete the
# remote DB snapshot files.
# NOTE(review): this function is defined but never called below — presumably
# invoked manually or wired up elsewhere; confirm before relying on it.
# FIX: dropped `-it` from `docker exec` — there is no TTY under cron, so
# `-it` would make the command fail there.
###
remote_fetch_vault_warden_backup_data() {
  ssh -p 22333 "root@${vault_warden_host_ip}" "docker exec vault-warden /vaultwarden backup"
  ssh -p 22333 "root@${vault_warden_host_ip}" "rm -rf /data/vault-warden/persist-data/db_*.sqlite3"
}

# > Stage the Vaultwarden data files locally.
stage_dir=/tmp/vault_warden_backup_stage
mkdir -p "${stage_dir}/"
rsync -a \
  /data/vault-warden/persist-data/config.json \
  /data/vault-warden/persist-data/rsa_key* \
  /data/vault-warden/persist-data/attachments \
  /data/vault-warden/persist-data/sends \
  /data/vault-warden/persist-data/db_*.sqlite3 \
  "${stage_dir}/"

# > Pack the whole staging directory into the final archive.
# FIX: this step was a bare prose line in the original (a runtime error), and
# the archive was written INSIDE the staging directory which was then
# `rm -rf`-ed — destroying the backup it had just created. The archive now
# lands outside the staging directory, with relative paths (-C) inside it.
archive="/tmp/vaultwarden-backup-$(date +%Y%m%d-%H%M%S).tar.gz"
tar -czf "${archive}" -C "${stage_dir}" .
rm -rf "${stage_dir}"

View File

@@ -0,0 +1,7 @@
64.69.32.106
购买日期 2026/08/29
购买价格 15美元/年

View File

@@ -0,0 +1,28 @@
需要备份的内容
S5 nextcloud
S5 vault-warden
T0 gitea
你是一名计算机领域的大师,你非常善于应用互联网上成熟的工具和自己编写一些小工具,对于数据安全备份具有深刻的理解。
请为以下的情况创建一套解决方法,只需要解决方案,不需要实际的代码内容
主机A的配置为2C 2GB 120GB用作备份服务器可以安装任何软件
主机B的配置为4C 24GB 100GB 已有docker-compose部署的nextcloud和vault-warden应用nextcloud的信息为版本nextcloud:27.0.1-apache本地映射的目录为/data/nextcloud:/var/www/html使用的数据库为image: mariadb:10.5同样需要备份数据库信息。其备份方法请严格参考https://docs.nextcloud.com/server/latest/admin_manual/maintenance/backup.html
vault-warden的信息为版本image: vaultwarden/server:1.34.3-alpine本地存储目录为/data/vault-warden/persist-data:/data需要备份attachments文件其备份方法请严格参考https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault
主机C的配置为4C 24GB 100GB 已部署gitea应用gitea的信息为image: docker.io/bitnami/gitea:1.19.3-debian-11-r0,本次存储目录为/var/lib/docker/wdd/gitea/gitea_data:/bitnami/gitea其备份方法请严格参考https://docs.gitea.com/zh-tw/administration/backup-and-restore
主机A B C是三台独立的公网服务器他们之间的网络是互联互通的。
主机A B C之间已经配置使用root用户的免费登录
主机ABC上的业务均使用root用户启动
请给出方案,
1 要求B C之上的nextcloud数据按照1周一次备份到A vault-warden数据按照1天一次备份到A gitea的数据按照1周一次备份到A
2 最好采用软件自带的方式导出,需要考虑数据恢复
3 备份传输最好考虑安全加密,但不是必要的。
4 最好使用rsync和rclone的方式第一备份为主机A第二备份请放置于OneDrive

View File

@@ -0,0 +1,39 @@
# Vault-Warden备份
## 备份说明
备份频率 每天一次 通过crontab执行 每天凌晨2点执行
备份副本数 最近3份
官方备份说明 https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault
## 备份过程
1. 远程执行s5执行vault-warden官方备份命令
2. rsync复制s5主机上特定的备份文件到本地主机备份目录/data/s5_146-56-159-175/vault-warden/
3. 远程执行s5删除掉备份的数据库文件
# NextCloud备份
## 备份说明
备份频率 每周一次 通过crontab执行 每周日凌晨2点执行
备份副本数 最近1份
官方备份说明 https://docs.nextcloud.com/server/latest/admin_manual/maintenance/backup.html
## 备份过程
1. 远程执行s5启用维护模式 docker exec nextcloud_web php occ maintenance:mode --on
2. 远程执行s5数据库备份 (MariaDB) docker exec nextcloud-db mariadb-dump --single-transaction -h localhost -u nextcloud -p'boge14@Level5' nextcloud > /data/nextcloud/nextcloud-db_backup_$(date +%Y%m%d-%H%M%S).sql
3. rsync复制s5主机上下面的备份文件到本地主机目录/data/s5_146-56-159-175/nextcloud/
1. /data/nextcloud/*
4. 远程执行s5: 删除掉下面的文件
1. /data/nextcloud/nextcloud-db_backup_*.sql
5. 远程执行s5: 禁用维护模式 docker exec nextcloud_web php occ maintenance:mode --off
# Gitea备份
## 备份说明
备份频率 每周三 周六凌晨2点执行
备份副本数 最近3份
官方备份说明 https://docs.gitea.com/zh-tw/administration/backup-and-restore
## 备份过程
1. 远程执行t0: 执行gitea备份命令 docker exec -it gitea-gitea-1 /opt/bitnami/gitea/bin/gitea dump -c /bitnami/gitea/custom/conf/app.ini --tempdir /bitnami/gitea/tmp （--tempdir 是 gitea dump 的参数，不是 docker exec 的参数）

View File

@@ -0,0 +1,22 @@
version: '3.8'
services:
  crawl4ai:
    image: unclecode/crawl4ai:basic
    ports:
      - "1235:11235"
    environment:
      - CRAWL4AI_API_TOKEN=${CRAWL4AI_API_TOKEN:-} # Optional API security
      # BUG FIX: this line previously read ${CRAWL4AI_API_TOKEN:-}, so the
      # task limit was silently set to the API token; it now reads its own
      # variable.
      - MAX_CONCURRENT_TASKS=${MAX_CONCURRENT_TASKS:-}
      # LLM Provider Keys
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - GEMINI_API_KEY=${GEMINI_API_KEY:-}
    volumes:
      - /dev/shm:/dev/shm
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          memory: 1G

View File

@@ -0,0 +1,9 @@
# API Security (optional)
CRAWL4AI_API_TOKEN=EP53z52yx1r8k87G7y34AMojqpCHU4eMxO1MEGOBwa5mlDYe
# LLM Provider Keys
OPENAI_API_KEY=sk-proj-lCRIbBe3ex7VJP5GzAklT3BlbkFJbOcB4cXRQKk7pNZjBCHM
GEMINI_API_KEY=AIzaSyBv2JN5aY_OKDI5e1aVEf6uDQli65X9NZM
# Other Configuration
MAX_CONCURRENT_TASKS=5

View File

@@ -2,7 +2,7 @@ version: '3.9'
services: services:
chatgpt-next-web: chatgpt-next-web:
container_name: chatgpt-next-web container_name: chatgpt-next-web
image: yidadaa/chatgpt-next-web image: yidadaa/chatgpt-next-web:v2.16.0
ports: ports:
- 3002:3000 - 3002:3000
environment: environment:

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,6 @@
这篇文章揭露了豆粕市场崩盘背后的资本博弈和行业乱象,核心内容包括:
1. **资本操控**:豆粕价格暴跌并非单纯市场供需所致,而是压榨巨头和饲料企业为争夺定价权进行的资本博弈,散户成为牺牲品。
1. **需求疲软假象**:所谓的“需求疲软”实则是饲料企业为保利润调整配方,减少豆粕用量,而养殖户因成本上升和猪周期下行遭受重创。
1. **开工率谎言**:压榨企业通过“停机检修”人为控制供应,制造紧张假象以操纵价格,同时过度依赖进口大豆威胁国内农业生态和产业安全。
2. **库存猫腻**:“缺豆不缺粕”的异常现象暗示压榨企业和贸易商囤积居奇,通过信息不对称和低买高卖剥削散户。
1. **未来预测争议**卓创资讯对4月豆粕价格走势的预测被质疑可能受利益集团影响提醒投资者保持独立判断警惕市场陷阱。

View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Create, mount, smoke-test, and unmount a RAM disk backed by the brd module.
# rd_size is in KiB: 1 GiB = 1048576, 5 GiB = 5242880, 8 GiB = 8388608.
# (FIX: the original comment listed 1048576 for 5G, which is the 1 GiB value.)
sudo modprobe brd rd_nr=1 rd_size=1048576 max_part=1
sudo mkfs.ext4 /dev/ram0
# FIX: -p so a pre-existing mount point does not abort the script on re-runs.
sudo mkdir -p /mnt/ramdisk
sudo mount /dev/ram0 /mnt/ramdisk
# Smoke test: the mounted RAM disk should be writable.
touch /mnt/ramdisk/test.txt
# Cleanup
sudo umount /mnt/ramdisk

View File

@@ -0,0 +1,35 @@
# seoul tokyo amd64
140.238.0.0/16
# seoul arm-01
132.145.87.10/32
# seoul arm-02
146.56.0.0/16
# tokyo arm
150.230.0.0/16
# phonix send to boge
#144.24.0.0/16
# phonix amd
129.146.0.0/16
# osaka amd64
140.83.0.0/16
# frankfurt amd64
158.180.0.0/16
# tencent-shanghai
42.192.52.227/32
# tencent-hongkong
43.154.83.213/32
# Rare.io-amd64-deussdolf
144.24.164.121/32
# BitsFLowCloud amd64 LosAngles CN2GIA
154.40.34.106/32

View File

@@ -1,17 +0,0 @@
# seoul arm
146.56.0.0/16
# tokyo arm
150.230.0.0/16
# tokyo seoul
140.238.0.0/16
# phonix send to boge
#144.24.0.0/16
# phonix amd
129.146.0.0/16
# osaka amd64
140.83.0.0/16
# tencent-shanghai
42.192.52.227/32
# tencent-hongkong
43.154.83.213/32

View File

@@ -0,0 +1,122 @@
server {
server_name dify.107421.xyz;
listen 80 ;
return 301 https://dify.107421.xyz$request_uri;
}
server {
listen 443 ssl;
server_name dify.107421.xyz;
ssl_certificate /etc/nginx/conf.d/ssl_key/dify.107421.xyz.cert.pem;
ssl_certificate_key /etc/nginx/conf.d/ssl_key/dify.107421.xyz.key.pem;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m;
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
location /console/api {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:5001;
}
location /api {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:5001;
}
location /v1 {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:5001;
}
location /files {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:5001;
}
location /explore {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:3000;
}
location /e {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_set_header Dify-Hook-Url $scheme://$host;
proxy_pass http://129.146.65.80:5002;
}
location / {
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_send_timeout 10000s;
proxy_read_timeout 10000s;
client_body_timeout 6000s;
proxy_pass http://129.146.65.80:3000;
}
}

View File

@@ -0,0 +1,564 @@
# ==================================================================
# WARNING: This file is auto-generated by generate_docker_compose
# Do not modify this file directly. Instead, update the .env.example
# or docker-compose-template.yaml and regenerate this file.
# ==================================================================
x-shared-env: &shared-api-worker-env
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
SERVICE_API_URL: ${SERVICE_API_URL:-}
APP_API_URL: ${APP_API_URL:-}
APP_WEB_URL: ${APP_WEB_URL:-}
FILES_URL: ${FILES_URL:-}
LOG_LEVEL: ${LOG_LEVEL:-INFO}
LOG_FILE: ${LOG_FILE:-/app/logs/server.log}
LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20}
LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5}
LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S}
LOG_TZ: ${LOG_TZ:-UTC}
DEBUG: ${DEBUG:-false}
FLASK_DEBUG: ${FLASK_DEBUG:-false}
SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U}
INIT_PASSWORD: ${INIT_PASSWORD:-}
DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION}
CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai}
OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1}
MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30}
APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200}
DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
DIFY_PORT: ${DIFY_PORT:-5001}
SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1}
SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent}
SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10}
CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-}
GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360}
CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-}
CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false}
CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-}
CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-}
API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10}
API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60}
DB_USERNAME: ${DB_USERNAME:-postgres}
DB_PASSWORD: ${DB_PASSWORD:-difyai123456}
DB_HOST: ${DB_HOST:-db}
DB_PORT: ${DB_PORT:-5432}
DB_DATABASE: ${DB_DATABASE:-dify}
SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600}
SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false}
POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100}
POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB}
POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB}
POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}
POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}
REDIS_HOST: ${REDIS_HOST:-redis}
REDIS_PORT: ${REDIS_PORT:-6379}
REDIS_USERNAME: ${REDIS_USERNAME:-}
REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
REDIS_USE_SSL: ${REDIS_USE_SSL:-false}
REDIS_DB: ${REDIS_DB:-0}
REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false}
REDIS_SENTINELS: ${REDIS_SENTINELS:-}
REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-}
REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-}
REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-}
REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1}
REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false}
REDIS_CLUSTERS: ${REDIS_CLUSTERS:-}
REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-}
CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1}
BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
STORAGE_TYPE: ${STORAGE_TYPE:-opendal}
OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs}
OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage}
S3_ENDPOINT: ${S3_ENDPOINT:-}
S3_REGION: ${S3_REGION:-us-east-1}
S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai}
S3_ACCESS_KEY: ${S3_ACCESS_KEY:-}
S3_SECRET_KEY: ${S3_SECRET_KEY:-}
S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai}
AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai}
AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container}
AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://<your_account_name>.blob.core.windows.net}
GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name}
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-}
ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name}
ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key}
ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key}
ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com}
ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1}
ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path}
TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name}
TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key}
TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id}
TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region}
TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme}
OCI_ENDPOINT: ${OCI_ENDPOINT:-https://objectstorage.us-ashburn-1.oraclecloud.com}
OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name}
OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key}
OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key}
OCI_REGION: ${OCI_REGION:-us-ashburn-1}
HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name}
HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key}
HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key}
HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url}
VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name}
VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key}
VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key}
VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url}
VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region}
BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name}
BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key}
BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key}
BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url}
SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name}
SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key}
SUPABASE_URL: ${SUPABASE_URL:-your-server-url}
VECTOR_STORE: ${VECTOR_STORE:-weaviate}
WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333}
QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20}
QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false}
QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334}
MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530}
MILVUS_TOKEN: ${MILVUS_TOKEN:-}
MILVUS_USER: ${MILVUS_USER:-root}
MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus}
MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False}
MYSCALE_HOST: ${MYSCALE_HOST:-myscale}
MYSCALE_PORT: ${MYSCALE_PORT:-8123}
MYSCALE_USER: ${MYSCALE_USER:-default}
MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-}
MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify}
MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-}
COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server}
COUCHBASE_USER: ${COUCHBASE_USER:-Administrator}
COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password}
COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings}
COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default}
PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector}
PGVECTOR_PORT: ${PGVECTOR_PORT:-5432}
PGVECTOR_USER: ${PGVECTOR_USER:-postgres}
PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456}
PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify}
PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1}
PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5}
PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs}
PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432}
PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres}
PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456}
PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify}
ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak}
ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk}
ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou}
ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456}
ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount}
ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword}
ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify}
ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword}
ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com}
ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432}
ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1}
ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5}
TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb}
TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000}
TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-}
TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-}
TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify}
TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1}
TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify}
TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20}
TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false}
TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334}
TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify}
TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify}
TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1}
TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1}
TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1}
TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify}
TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100}
CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1}
CHROMA_PORT: ${CHROMA_PORT:-8000}
CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant}
CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database}
CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider}
CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-}
ORACLE_HOST: ${ORACLE_HOST:-oracle}
ORACLE_PORT: ${ORACLE_PORT:-1521}
ORACLE_USER: ${ORACLE_USER:-dify}
ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify}
ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1}
RELYT_HOST: ${RELYT_HOST:-db}
RELYT_PORT: ${RELYT_PORT:-5432}
RELYT_USER: ${RELYT_USER:-postgres}
RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456}
RELYT_DATABASE: ${RELYT_DATABASE:-postgres}
OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch}
OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200}
OPENSEARCH_USER: ${OPENSEARCH_USER:-admin}
OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin}
OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true}
TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1}
TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify}
TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30}
TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify}
TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify}
TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1}
TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2}
ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0}
ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200}
ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
KIBANA_PORT: ${KIBANA_PORT:-5601}
BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287}
BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000}
BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root}
BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify}
BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify}
BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1}
BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3}
VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak}
VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk}
VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai}
VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com}
VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http}
VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30}
VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30}
LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070}
LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm}
LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm}
OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase}
OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881}
OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test}
OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test}
OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io}
UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify}
UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
ETL_TYPE: ${ETL_TYPE:-dify}
UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-}
SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true}
PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512}
CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024}
MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64}
UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100}
UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50}
SENTRY_DSN: ${SENTRY_DSN:-}
API_SENTRY_DSN: ${API_SENTRY_DSN:-}
API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-}
NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-}
MAIL_TYPE: ${MAIL_TYPE:-resend}
MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-}
RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com}
RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key}
SMTP_SERVER: ${SMTP_SERVER:-}
SMTP_PORT: ${SMTP_PORT:-465}
SMTP_USERNAME: ${SMTP_USERNAME:-}
SMTP_PASSWORD: ${SMTP_PASSWORD:-}
SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5}
CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox}
CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808}
CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5}
CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20}
CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000}
CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10}
CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60}
CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10}
TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500}
WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200}
WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5}
MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800}
WORKFLOW_PARALLEL_DEPTH_LIMIT: ${WORKFLOW_PARALLEL_DEPTH_LIMIT:-3}
WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
PGUSER: ${PGUSER:-${DB_USERNAME}}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}}
POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}}
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release}
SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true}
WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true}
PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres}
PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m}
OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1}
OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1}
OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536}
OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536}
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
NGINX_PORT: ${NGINX_PORT:-80}
NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3} # NOTE(review): TLSv1.1 is deprecated (RFC 8996) — consider restricting to TLSv1.2 TLSv1.3
NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
CERTBOT_EMAIL: ${CERTBOT_EMAIL:-your_email@example.com}
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com}
CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-}
SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
SSRF_DEFAULT_TIME_OUT: ${SSRF_DEFAULT_TIME_OUT:-5}
SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5}
SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5}
SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5}
EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80}
EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443}
POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-}
POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-}
POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-}
POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-}
POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-}
POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-}
CSP_WHITELIST: ${CSP_WHITELIST:-}
CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false}
MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100}
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10}
DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002}
PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002}
PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
PLUGIN_DAEMON_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002}
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
PLUGIN_PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
PLUGIN_DEBUGGING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
PLUGIN_DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
EXPOSE_PLUGIN_DEBUGGING_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
EXPOSE_PLUGIN_DEBUGGING_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
PLUGIN_DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
PLUGIN_DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}}
MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true}
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
services:
# API service
api:
image: langgenius/dify-api:1.1.3
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
# Startup mode, 'api' starts the API server.
MODE: api
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
dns:
- 1.1.1.1
- 8.8.8.8
volumes:
# Mount the storage directory to the container, for storing user files.
- /mnt/ramdisk/dify-api/storage:/app/api/storage
- /etc/resolv.conf:/etc/resolv.conf
network_mode: "host"
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:1.1.3
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
# Startup mode, 'worker' starts the Celery worker for processing the queue.
MODE: worker
SENTRY_DSN: ${API_SENTRY_DSN:-}
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
volumes:
# Mount the storage directory to the container, for storing user files.
- /mnt/ramdisk/dify-api/storage:/app/api/storage
- /etc/resolv.conf:/etc/resolv.conf
dns:
- 1.1.1.1
- 8.8.8.8
network_mode: "host"
# Frontend web application.
web:
image: langgenius/dify-web:1.1.3
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
APP_API_URL: ${APP_API_URL:-}
SENTRY_DSN: ${WEB_SENTRY_DSN:-}
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
CSP_WHITELIST: ${CSP_WHITELIST:-}
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
ports:
- 3000:3000
# The DifySandbox
sandbox:
image: langgenius/dify-sandbox:0.2.11
restart: always
environment:
# The DifySandbox configurations
# Make sure you are changing this key for your deployment with a strong key.
# You can generate a strong key using `openssl rand -base64 42`.
API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
GIN_MODE: ${SANDBOX_GIN_MODE:-release}
WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
volumes:
- /mnt/ramdisk/sandbox/dependencies:/dependencies
- /etc/resolv.conf:/etc/resolv.conf
dns:
- 1.1.1.1
- 8.8.8.8
healthcheck:
test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
network_mode: "host"
# plugin daemon
plugin_daemon:
image: langgenius/dify-plugin-daemon:0.0.6-local
restart: always
environment:
# Use the shared environment variables.
<<: *shared-api-worker-env
DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
DIFY_INNER_API_KEY: ${INNER_API_KEY_FOR_PLUGIN:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_REMOTE_INSTALL_HOST:-0.0.0.0}
PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_REMOTE_INSTALL_PORT:-5003} # must differ from SERVER_PORT (5002); the remote-install/debug listener defaults to 5003 elsewhere in this file (PLUGIN_DEBUGGING_PORT)
PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
network_mode: "host"
volumes:
- /mnt/ramdisk/plugin_daemon:/app/storage
- /etc/resolv.conf:/etc/resolv.conf
dns:
- 1.1.1.1
- 8.8.8.8
# ssrf_proxy server
# for more information, please refer to
# https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
# ssrf_proxy:
# image: ubuntu/squid:latest
# restart: always
# volumes:
# - /mnt/ramdisk/ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
# - /mnt/ramdisk/ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
# entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
# environment:
# # pls clearly modify the squid env vars to fit your network environment.
# HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
# COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
# REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
# SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
# SANDBOX_PORT: ${SANDBOX_PORT:-8194}
# networks:
# - ssrf_proxy_network
# - default
# networks:
# create a network between sandbox, api and ssrf_proxy, and can not access outside.
# ssrf_proxy_network:
# driver: bridge

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,42 @@
#!/bin/bash
# Squid container entrypoint (modified from the Squid OCI image entrypoint).
#
# Responsibilities:
#   1. Re-create the snakeoil TLS certificate removed during the image build.
#   2. Forward squid's log files to this process's stdout so container tooling
#      (kubernetes, docker-compose, etc.) can read the service logs.
#   3. Render /etc/squid/squid.conf from squid.conf.template by substituting
#      ${NAME} tokens with values from the environment.
#   4. Initialise squid's cache directories, then start squid in the
#      foreground as the container's main process.
# [1] Ubuntu's default configuration is changed in the Dockerfile to allow
#     local network connections. See the Dockerfile for further information.

echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process"
if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
  # Best-effort: output is discarded because the cert is only needed when
  # TLS-related squid options are enabled.
  /usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1
fi

# Stream all squid logs to stdout. `tail -F` keeps following across log
# rotation; stderr is silenced because the files may not exist yet.
tail -F /var/log/squid/access.log 2>/dev/null &
tail -F /var/log/squid/error.log 2>/dev/null &
tail -F /var/log/squid/store.log 2>/dev/null &
tail -F /var/log/squid/cache.log 2>/dev/null &

# Render the config: replace every ${NAME} token in the template with the
# value of the corresponding environment variable (empty string when unset).
echo "[ENTRYPOINT] replacing environment variables in the template"
awk '{
  while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) {
    var = substr($0, RSTART+2, RLENGTH-3)
    val = ENVIRON[var]
    $0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH)
  }
  print
}' /etc/squid/squid.conf.template > /etc/squid/squid.conf || {
  echo "[ENTRYPOINT] failed to render /etc/squid/squid.conf from template" >&2
  exit 1
}

# -N: no daemon mode; -z: create missing cache/swap directories.
# Abort early if the rendered configuration is unusable.
/usr/sbin/squid -Nz || {
  echo "[ENTRYPOINT] squid -Nz failed (invalid rendered configuration?)" >&2
  exit 1
}

echo "[ENTRYPOINT] starting squid"
# exec so squid replaces this shell as PID 1 and receives container stop
# signals (SIGTERM from `docker stop`) directly instead of them dying in bash.
# NOTE(review): the trailing "1" argument is inherited from the upstream
# script; squid appears to tolerate it — TODO confirm and drop if unused.
exec /usr/sbin/squid -f /etc/squid/squid.conf -NYC 1

View File

@@ -0,0 +1,54 @@
# Squid configuration template for the Dify ssrf_proxy container.
# ${NAME} tokens are substituted by the entrypoint script before squid starts.
# Forward-proxy side: restricts CONNECT and unsafe destination ports.
# Reverse-proxy side: exposes the sandbox service through an accelerator port.

# Source addresses considered "local" (private / CGN / link-local ranges).
acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN)
acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN)
acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN)
acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines
acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN)
acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN)
acl localnet src fc00::/7 # RFC 4193 local private network range
acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
# Ports CONNECT tunnelling is allowed to (https only by default).
acl SSL_ports port 443
# acl SSL_ports port 1025-65535 # Enable the configuration to resolve this issue: https://github.com/langgenius/dify/issues/12792
acl Safe_ports port 80 # http
acl Safe_ports port 21 # ftp
acl Safe_ports port 443 # https
acl Safe_ports port 70 # gopher
acl Safe_ports port 210 # wais
acl Safe_ports port 1025-65535 # unregistered ports
acl Safe_ports port 280 # http-mgmt
acl Safe_ports port 488 # gss-http
acl Safe_ports port 591 # filemaker
acl Safe_ports port 777 # multiling http
acl CONNECT method CONNECT
# Access rules are evaluated top-down, first match wins.
http_access deny !Safe_ports
http_access deny CONNECT !SSL_ports
http_access allow localhost manager
http_access deny manager
http_access allow localhost
# Pull in distro/package drop-in rules (e.g. Ubuntu's conf.d allow rules).
include /etc/squid/conf.d/*.conf
http_access deny all
################################## Proxy Server ################################
http_port ${HTTP_PORT}
coredump_dir ${COREDUMP_DIR}
refresh_pattern ^ftp: 1440 20% 10080
refresh_pattern ^gopher: 1440 0% 1440
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern . 0 20% 4320
# cache_dir ufs /var/spool/squid 100 16 256
# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks
# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default
################################## Reverse Proxy To Sandbox ################################
# Accelerator (reverse-proxy) listener forwarding to the sandbox origin.
http_port ${REVERSE_PROXY_PORT} accel vhost
cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
# NOTE(review): these two lines sit after `http_access deny all` above, and
# squid stops at the first matching http_access rule — access through the
# accel port presumably relies on allow rules included from conf.d; confirm.
acl src_all src all
http_access allow src_all
# docker exec -it dify_api_1 telnet 10.0.0.247 6379   (stray debugging command pasted into this file — not a valid squid directive, commented out so squid can parse the config)

View File

@@ -0,0 +1,743 @@
# ------------------------------
# Environment Variables for API service & worker
# ------------------------------
# ------------------------------
# Common Variables
# ------------------------------
# The backend URL of the console API,
# used to concatenate the authorization callback.
# If empty, it is the same domain.
# Example: https://api.console.dify.ai
CONSOLE_API_URL=
# The front-end URL of the console web,
# used to concatenate some front-end addresses and for CORS configuration use.
# If empty, it is the same domain.
# Example: https://console.dify.ai
CONSOLE_WEB_URL=
# Service API Url,
# used to display Service API Base Url to the front-end.
# If empty, it is the same domain.
# Example: https://api.dify.ai
SERVICE_API_URL=
# WebApp API backend Url,
# used to declare the back-end URL for the front-end API.
# If empty, it is the same domain.
# Example: https://api.app.dify.ai
APP_API_URL=
# WebApp Url,
# used to display WebAPP API Base Url to the front-end.
# If empty, it is the same domain.
# Example: https://app.dify.ai
APP_WEB_URL=
# File preview or download Url prefix.
# used to display File preview or download Url to the front-end or as Multi-model inputs;
# Url is signed and has expiration time.
FILES_URL=
# ------------------------------
# Server Configuration
# ------------------------------
# The log level for the application.
# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
LOG_LEVEL=INFO
# Log file path
LOG_FILE=/root/app-install/dify/logs/server.log
# Log file max size, the unit is MB
LOG_FILE_MAX_SIZE=50
# Log file max backup count
LOG_FILE_BACKUP_COUNT=5
# Log dateformat
LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
# Log Timezone
LOG_TZ=Asia/Shanghai
# Debug mode, default is false.
# It is recommended to turn on this configuration for local development
# to prevent some problems caused by monkey patch.
DEBUG=false
# Flask debug mode, it can output trace information at the interface when turned on,
# which is convenient for debugging.
FLASK_DEBUG=false
# A secretkey that is used for securely signing the session cookie
# and encrypting sensitive information on the database.
# You can generate a strong key using `openssl rand -base64 42`.
# NOTE(review): this key is committed to version control — rotate it and inject it via a secret store / untracked env file instead.
SECRET_KEY=bBj28uxctAwybtLFUr1Zlc3OKlTG5SsUiz+W9v71s0+YytuD8+Um8Qdy
# Password for admin user initialization.
# If left unset, admin user will not be prompted for a password
# when creating the initial admin account.
# The length of the password cannot exceed 30 characters.
INIT_PASSWORD=loveff.cxc.23
# Deployment environment.
# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`.
# Testing environment. There will be a distinct color label on the front-end page,
# indicating that this environment is a testing environment.
DEPLOY_ENV=PRODUCTION
# Whether to enable the version check policy.
# If set to empty, https://updates.dify.ai will be called for version check.
CHECK_UPDATE_URL=https://updates.dify.ai
# Used to change the OpenAI base address, default is https://api.openai.com/v1.
# When OpenAI cannot be accessed in China, replace it with a domestic mirror address,
# or when a local model provides OpenAI compatible API, it can be replaced.
OPENAI_API_BASE=https://api.openai.com/v1
# When enabled, migrations will be executed prior to application startup
# and the application will start after the migrations have completed.
MIGRATION_ENABLED=true
# File Access Time specifies a time interval in seconds for the file to be accessed.
# The default value is 300 seconds.
FILES_ACCESS_TIMEOUT=300
# Access token expiration time in minutes
ACCESS_TOKEN_EXPIRE_MINUTES=60
# Refresh token expiration time in days
REFRESH_TOKEN_EXPIRE_DAYS=30
# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer.
APP_MAX_ACTIVE_REQUESTS=0
APP_MAX_EXECUTION_TIME=1200
# ------------------------------
# Container Startup Related Configuration
# Only effective when starting with docker image or docker-compose.
# ------------------------------
# API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed.
DIFY_BIND_ADDRESS=0.0.0.0
# API service binding port number, default 5001.
DIFY_PORT=5001
# The number of API server workers, i.e., the number of workers.
# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent
# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
SERVER_WORKER_AMOUNT=2
# Defaults to gevent. If using windows, it can be switched to sync or solo.
SERVER_WORKER_CLASS=gevent
# Default number of worker connections, the default is 10.
SERVER_WORKER_CONNECTIONS=10
# Similar to SERVER_WORKER_CLASS.
# If using windows, it can be switched to sync or solo.
CELERY_WORKER_CLASS=
# Request handling timeout. The default is 200,
# it is recommended to set it to 360 to support a longer sse connection time.
GUNICORN_TIMEOUT=360
# The number of Celery workers. The default is 1, and can be set as needed.
CELERY_WORKER_AMOUNT=
# Flag indicating whether to enable autoscaling of Celery workers.
#
# Autoscaling is useful when tasks are CPU intensive and can be dynamically
# allocated and deallocated based on the workload.
#
# When autoscaling is enabled, the maximum and minimum number of workers can
# be specified. The autoscaling algorithm will dynamically adjust the number
# of workers within the specified range.
#
# Default is false (i.e., autoscaling is disabled).
#
# Example:
# CELERY_AUTO_SCALE=true
CELERY_AUTO_SCALE=true
# The maximum number of Celery workers that can be autoscaled.
# This is optional and only used when autoscaling is enabled.
# Default is not set.
CELERY_MAX_WORKERS=3
# The minimum number of Celery workers that can be autoscaled.
# This is optional and only used when autoscaling is enabled.
# Default is not set.
CELERY_MIN_WORKERS=1
# API Tool configuration
API_TOOL_DEFAULT_CONNECT_TIMEOUT=10
API_TOOL_DEFAULT_READ_TIMEOUT=60
# ------------------------------
# Database Configuration
# The database uses PostgreSQL. Please use the public schema.
# It is consistent with the configuration in the 'db' service below.
# ------------------------------
DB_USERNAME=postgres
DB_PASSWORD=V2rayStrP@ss
DB_HOST=10.0.0.247
DB_PORT=5432
DB_DATABASE=dify
# The size of the database connection pool.
# The default is 30 connections, which can be appropriately increased.
SQLALCHEMY_POOL_SIZE=30
# Database connection pool recycling time, the default is 3600 seconds.
SQLALCHEMY_POOL_RECYCLE=3600
# Whether to print SQL, default is false.
SQLALCHEMY_ECHO=false
# Maximum number of connections to the database
# Default is 100
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
POSTGRES_MAX_CONNECTIONS=100
# Sets the amount of shared memory used for postgres's shared buffers.
# Default is 128MB
# Recommended value: 25% of available memory
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
POSTGRES_SHARED_BUFFERS=4096MB
# Sets the amount of memory used by each database worker for working space.
# Default is 4MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
POSTGRES_WORK_MEM=64MB
# Sets the amount of memory reserved for maintenance activities.
# Default is 64MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
POSTGRES_MAINTENANCE_WORK_MEM=128MB
# Sets the planner's assumption about the effective cache size.
# Default is 4096MB
#
# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
POSTGRES_EFFECTIVE_CACHE_SIZE=8192MB
# ------------------------------
# Redis Configuration
# This Redis configuration is used for caching and for pub/sub during conversation.
# ------------------------------
REDIS_HOST=10.0.0.247
REDIS_PORT=6379
REDIS_USERNAME=
REDIS_PASSWORD=V2rayStrP@ss
REDIS_USE_SSL=false
REDIS_DB=0
# Whether to use Redis Sentinel mode.
# If set to true, the application will automatically discover and connect to the master node through Sentinel.
REDIS_USE_SENTINEL=false
# List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port.
# Format: `<sentinel1_ip>:<sentinel1_port>,<sentinel2_ip>:<sentinel2_port>,<sentinel3_ip>:<sentinel3_port>`
REDIS_SENTINELS=
REDIS_SENTINEL_SERVICE_NAME=
REDIS_SENTINEL_USERNAME=
REDIS_SENTINEL_PASSWORD=
REDIS_SENTINEL_SOCKET_TIMEOUT=0.1
# List of Redis Cluster nodes. If Cluster mode is enabled, provide at least one Cluster IP and port.
# Format: `<Cluster1_ip>:<Cluster1_port>,<Cluster2_ip>:<Cluster2_port>,<Cluster3_ip>:<Cluster3_port>`
REDIS_USE_CLUSTERS=false
REDIS_CLUSTERS=
REDIS_CLUSTERS_PASSWORD=
# ------------------------------
# Celery Configuration
# ------------------------------
# Use redis as the broker, and redis db 1 for celery broker.
# Format as follows: `redis://<redis_username>:<redis_password>@<redis_host>:<redis_port>/<redis_database>`
# Example: redis://:V2rayStrP@ss@redis:6379/1
# If use Redis Sentinel, format as follows: `sentinel://<sentinel_username>:<sentinel_password>@<sentinel_host>:<sentinel_port>/<redis_database>`
# Example: sentinel://localhost:26379/1;sentinel://localhost:26380/1;sentinel://localhost:26381/1
CELERY_BROKER_URL=redis://:V2rayStrP@ss@10.0.0.247:6379/1
BROKER_USE_SSL=false
# If you are using Redis Sentinel for high availability, configure the following settings.
CELERY_USE_SENTINEL=false
CELERY_SENTINEL_MASTER_NAME=
CELERY_SENTINEL_SOCKET_TIMEOUT=0.1
# ------------------------------
# CORS Configuration
# Used to set the front-end cross-domain access policy.
# ------------------------------
# Specifies the allowed origins for cross-origin requests to the Web API,
# e.g. https://dify.app or * for all origins.
WEB_API_CORS_ALLOW_ORIGINS=*
# Specifies the allowed origins for cross-origin requests to the console API,
# e.g. https://cloud.dify.ai or * for all origins.
CONSOLE_CORS_ALLOW_ORIGINS=*
# ------------------------------
# File Storage Configuration
# ------------------------------
# The type of storage to use for storing user files.
STORAGE_TYPE=opendal
# Apache OpenDAL Configuration
# The configuration for OpenDAL consists of the following format: OPENDAL_<SCHEME_NAME>_<CONFIG_NAME>.
# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services.
# Dify will scan configurations starting with OPENDAL_<SCHEME_NAME> and automatically apply them.
# The scheme name for the OpenDAL storage.
OPENDAL_SCHEME=fs
# Configurations for OpenDAL Local File System.
OPENDAL_FS_ROOT=storage
# S3 Configuration
#
S3_ENDPOINT=https://axqr6x6t48wm.compat.objectstorage.us-phoenix-1.oraclecloud.com
S3_REGION=us-phoenix-1
S3_BUCKET_NAME=phoenix-10
S3_ACCESS_KEY=e87a121f1548b244c7bd649a1f0ca35195d46cf2
S3_SECRET_KEY=uT+NIgJiKPjSaPT8EVUw3xbLSCv/CFMFuebVauznafk=
# Whether to use AWS managed IAM roles for authenticating with the S3 service.
# If set to false, the access key and secret key must be provided.
S3_USE_AWS_MANAGED_IAM=false
# Azure Blob Configuration
#
AZURE_BLOB_ACCOUNT_NAME=difyai
AZURE_BLOB_ACCOUNT_KEY=difyai
AZURE_BLOB_CONTAINER_NAME=difyai-container
AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net
# Google Storage Configuration
#
GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=
# The Alibaba Cloud OSS configurations,
#
ALIYUN_OSS_BUCKET_NAME=your-bucket-name
ALIYUN_OSS_ACCESS_KEY=your-access-key
ALIYUN_OSS_SECRET_KEY=your-secret-key
ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com
ALIYUN_OSS_REGION=ap-southeast-1
ALIYUN_OSS_AUTH_VERSION=v4
# Don't start with '/'. OSS doesn't support leading slash in object names.
ALIYUN_OSS_PATH=your-path
# Tencent COS Configuration
#
TENCENT_COS_BUCKET_NAME=your-bucket-name
TENCENT_COS_SECRET_KEY=your-secret-key
TENCENT_COS_SECRET_ID=your-secret-id
TENCENT_COS_REGION=your-region
TENCENT_COS_SCHEME=your-scheme
# Oracle Storage Configuration
#
OCI_ENDPOINT=https://axqr6x6t48wm.compat.objectstorage.us-phoenix-1.oraclecloud.com
OCI_BUCKET_NAME=phoenix-10
OCI_ACCESS_KEY=e87a121f1548b244c7bd649a1f0ca35195d46cf2
OCI_SECRET_KEY=uT+NIgJiKPjSaPT8EVUw3xbLSCv/CFMFuebVauznafk=
OCI_REGION=us-phoenix-1
# Huawei OBS Configuration
#
HUAWEI_OBS_BUCKET_NAME=your-bucket-name
HUAWEI_OBS_SECRET_KEY=your-secret-key
HUAWEI_OBS_ACCESS_KEY=your-access-key
HUAWEI_OBS_SERVER=your-server-url
# Volcengine TOS Configuration
#
VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name
VOLCENGINE_TOS_SECRET_KEY=your-secret-key
VOLCENGINE_TOS_ACCESS_KEY=your-access-key
VOLCENGINE_TOS_ENDPOINT=your-server-url
VOLCENGINE_TOS_REGION=your-region
# Baidu OBS Storage Configuration
#
BAIDU_OBS_BUCKET_NAME=your-bucket-name
BAIDU_OBS_SECRET_KEY=your-secret-key
BAIDU_OBS_ACCESS_KEY=your-access-key
BAIDU_OBS_ENDPOINT=your-server-url
# Supabase Storage Configuration
#
SUPABASE_BUCKET_NAME=your-bucket-name
SUPABASE_API_KEY=your-access-key
SUPABASE_URL=your-server-url
# ------------------------------
# Vector Database Configuration
# ------------------------------
# The type of vector store to use.
# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`.
VECTOR_STORE=weaviate
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
WEAVIATE_ENDPOINT=http://10.0.0.247:8080
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
QDRANT_URL=http://qdrant:6333
QDRANT_API_KEY=V2rayStrP@ss
QDRANT_CLIENT_TIMEOUT=20
QDRANT_GRPC_ENABLED=false
QDRANT_GRPC_PORT=6334
# Milvus configuration Only available when VECTOR_STORE is `milvus`.
# The milvus uri.
MILVUS_URI=http://127.0.0.1:19530
MILVUS_TOKEN=
MILVUS_USER=root
MILVUS_PASSWORD=Milvus
MILVUS_ENABLE_HYBRID_SEARCH=False
# MyScale configuration, only available when VECTOR_STORE is `myscale`
# For multi-language support, please set MYSCALE_FTS_PARAMS with referring to:
# https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters
MYSCALE_HOST=myscale
MYSCALE_PORT=8123
MYSCALE_USER=default
MYSCALE_PASSWORD=
MYSCALE_DATABASE=dify
MYSCALE_FTS_PARAMS=
# Chroma configuration, only available when VECTOR_STORE is `chroma`
CHROMA_HOST=127.0.0.1
CHROMA_PORT=8000
CHROMA_TENANT=default_tenant
CHROMA_DATABASE=default_database
CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider
CHROMA_AUTH_CREDENTIALS=
# Oracle configuration, only available when VECTOR_STORE is `oracle`
ORACLE_HOST=oracle
ORACLE_PORT=1521
ORACLE_USER=dify
ORACLE_PASSWORD=dify
ORACLE_DATABASE=FREEPDB1
# ------------------------------
# Knowledge Configuration
# ------------------------------
# Upload file size limit, default 15M.
UPLOAD_FILE_SIZE_LIMIT=150
# The maximum number of files that can be uploaded at a time, default 5.
UPLOAD_FILE_BATCH_LIMIT=10
# ETL type, support: `dify`, `Unstructured`
# `dify` Dify's proprietary file extraction scheme
# `Unstructured` Unstructured.io file extraction scheme
ETL_TYPE=dify
# Unstructured API path and API key, needs to be configured when ETL_TYPE is Unstructured
# Or using Unstructured for document extractor node for pptx.
# For example: http://unstructured:8000/general/v0/general
UNSTRUCTURED_API_URL=
UNSTRUCTURED_API_KEY=
SCARF_NO_ANALYTICS=true
# ------------------------------
# Model Configuration
# ------------------------------
# The maximum number of tokens allowed for prompt generation.
# This setting controls the upper limit of tokens that can be used by the LLM
# when generating a prompt in the prompt generation tool.
# Default: 512 tokens.
PROMPT_GENERATION_MAX_TOKENS=4096
# The maximum number of tokens allowed for code generation.
# This setting controls the upper limit of tokens that can be used by the LLM
# when generating code in the code generation tool.
# Default: 1024 tokens.
CODE_GENERATION_MAX_TOKENS=20480
# ------------------------------
# Multi-modal Configuration
# ------------------------------
# The format of the image/video/audio/document sent when the multi-modal model is input,
# the default is base64, optional url.
# The delay of the call in url mode will be lower than that in base64 mode.
# It is generally recommended to use the more compatible base64 mode.
# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document.
MULTIMODAL_SEND_FORMAT=base64
# Upload image file size limit, default 10M.
UPLOAD_IMAGE_FILE_SIZE_LIMIT=100
# Upload video file size limit, default 100M.
UPLOAD_VIDEO_FILE_SIZE_LIMIT=10000
# Upload audio file size limit, default 50M.
UPLOAD_AUDIO_FILE_SIZE_LIMIT=500
# ------------------------------
# Sentry Configuration
# Used for application monitoring and error log tracking.
# ------------------------------
SENTRY_DSN=
# API Service Sentry DSN address, default is empty, when empty,
# all monitoring information is not reported to Sentry.
# If not set, Sentry error reporting will be disabled.
API_SENTRY_DSN=
# API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%.
API_SENTRY_TRACES_SAMPLE_RATE=1.0
# API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%.
API_SENTRY_PROFILES_SAMPLE_RATE=1.0
# Web Service Sentry DSN address, default is empty, when empty,
# all monitoring information is not reported to Sentry.
# If not set, Sentry error reporting will be disabled.
WEB_SENTRY_DSN=
# ------------------------------
# Notion Integration Configuration
# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations
# ------------------------------
# Configure as "public" or "internal".
# Since Notion's OAuth redirect URL only supports HTTPS,
# if deploying locally, please use Notion's internal integration.
NOTION_INTEGRATION_TYPE=internal
# Notion OAuth client secret (used for public integration type)
NOTION_CLIENT_SECRET=
# Notion OAuth client id (used for public integration type)
NOTION_CLIENT_ID=
# Notion internal integration secret.
# If the value of NOTION_INTEGRATION_TYPE is "internal",
# you need to configure this variable.
NOTION_INTERNAL_SECRET=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
# ------------------------------
# Mail related configuration
# ------------------------------
# Mail type, support: resend, smtp
MAIL_TYPE=resend
# Default send from email address, if not specified
MAIL_DEFAULT_SEND_FROM=
# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`.
RESEND_API_URL=https://api.resend.com
RESEND_API_KEY=your-resend-api-key
# SMTP server configuration, used when MAIL_TYPE is `smtp`
SMTP_SERVER=
SMTP_PORT=465
SMTP_USERNAME=
SMTP_PASSWORD=
SMTP_USE_TLS=true
SMTP_OPPORTUNISTIC_TLS=false
# ------------------------------
# Others Configuration
# ------------------------------
# Maximum length of segmentation tokens for indexing
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000
# Member invitation link valid time (hours),
# Default: 72.
INVITE_EXPIRY_HOURS=72
# Reset password token valid time (minutes),
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
# The sandbox service endpoint.
CODE_EXECUTION_ENDPOINT=http://sandbox:8194
CODE_EXECUTION_API_KEY=dify-sandbox
CODE_MAX_NUMBER=9223372036854775807
CODE_MIN_NUMBER=-9223372036854775808
CODE_MAX_DEPTH=5
CODE_MAX_PRECISION=20
CODE_MAX_STRING_LENGTH=80000
CODE_MAX_STRING_ARRAY_LENGTH=30
CODE_MAX_OBJECT_ARRAY_LENGTH=30
CODE_MAX_NUMBER_ARRAY_LENGTH=1000
CODE_EXECUTION_CONNECT_TIMEOUT=10
CODE_EXECUTION_READ_TIMEOUT=60
CODE_EXECUTION_WRITE_TIMEOUT=10
TEMPLATE_TRANSFORM_MAX_LENGTH=80000
# Workflow runtime configuration
WORKFLOW_MAX_EXECUTION_STEPS=500
WORKFLOW_MAX_EXECUTION_TIME=1200
WORKFLOW_CALL_MAX_DEPTH=5
MAX_VARIABLE_SIZE=204800
WORKFLOW_PARALLEL_DEPTH_LIMIT=3
WORKFLOW_FILE_UPLOAD_LIMIT=10
# HTTP request node in workflow configuration
HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
# 指向 Amd64-02
#SSRF_PROXY_ALL_URL=socks5://10.0.0.246:2234
# SSRF Proxy server HTTP URL
SSRF_PROXY_HTTP_URL=http://10.0.0.246:1234
# SSRF Proxy server HTTPS URL
SSRF_PROXY_HTTPS_URL=http://10.0.0.246:2234
# ------------------------------
# Environment Variables for web Service
# ------------------------------
# The timeout for the text generation in millisecond
TEXT_GENERATION_TIMEOUT_MS=60000
# ------------------------------
# Environment Variables for db Service
# ------------------------------
PGUSER=${DB_USERNAME}
# The password for the default postgres user.
POSTGRES_PASSWORD=${DB_PASSWORD}
# The name of the default postgres database.
POSTGRES_DB=${DB_DATABASE}
# postgres data directory
PGDATA=/var/lib/postgresql/data/pgdata
# ------------------------------
# Environment Variables for sandbox Service
# ------------------------------
# The API key for the sandbox service
SANDBOX_API_KEY=dify-sandbox
# The mode in which the Gin framework runs
SANDBOX_GIN_MODE=release
# The timeout for the worker in seconds
SANDBOX_WORKER_TIMEOUT=15
# Enable network for the sandbox service
SANDBOX_ENABLE_NETWORK=true
# HTTP proxy URL for SSRF protection
SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128
# HTTPS proxy URL for SSRF protection
SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128
# The port on which the sandbox service runs
SANDBOX_PORT=8194
# ------------------------------
# Environment Variables for weaviate Service
# (only used when VECTOR_STORE is weaviate)
# ------------------------------
WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate
WEAVIATE_QUERY_DEFAULTS_LIMIT=25
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
WEAVIATE_DEFAULT_VECTORIZER_MODULE=none
WEAVIATE_CLUSTER_HOSTNAME=node1
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
# ------------------------------
# Environment Variables for SSRF Proxy
# ------------------------------
SSRF_HTTP_PORT=3128
SSRF_COREDUMP_DIR=/var/spool/squid
SSRF_REVERSE_PROXY_PORT=8194
SSRF_SANDBOX_HOST=sandbox
SSRF_DEFAULT_TIME_OUT=5
SSRF_DEFAULT_CONNECT_TIME_OUT=5
SSRF_DEFAULT_READ_TIME_OUT=5
SSRF_DEFAULT_WRITE_TIME_OUT=5
# ------------------------------
# docker env var for specifying vector db type at startup
# (based on the vector db type, the corresponding docker
# compose profile will be used)
# if you want to use unstructured, add ',unstructured' to the end
# ------------------------------
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate}
# ------------------------------
# Docker Compose Service Expose Host Port Configurations
# ------------------------------
EXPOSE_NGINX_PORT=20080
EXPOSE_NGINX_SSL_PORT=20443
# ----------------------------------------------------------------------------
# ModelProvider & Tool Position Configuration
# Used to specify the model providers and tools that can be used in the app.
# ----------------------------------------------------------------------------
# Pin, include, and exclude tools
# Use comma-separated values with no spaces between items.
# Example: POSITION_TOOL_PINS=bing,google
POSITION_TOOL_PINS=
POSITION_TOOL_INCLUDES=
POSITION_TOOL_EXCLUDES=
# Pin, include, and exclude model providers
# Use comma-separated values with no spaces between items.
# Example: POSITION_PROVIDER_PINS=openai,openllm
POSITION_PROVIDER_PINS=
POSITION_PROVIDER_INCLUDES=
POSITION_PROVIDER_EXCLUDES=
# CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP
CSP_WHITELIST=
# Enable or disable create tidb service job
CREATE_TIDB_SERVICE_JOB_ENABLED=false
# Maximum number of submitted thread count in a ThreadPool for parallel node execution
MAX_SUBMIT_COUNT=100
# The maximum number of top-k value for RAG.
TOP_K_MAX_VALUE=10
# ------------------------------
# Plugin Daemon Configuration
# ------------------------------
DB_PLUGIN_DATABASE=dify_plugin
EXPOSE_PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_PORT=5002
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
PLUGIN_DAEMON_URL=http://10.0.0.12:5002
PLUGIN_MAX_PACKAGE_SIZE=52428800
PLUGIN_PPROF_ENABLED=false
PLUGIN_DEBUGGING_HOST=0.0.0.0
PLUGIN_DEBUGGING_PORT=5003
EXPOSE_PLUGIN_DEBUGGING_HOST=localhost
EXPOSE_PLUGIN_DEBUGGING_PORT=5003
PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
PLUGIN_DIFY_INNER_API_URL=http://10.0.0.12:5001
ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id}
MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace.dify.ai
FORCE_VERIFYING_SIGNATURE=true
PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120
PLUGIN_MAX_EXECUTION_TIMEOUT=600
# PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple
PIP_MIRROR_URL=

View File

@@ -0,0 +1,69 @@
# ==================================================================
# WARNING: This file is auto-generated by generate_docker_compose
# Do not modify this file directly. Instead, update the .env.example
# or docker-compose-template.yaml and regenerate this file.
# ==================================================================
services:
# The postgres database.
db:
image: postgres:15-alpine
restart: always
environment:
PGUSER: ${PGUSER:-postgres}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-V2rayStrP@ss}
POSTGRES_DB: ${POSTGRES_DB:-dify}
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
command: >
postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
-c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-1024MB}'
-c 'work_mem=${POSTGRES_WORK_MEM:-64MB}'
-c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-128MB}'
-c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-2048MB}'
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
healthcheck:
test: [ 'CMD', 'pg_isready' ]
interval: 1s
timeout: 3s
retries: 30
ports:
- 5432:5432
# The Weaviate vector store.
weaviate:
image: semitechnologies/weaviate:1.19.0
restart: always
volumes:
      # Mount the Weaviate data directory to the container.
- /mnt/ramdisk/weaviate:/var/lib/weaviate
environment:
# The Weaviate configurations
# You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-50}
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true}
DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
ports:
- 8080:8080
redis:
image: redis:6-alpine
restart: always
environment:
REDISCLI_AUTH: ${REDIS_PASSWORD:-V2rayStrP@ss}
volumes:
# Mount the redis data directory to the container.
- /mnt/ramdisk/redis/data:/data
# Set the redis password when startup redis server.
command: redis-server --requirepass ${REDIS_PASSWORD:-V2rayStrP@ss}
healthcheck:
test: [ 'CMD', 'redis-cli', 'ping' ]
ports:
- 6379:6379

View File

@@ -0,0 +1,4 @@
129.146.57.94
10.0.0.247

View File

@@ -2,3 +2,5 @@
### https://www.dejavu.moe/posts/selfhosted-bitwarden-with-cloudflare-tunnel/ ### https://www.dejavu.moe/posts/selfhosted-bitwarden-with-cloudflare-tunnel/
官方使用说明 https://github.com/dani-garcia/vaultwarden/wiki

View File

@@ -9,6 +9,12 @@ EMERGENCY_ACCESS_ALLOWED=true
# 日志等级 # 日志等级
LOG_LEVEL=warn LOG_LEVEL=warn
# 是否禁用管理员令牌(禁用后无需令牌即可访问管理页面)
DISABLE_ADMIN_TOKEN=true
# 是否允许注册
SIGNUPS_ALLOWED=false
# 注册需要验证? # 注册需要验证?
SIGNUPS_VERIFY=true SIGNUPS_VERIFY=true
SIGNUPS_VERIFY_RESEND_TIME=3600 SIGNUPS_VERIFY_RESEND_TIME=3600
@@ -21,7 +27,7 @@ SIGNUPS_DOMAINS_WHITELIST=107421.xyz
ORG_CREATION_USERS=you@107421.xyz ORG_CREATION_USERS=you@107421.xyz
# 使用 openssl rand -base64 48 命令快速生成管理员令牌 # 使用 openssl rand -base64 48 命令快速生成管理员令牌
ADMIN_TOKEN=WnzCmaUPhFsN5jUphoazxw4hlh2cfwjlUPPKJOXxYdLAApxuoFiCIgIjUO8HbaY7 # ADMIN_TOKEN=WnzCmaUPhFsN5jUphoazxw4hlh2cfwjlUPPKJOXxYdLAApxuoFiCIgIjUO8HbaY7
# 允许邀请? # 允许邀请?
INVITATIONS_ALLOWED=true INVITATIONS_ALLOWED=true

View File

@@ -2,7 +2,7 @@ version: '3.3'
services: services:
vaultwarden: vaultwarden:
image: vaultwarden/server:alpine image: vaultwarden/server:1.34.3-alpine
container_name: vault-warden container_name: vault-warden
restart: always restart: always
environment: environment:

View File

@@ -0,0 +1,191 @@
# This file is a template for docker compose deployment
# Copy this file to .env and change the values as needed
# Fully qualified domain name for the deployment. Replace localhost with your domain,
# such as http://mydomain.com.
FQDN=http://localhost
# PostgreSQL Settings
POSTGRES_HOST=postgres
POSTGRES_USER=postgres
POSTGRES_PASSWORD=password
POSTGRES_PORT=5432
POSTGRES_DB=postgres
# Postgres credential for supabase_auth_admin
SUPABASE_PASSWORD=root
# Redis Settings
REDIS_HOST=redis
REDIS_PORT=6379
# Minio Host
MINIO_HOST=minio
MINIO_PORT=9000
AWS_ACCESS_KEY=minioadmin
AWS_SECRET=minioadmin
# AppFlowy Cloud
## URL that connects to the gotrue docker container
APPFLOWY_GOTRUE_BASE_URL=http://gotrue:9999
## URL that connects to the postgres docker container. If your password contains special characters, instead of using ${POSTGRES_PASSWORD},
## you will need to convert them into url encoded format. For example, `p@ssword` will become `p%40ssword`.
APPFLOWY_DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
APPFLOWY_ACCESS_CONTROL=true
APPFLOWY_WEBSOCKET_MAILBOX_SIZE=6000
APPFLOWY_DATABASE_MAX_CONNECTIONS=40
## URL that connects to the redis docker container
APPFLOWY_REDIS_URI=redis://${REDIS_HOST}:${REDIS_PORT}
# admin frontend
## URL that connects to redis docker container
ADMIN_FRONTEND_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
## URL that connects to gotrue docker container
ADMIN_FRONTEND_GOTRUE_URL=http://gotrue:9999
## URL that connects to the cloud docker container
ADMIN_FRONTEND_APPFLOWY_CLOUD_URL=http://appflowy_cloud:8000
## Base Url for the admin frontend. If you use the default Nginx conf provided here, this value should be /console.
## If you want to keep the previous behaviour where admin frontend is served at the root, don't set this env variable,
## or set it to empty string.
ADMIN_FRONTEND_PATH_PREFIX=/console
# authentication key, change this and keep the key safe and secret
# self defined key, you can use any string
GOTRUE_JWT_SECRET=hello456
# Expiration time in seconds for the JWT token
GOTRUE_JWT_EXP=7200
# User sign up will automatically be confirmed if this is set to true.
# If you have OAuth2 set up or smtp configured, you can set this to false
# to enforce email confirmation or OAuth2 login instead.
# If you set this to false, you need to either set up SMTP
GOTRUE_MAILER_AUTOCONFIRM=true
# Number of emails that can be sent per minute
GOTRUE_RATE_LIMIT_EMAIL_SENT=100
# If you intend to use mail confirmation, you need to set the SMTP configuration below
# You would then need to set GOTRUE_MAILER_AUTOCONFIRM=false
# Check for logs in gotrue service if there are any issues with email confirmation
# Note that smtps will be used for port 465, otherwise plain smtp with optional STARTTLS
GOTRUE_SMTP_HOST=smtp.gmail.com
GOTRUE_SMTP_PORT=465
GOTRUE_SMTP_USER=email_sender@some_company.com
GOTRUE_SMTP_PASS=email_sender_password
GOTRUE_SMTP_ADMIN_EMAIL=comp_admin@some_company.com
# This user will be created when GoTrue starts successfully
# You can use this user to login to the admin panel
GOTRUE_ADMIN_EMAIL=admin@example.com
GOTRUE_ADMIN_PASSWORD=password
# Set this to true if users can only join by invite
GOTRUE_DISABLE_SIGNUP=false
# External URL where the GoTrue service is exposed.
API_EXTERNAL_URL=${FQDN}/gotrue
# GoTrue connect to postgres using this url. If your password contains special characters,
# replace ${SUPABASE_PASSWORD} with the url encoded version. For example, `p@ssword` will become `p%40ssword`
GOTRUE_DATABASE_URL=postgres://supabase_auth_admin:${SUPABASE_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# Refer to this for details: https://github.com/AppFlowy-IO/AppFlowy-Cloud/blob/main/doc/AUTHENTICATION.md
# Google OAuth2
GOTRUE_EXTERNAL_GOOGLE_ENABLED=false
GOTRUE_EXTERNAL_GOOGLE_CLIENT_ID=
GOTRUE_EXTERNAL_GOOGLE_SECRET=
GOTRUE_EXTERNAL_GOOGLE_REDIRECT_URI=${API_EXTERNAL_URL}/callback
# GitHub OAuth2
GOTRUE_EXTERNAL_GITHUB_ENABLED=false
GOTRUE_EXTERNAL_GITHUB_CLIENT_ID=
GOTRUE_EXTERNAL_GITHUB_SECRET=
GOTRUE_EXTERNAL_GITHUB_REDIRECT_URI=${API_EXTERNAL_URL}/callback
# Discord OAuth2
GOTRUE_EXTERNAL_DISCORD_ENABLED=false
GOTRUE_EXTERNAL_DISCORD_CLIENT_ID=
GOTRUE_EXTERNAL_DISCORD_SECRET=
GOTRUE_EXTERNAL_DISCORD_REDIRECT_URI=${API_EXTERNAL_URL}/callback
# Apple OAuth2
GOTRUE_EXTERNAL_APPLE_ENABLED=false
GOTRUE_EXTERNAL_APPLE_CLIENT_ID=
GOTRUE_EXTERNAL_APPLE_SECRET=
GOTRUE_EXTERNAL_APPLE_REDIRECT_URI=${API_EXTERNAL_URL}/callback
# File Storage
# Create the bucket if not exists on AppFlowy Cloud start up.
# Set this to false if the bucket has been created externally.
APPFLOWY_S3_CREATE_BUCKET=true
# This is where storage like images, files, etc. will be stored.
# By default, Minio is used as the default file storage which uses host's file system.
# Keep this as true if you are using other S3 compatible storage provider other than AWS.
APPFLOWY_S3_USE_MINIO=true
APPFLOWY_S3_MINIO_URL=http://${MINIO_HOST}:${MINIO_PORT} # change this if you are using a different address for minio
APPFLOWY_S3_ACCESS_KEY=${AWS_ACCESS_KEY}
APPFLOWY_S3_SECRET_KEY=${AWS_SECRET}
APPFLOWY_S3_BUCKET=appflowy
# Uncomment this if you are using AWS S3
# APPFLOWY_S3_REGION=us-east-1
# Uncomment this if you are using the Minio service hosted within this docker compose file
# This is so that the presigned URL generated by AppFlowy Cloud will use the publicly available minio endpoint.
# APPFLOWY_S3_PRESIGNED_URL_ENDPOINT=${FQDN}/minio-api
# AppFlowy Cloud Mailer
# Note that smtps (TLS) is always required, even for ports other than 465
APPFLOWY_MAILER_SMTP_HOST=smtp.gmail.com
APPFLOWY_MAILER_SMTP_PORT=465
APPFLOWY_MAILER_SMTP_USERNAME=email_sender@some_company.com
APPFLOWY_MAILER_SMTP_EMAIL=email_sender@some_company.com
APPFLOWY_MAILER_SMTP_PASSWORD=email_sender_password
APPFLOWY_MAILER_SMTP_TLS_KIND=wrapper # "none" "wrapper" "required" "opportunistic"
# Log level for the appflowy-cloud service
RUST_LOG=info
# PgAdmin
# Optional module to manage the postgres database
# You can access the pgadmin at http://your-host/pgadmin
# Refer to the APPFLOWY_DATABASE_URL for password when connecting to the database
PGADMIN_DEFAULT_EMAIL=admin@example.com
PGADMIN_DEFAULT_PASSWORD=password
# Portainer (username: admin)
PORTAINER_PASSWORD=password1234
# Cloudflare tunnel token
CLOUDFLARE_TUNNEL_TOKEN=
# NGINX
# Optional, change this if you want to use custom ports to expose AppFlowy
NGINX_PORT=80
NGINX_TLS_PORT=443
# AppFlowy AI
AI_OPENAI_API_KEY=
AI_SERVER_PORT=5001
AI_SERVER_HOST=ai
AI_DATABASE_URL=postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
AI_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
LOCAL_AI_TEST_ENABLED=false
AI_APPFLOWY_BUCKET_NAME=${APPFLOWY_S3_BUCKET}
AI_APPFLOWY_HOST=${FQDN}
AI_MINIO_URL=http://${MINIO_HOST}:${MINIO_PORT}
# AppFlowy Indexer
APPFLOWY_INDEXER_ENABLED=true
APPFLOWY_INDEXER_DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
APPFLOWY_INDEXER_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
APPFLOWY_INDEXER_EMBEDDING_BUFFER_SIZE=5000
# AppFlowy Collaborate
APPFLOWY_COLLABORATE_MULTI_THREAD=false
APPFLOWY_COLLABORATE_REMOVE_BATCH_SIZE=100
# AppFlowy Worker
APPFLOWY_WORKER_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
APPFLOWY_WORKER_DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# AppFlowy Web
# If your AppFlowy Web is hosted on a different domain, update this variable to the correct domain
APPFLOWY_WEB_URL=${FQDN}
# If you are running AppFlowy Web locally for development purpose, use the following value instead
# APPFLOWY_WEB_URL=http://localhost:3000

View File

@@ -0,0 +1,8 @@
# Launch the AppFlowy desktop client in a throwaway container with X11,
# GPU (DRI), and D-Bus access from the host, persisting user data in the
# named volume `appflowy-data`.
#
# Fix: quote $HOME and ${DISPLAY} so the command survives paths/values
# containing spaces (ShellCheck SC2086).
docker run --rm \
  -v "$HOME/.Xauthority:/root/.Xauthority:rw" \
  -v /tmp/.X11-unix:/tmp/.X11-unix \
  -v /dev/dri:/dev/dri \
  -v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \
  -v appflowy-data:/home/appflowy \
  -e DISPLAY="${DISPLAY}" \
  appflowyio/appflowy_client:main

View File

@@ -1,6 +1,10 @@
# This file is a template for docker compose deployment # This file is a template for docker compose deployment
# Copy this file to .env and change the values as needed # Copy this file to .env and change the values as needed
# Fully qualified domain name for the deployment. Replace localhost with your domain,
# such as http://mydomain.com.
FQDN=http://0.0.0.0:23000
# PostgreSQL Settings # PostgreSQL Settings
POSTGRES_HOST=postgres POSTGRES_HOST=postgres
POSTGRES_USER=postgres POSTGRES_USER=postgres
@@ -15,6 +19,13 @@ SUPABASE_PASSWORD=V2ryStr@Pss
REDIS_HOST=redis REDIS_HOST=redis
REDIS_PORT=6379 REDIS_PORT=6379
# Minio Host
MINIO_HOST=https://cnk8d6fazu16.compat.objectstorage.ap-seoul-1.oraclecloud.com
MINIO_PORT=443
AWS_ACCESS_KEY=9e413c6e66269bc65d7ec951d93ba9c6a9781f6e
AWS_SECRET=dkXD7PysjrhsTKfNIbKupUmtxdfOvYCyLXf0MXa4hnU
# AppFlowy Cloud # AppFlowy Cloud
## URL that connects to the gotrue docker container ## URL that connects to the gotrue docker container
APPFLOWY_GOTRUE_BASE_URL=http://gotrue:9999 APPFLOWY_GOTRUE_BASE_URL=http://gotrue:9999
@@ -31,6 +42,12 @@ APPFLOWY_REDIS_URI=redis://${REDIS_HOST}:${REDIS_PORT}
ADMIN_FRONTEND_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT} ADMIN_FRONTEND_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
## URL that connects to gotrue docker container ## URL that connects to gotrue docker container
ADMIN_FRONTEND_GOTRUE_URL=http://gotrue:9999 ADMIN_FRONTEND_GOTRUE_URL=http://gotrue:9999
## URL that connects to the cloud docker container
ADMIN_FRONTEND_APPFLOWY_CLOUD_URL=http://appflowy_cloud:8000
## Base Url for the admin frontend. If you use the default Nginx conf provided here, this value should be /console.
## If you want to keep the previous behaviour where admin frontend is served at the root, don't set this env variable,
## or set it to empty string.
ADMIN_FRONTEND_PATH_PREFIX=/console
# authentication key, change this and keep the key safe and secret # authentication key, change this and keep the key safe and secret
# self defined key, you can use any string # self defined key, you can use any string
@@ -42,7 +59,7 @@ GOTRUE_JWT_EXP=7200
# If you have OAuth2 set up or smtp configured, you can set this to false # If you have OAuth2 set up or smtp configured, you can set this to false
# to enforce email confirmation or OAuth2 login instead. # to enforce email confirmation or OAuth2 login instead.
# If you set this to false, you need to either set up SMTP # If you set this to false, you need to either set up SMTP
GOTRUE_MAILER_AUTOCONFIRM=true GOTRUE_MAILER_AUTOCONFIRM=false
# Number of emails that can be per minute # Number of emails that can be per minute
GOTRUE_RATE_LIMIT_EMAIL_SENT=100 GOTRUE_RATE_LIMIT_EMAIL_SENT=100
@@ -66,7 +83,7 @@ GOTRUE_ADMIN_PASSWORD=lovemm.23
# If you are using a different domain, you need to change the redirect_uri in the OAuth2 configuration # If you are using a different domain, you need to change the redirect_uri in the OAuth2 configuration
# Make sure that this domain is accessible to the user # Make sure that this domain is accessible to the user
# Make sure no endswith / # Make sure no endswith /
API_EXTERNAL_URL=https://note.107421.xyz API_EXTERNAL_URL=${FQDN}/gotrue
# In docker environment, `postgres` is the hostname of the postgres service # In docker environment, `postgres` is the hostname of the postgres service
# GoTrue connect to postgres using this url # GoTrue connect to postgres using this url
@@ -102,15 +119,15 @@ APPFLOWY_S3_CREATE_BUCKET=false
# By default, Minio is used as the default file storage which uses host's file system. # By default, Minio is used as the default file storage which uses host's file system.
# Keep this as true if you are using other S3 compatible storage provider other than AWS. # Keep this as true if you are using other S3 compatible storage provider other than AWS.
APPFLOWY_S3_USE_MINIO=true APPFLOWY_S3_USE_MINIO=true
APPFLOWY_S3_MINIO_URL=https://cnk8d6fazu16.compat.objectstorage.ap-seoul-1.oraclecloud.com APPFLOWY_S3_MINIO_URL=http://${MINIO_HOST}:${MINIO_PORT}
APPFLOWY_S3_ACCESS_KEY=9e413c6e66269bc65d7ec951d93ba9c6a9781f6e APPFLOWY_S3_ACCESS_KEY=${AWS_ACCESS_KEY}
APPFLOWY_S3_SECRET_KEY=dkXD7PysjrhsTKfNIbKupUmtxdfOvYCyLXf0MXa4hnU= APPFLOWY_S3_SECRET_KEY=${AWS_SECRET}
APPFLOWY_S3_BUCKET=seoul-2 APPFLOWY_S3_BUCKET=seoul-2
APPFLOWY_S3_REGION=ap-seoul-1 APPFLOWY_S3_REGION=ap-seoul-1
# AppFlowy Cloud Mailer # AppFlowy Cloud Mailer
# Note that smtps (TLS) is always required, even for ports other than 465 # Note that smtps (TLS) is always required, even for ports other than 465
APPFLOWY_MAILER_SMTP_HOST=smtp.gmail.com APPFLOWY_MAILER_SMTP_HOST=https://smtp.gmail.com
APPFLOWY_MAILER_SMTP_PORT=465 APPFLOWY_MAILER_SMTP_PORT=465
APPFLOWY_MAILER_SMTP_USERNAME=zeaslity@gmail.com APPFLOWY_MAILER_SMTP_USERNAME=zeaslity@gmail.com
APPFLOWY_MAILER_SMTP_PASSWORD=loveff.cxc.56320 APPFLOWY_MAILER_SMTP_PASSWORD=loveff.cxc.56320
@@ -146,11 +163,10 @@ APPFLOWY_AI_SERVER_PORT=5001
APPFLOWY_AI_SERVER_HOST=ai APPFLOWY_AI_SERVER_HOST=ai
APPFLOWY_AI_DATABASE_URL=postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} APPFLOWY_AI_DATABASE_URL=postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
APPFLOWY_LOCAL_AI_TEST_ENABLED=false APPFLOWY_LOCAL_AI_TEST_ENABLED=false
AI_APPFLOWY_BUCKET_NAME=${APPFLOWY_S3_BUCKET}
AI_APPFLOWY_HOST=${FQDN}
AI_MINIO_URL=http://${MINIO_HOST}:${MINIO_PORT}
# AppFlowy History
APPFLOWY_GRPC_HISTORY_ADDRS=http://localhost:50051
APPFLOWY_HISTORY_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
APPFLOWY_HISTORY_DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# AppFlowy Indexer # AppFlowy Indexer
APPFLOWY_INDEXER_ENABLED=true APPFLOWY_INDEXER_ENABLED=true
@@ -166,4 +182,4 @@ APPFLOWY_WORKER_REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}
APPFLOWY_WORKER_DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} APPFLOWY_WORKER_DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# AppFlowy Web # AppFlowy Web
APPFLOWY_WEB_URL=http://localhost:3000 APPFLOWY_WEB_URL=${FQDN}

View File

@@ -0,0 +1,514 @@
<p align="center">
<img src="./assets/logo-3071751.jpg">
</p>
# 🤖️ TeleChat
[英文](README.md) | [中文](README_CN.md)
<p align="center">
<a href="https://t.me/+_01cz9tAkUc1YzZl">
<img src="https://img.shields.io/badge/Join Telegram Group-blue?&logo=telegram">
</a>
<a href="https://t.me/chatgpt68_bot">
<img src="https://img.shields.io/badge/Telegram Bot-grey?&logo=Probot">
</a>
<a href="https://hub.docker.com/repository/docker/yym68686/chatgpt">
<img src="https://img.shields.io/docker/pulls/yym68686/chatgpt?color=blue" alt="docker pull">
</a>
</p>
ChatGPT Telegram 机器人是一个强大的 Telegram 机器人,可以使用多种主流的大语言模型 API包括 GPT-3.5/4/4 Turbo/4o/o1DALL·E 3Claude2.1/3/3.5 APIGemini 1.5 Pro/FlashVertex AIClaude系列/Gemini系列Groq Mixtral-8x7b/LLaMA2-70b 和 DuckDuckGo(gpt-4o-mini, claude-3-haiku, Meta-Llama-3.1-70B, Mixtral-8x7B)。它使用户能够在 Telegram 上进行高效的对话和信息搜索。
## ✨ 功能
- **多种AI模型**支持GPT-3.5/4/4 Turbo/4o/o1DALL·E 3Claude2.1/3/3.5 APIGemini 1.5 Pro/FlashVertex AIClaude系列/Gemini系列Groq Mixtral-8x7b/LLaMA2-70b 和 DuckDuckGo(gpt-4o-mini, claude-3-haiku, Meta-Llama-3.1-70B, Mixtral-8x7B)。还支持 one-api/new-api/[uni-api](https://github.com/yym68686/uni-api)。利用自研 API 请求后端 [SDK](https://github.com/yym68686/ModelMerge),不依赖 OpenAI SDK。
- **多模态问答**:支持语音、音频、图像和 PDF/TXT/MD/python 文档的问答。用户可以直接在聊天框中上传文件使用。
- **群聊主题模式**支持在群聊中启用主题模式在主题之间隔离API、对话历史、插件配置和偏好设置。
- **丰富的插件系统**支持网页搜索DuckDuckGo和Google、URL 总结、ArXiv 论文总结和代码解释器。
- **用户友好界面**:允许在聊天窗口内灵活切换模型,并支持类似打字机效果的流式输出。支持精确的 Markdown 消息渲染,利用了我的另一个[项目](https://github.com/yym68686/md2tgmd)。
- **高效消息处理**:异步处理消息,多线程回答问题,支持隔离对话,并为不同用户提供独特对话。
- **长文本消息处理**: 自动合并长文本消息突破Telegram的单条消息长度限制。当机器人的回复超过Telegram的限制时它将被拆分成多条消息。
- **多用户对话隔离**:支持对话隔离和配置隔离,允许在多用户和单用户模式之间进行选择。
- **问题预测**: 自动生成后续问题,预测用户可能会接下来询问的问题。
- **多语言界面**:支持简体中文、繁体中文、俄文和英文界面。
- **白名单、黑名单和管理员设置**:支持设置白名单、黑名单和管理员。
- **内联模式**:允许用户在任何聊天窗口中 @ 机器人以生成答案,而无需在机器人的聊天窗口中提问。
- **方便部署**:支持一键部署到 koyeb、Zeabur、Replit真正零成本和傻瓜式部署流程。还支持 kuma 防休眠,以及 Docker 和 fly.io 部署。
## 🍃 环境变量
以下是与机器人核心设置相关的环境变量列表:
| 变量名称 | 描述 | 是否必需? |
|---------------|-------------|-----------|
| BOT_TOKEN | Telegram 机器人令牌。 在 [BotFather](https://t.me/BotFather) 上创建一个机器人以获取 BOT_TOKEN。 | **是** |
| API | OpenAI 或第三方 API 密钥。 | 否 |
| GPT_ENGINE | 设置默认的QA模型默认是`gpt-4o`。此项可以使用机器人的“info”命令自由切换原则上不需要设置。 | 否 |
| WEB_HOOK | 每当电报机器人收到用户消息时,消息将被传递到 WEB_HOOK机器人将在此监听并及时处理收到的消息。 | 否 |
| API_URL | 如果您使用的是OpenAI官方API则无需设置此项。如果您使用的是第三方API则需要填写第三方代理网站。默认值是https://api.openai.com/v1/chat/completions | 否 |
| GROQ_API_KEY | Groq官方API密钥。 | 否 |
| GOOGLE_AI_API_KEY | Google AI 官方 API 密钥。使用此环境变量访问 Gemini 系列模型,包括 Gemini 1.5 pro 和 Gemini 1.5 flash。| 否 |
| VERTEX_PRIVATE_KEY | 描述: Google Cloud Vertex AI 服务账户的私钥。格式: 包含服务账户私钥信息的 JSON 字符串里面的 private_key 字段的值,请使用双引号包裹私钥。如何获取: 在 Google Cloud 控制台中创建一个服务账户,生成一个 JSON 密钥文件,并将其内容里面的 private_key 字段的值使用双引号包裹后设置为此环境变量的值。 | 否 |
| VERTEX_PROJECT_ID | 描述:您的 Google Cloud 项目 ID。格式一个字符串通常由小写字母、数字和连字符组成。如何获取您可以在 Google Cloud 控制台的项目选择器中找到您的项目 ID。 | 否 |
| VERTEX_CLIENT_EMAIL | 描述Google Cloud Vertex AI 服务账户的电子邮件地址。格式:通常是 "service-account-name@developer.gserviceaccount.com" 形式的字符串。获取方式:在创建服务账户时生成,或可以在 Google Cloud 控制台的 "IAM & 管理" 部分的服务账户详细信息中查看。 | 否 |
| claude_api_key | Claude 官方 API 密钥。 | 否 |
| CLAUDE_API_URL | 如果您使用的是Anthropic官方API则无需设置此项。如果您使用的是第三方Anthropic API则需要填写第三方代理网站。默认值是https://api.anthropic.com/v1/messages | 否 |
| NICK | 默认是空的NICK 是机器人的名字。机器人只会在用户输入的消息以 NICK 开头时才会响应,否则机器人会响应任何消息。特别是在群聊中,如果没有 NICK机器人会回复所有消息。 | 否 |
| GOOGLE_API_KEY | 如果你需要使用谷歌搜索你需要设置它。如果你不设置这个环境变量机器人将默认提供duckduckgo搜索。 | No |
| GOOGLE_CSE_ID | 如果你需要使用谷歌搜索,你需要和 GOOGLE_API_KEY 一起设置。 | 否 |
| whitelist | 设置哪些用户可以访问机器人,并用 ',' 连接被授权使用机器人的用户ID。默认值是 `None`,这意味着机器人对所有人开放。 | 否 |
| BLACK_LIST | 设置哪些用户禁止访问机器人,并用 ',' 连接被授权使用机器人的用户ID。默认值是 `None` | 否 |
| ADMIN_LIST | 设置管理员列表。只有管理员可以使用 `/info` 命令配置机器人。 | 否 |
| GROUP_LIST | 设置可以使用机器人的群组列表。使用逗号(''连接群组ID。即使群组成员不在白名单中只要群组ID在GROUP_LIST中群组的所有成员都可以使用机器人。 | 否 |
| CUSTOM_MODELS | 设置自定义模型名称列表。使用逗号(',')连接模型名称。如果需要删除默认模型,请在默认模型名称前添加连字符(-)。如果要删除所有默认模型,请使用 `-all`。 | 否 |
| CHAT_MODE | 引入多用户模式,不同用户的配置不共享。当 CHAT_MODE 为 `global` 时,所有用户共享配置。当 CHAT_MODE 为 `multiusers` 时,用户配置彼此独立。 | 否 |
| temperature | 指定 LLM 的温度。默认值是 `0.5`。 | 否 |
| GET_MODELS | 指定是否通过 API 获取支持的模型。默认值为 `False`。 | 否 |
| SYSTEMPROMPT | 指定系统提示,系统提示是字符串,例如:`SYSTEMPROMPT=You are ChatGPT, a large language model trained by OpenAI. Respond conversationally`。默认是 `None`。系统提示的设置仅在 `CHAT_MODE``global` 时,系统提示的设置才会有效。当 `CHAT_MODE``multiusers` 时,系统提示的环境变量无论是任何值都不会修改任何用户的系统提示,因为用户不希望自己设置的系统系统被修改为全局系统提示。 | 否 |
| LANGUAGE | 指定机器人显示的默认语言,包括按钮显示语言和对话语言。默认是 `English`。目前仅支持设置为下面四种语言:`English``Simplified Chinese``Traditional Chinese``Russian`。同时也可以在机器人部署后使用 `/info` 命令设置显示语言 | 否 |
| CONFIG_DIR | 指定存储用户配置文件夹。CONFIG_DIR 是用于存储用户配置的文件夹。每次机器人启动时,它都会从 CONFIG_DIR 文件夹读取配置,因此用户每次重新启动时不会丢失之前的设置。您可以在本地使用 Docker 部署时,通过使用 `-v` 参数挂载文件夹来实现配置持久化。默认值是 `user_configs`。 | 否 |
| RESET_TIME | 指定机器人每隔多少秒重置一次聊天历史记录,每隔 RESET_TIME 秒,机器人会重置除了管理员列表外所有用户的聊天历史记录,每个用户重置时间不一样,根据每个用户最后的提问时间来计算下一次重置时间。而不是所有用户在同一时间重置。默认值是 `3600` 秒,最小值是 `60` 秒。 | 否 |
以下是与机器人偏好设置相关的环境变量列表,偏好设置也可以通过机器人启动后使用 `/info` 命令,点击 `偏好设置` 按钮来设置:
| 变量名称 | 描述 | 必需的? |
|---------------|-------------|-----------|
| PASS_HISTORY | 默认值是 `9999`。机器人会记住对话历史,并在下次回复时考虑上下文。如果设置为 `0`机器人将忘记对话历史只考虑当前对话。PASS_HISTORY 的值必须大于或等于 0。对应于偏好设置里面的名为 `对话历史` 的按钮。 | 否 |
| LONG_TEXT | 如果用户的输入消息的文本长度超出了 Telegram 的限制,并在很短的时间内连续发送多个消息,机器人会将这些多个消息视为一个。默认值是 `True`。对应于偏好设置里面的名为 `长文本合并` 的按钮。 | 否 |
| IMAGEQA | 是否启用图像问答,默认设置是模型可以回答图像内容,默认值为 `True`。对应于偏好设置里面的名为 `图片问答` 的按钮。 | 否 |
| LONG_TEXT_SPLIT | 当机器人的回复超过Telegram限制时它将被拆分为多个消息。默认值是 `True`。对应于偏好设置里面的名为 `长文本分割` 的按钮。 | 否 |
| FILE_UPLOAD_MESS | 当文件或图像上传成功并且机器人处理完成时,机器人将发送一条消息,提示上传成功。默认值为 `True`。对应于偏好设置里面的名为 `文件上传成功提示消息` 的按钮。 | 否 |
| FOLLOW_UP | 自动生成多个相关问题供用户选择。默认值为 `False`。对应于偏好设置里面的名为 `猜你想问` 的按钮。 | 否 |
| TITLE | 是否在机器人回复的开头显示模型名称。默认值为 `False`。对应于偏好设置里面的名为 `模型标题` 的按钮。 | 否 |
<!-- | TYPING | 是否在机器人回复时显示“正在输入”状态。默认值为 `False`。 | 否 | -->
| REPLY | 机器人是否应以“回复”格式回复用户的消息。默认值为 `False`。对应于偏好设置里面的名为 `回复消息` 的按钮。 | 否 |
以下是与机器人插件设置相关的环境变量列表:
| 变量名称 | 描述 | 必需的? |
|---------------|-------------|-----------|
| SEARCH | 是否启用搜索插件。默认值为 `True`。 | 否 |
| URL | 是否启用URL摘要插件。默认值为 `True`。 | 否 |
| ARXIV | 是否启用arXiv论文摘要插件。默认值为 `False`。 | 否 |
| CODE | 是否启用代码解释器插件。默认值为 `False`。 | 否 |
| IMAGE | 是否启用图像生成插件。默认值为 `False`。 | 否 |
| DATE | 是否启用日期插件。默认值为 `False`。 | 否 |
## Koyeb 远程部署
可以使用两种方式部署在 koyeb 上部署,一种是使用 Koyeb 提供的 docker 镜像一键部署,另一种是导入本仓库部署。这两种方式都是免费的。第一种方式部署简单,但是无法自动更新,第二种方式部署稍微复杂,但是可以自动更新。
### 一键部署
点击下面的按钮可以自动使用构建好的 docker 镜像一键部署:
[![Deploy to Koyeb](https://www.koyeb.com/static/images/deploy/button.svg)](https://app.koyeb.com/deploy?type=docker&image=docker.io/yym68686/chatgpt:latest&name=chatbot)
⚠️ 注意:使用 Koyeb 部署时,必须添加环境变量 `WEB_HOOK`,否则机器人无法接收消息。使用类似 `https://appname.koyeb.app` 的字符串作为 `WEB_HOOK` 的值Koyeb 会自动分配一个二级域名。
### 仓库部署
1. fork 本仓库 [点击 fork 本仓库](https://github.com/yym68686/ChatGPT-Telegram-Bot/fork)
2. 部署时候需要选择以仓库的方式,`Run command` 设置为 `python3 bot.py``Exposed ports` 设置为 `8080`
3. [安装 pull](https://github.com/apps/pull) 自动同步本仓库。
## Zeabur 远程部署
一键部署:
[![在 Zeabur 上部署](https://zeabur.com/button.svg)](https://zeabur.com/templates/R5JY5O?referralCode=yym68686)
如果您需要后续功能更新,建议采用以下部署方法:
- 首先 fork 这个仓库,然后注册 [Zeabur](https://zeabur.com)。目前Zeabur 不支持免费的 Docker 容器部署。如果你需要使用 Zeabur 来部署这个项目的机器人,你需要升级到 Developer Plan。幸运的是Zeabur 推出了他们的[赞助计划](https://zeabur.com/docs/billing/sponsor),为这个项目的所有贡献者提供一个月的 Developer Plan。如果你有想要增强的功能欢迎提交 pull requests 到这个项目。
- 从您自己的Github仓库导入。
- 设置所需的环境变量,并重新部署。
- 如果您需要后续的功能更新,只需在您自己的代码库中同步此代码库,并在 Zeabur 中重新部署以获取最新功能。
## Replit 远程部署
[![在 Repl.it 上运行](https://replit.com/badge/github/yym68686/ChatGPT-Telegram-Bot)](https://replit.com/new/github/yym68686/ChatGPT-Telegram-Bot)
导入 Github 仓库后,设置运行命令
```bash
pip install -r requirements.txt > /dev/null && python3 bot.py
```
在工具侧边栏中选择“Secrets”添加机器人所需的环境变量其中
- WEB_HOOK: Replit 会自动为您分配一个域名,填写 `https://appname.username.repl.co`
- 记得打开“始终开启”
点击屏幕顶部的运行按钮来运行机器人。
## fly.io 远程部署
官方文档: https://fly.io/docs/
使用 Docker 镜像部署 fly.io 应用程序
```bash
flyctl launch --image yym68686/chatgpt:latest
```
在提示时输入应用程序的名称,并选择“否”以初始化 Postgresql 或 Redis。
按照提示进行部署。官方控制面板将提供一个二级域名,可用于访问服务。
设置环境变量
```bash
flyctl secrets set BOT_TOKEN=bottoken
flyctl secrets set API=
# 可选
flyctl secrets set WEB_HOOK=https://flyio-app-name.fly.dev/
flyctl secrets set NICK=javis
```
查看所有环境变量
```bash
flyctl secrets list
```
删除环境变量
```bash
flyctl secrets unset MY_SECRET DATABASE_URL
```
ssh 到 fly.io 容器
```bash
flyctl ssh issue --agent
# ssh 连接
flyctl ssh establish
```
检查 webhook URL 是否正确
```bash
https://api.telegram.org/bot<token>/getWebhookInfo
```
## Docker 本地部署
启动容器
```bash
docker run -p 80:8080 --name chatbot -dit \
-e BOT_TOKEN=your_telegram_bot_token \
-e API= \
-e API_URL= \
-v ./user_configs:/home/user_configs \
yym68686/chatgpt:latest
```
或者如果你想使用 Docker Compose这里有一个 docker-compose.yml 示例:
```yaml
version: "3.5"
services:
chatgptbot:
container_name: chatgptbot
image: yym68686/chatgpt:latest
environment:
- BOT_TOKEN=
- API=
- API_URL=
volumes:
- ./user_configs:/home/user_configs
ports:
- 80:8080
```
在后台运行 Docker Compose 容器
```bash
docker-compose pull
docker-compose up -d
# uni-api
docker-compose -f docker-compose-uni-api.yml up -d
```
将存储库中的Docker镜像打包并上传到Docker Hub
```bash
docker build --no-cache -t chatgpt:latest -f Dockerfile.build --platform linux/amd64 .
docker tag chatgpt:latest yym68686/chatgpt:latest
docker push yym68686/chatgpt:latest
```
一键重启 Docker 镜像
```bash
set -eu
docker pull yym68686/chatgpt:latest
docker rm -f chatbot
docker run -p 8080:8080 -dit --name chatbot \
-e BOT_TOKEN= \
-e API= \
-e API_URL= \
-e GOOGLE_API_KEY= \
-e GOOGLE_CSE_ID= \
-e claude_api_key= \
-v ./user_configs:/home/user_configs \
yym68686/chatgpt:latest
docker logs -f chatbot
```
该脚本用于通过单个命令重启Docker镜像。它首先删除名为“chatbot”的现有Docker容器如果存在。然后它运行一个名为“chatbot”的新Docker容器暴露端口8080并设置各种环境变量。使用的Docker镜像是“yym68686/chatgpt:latest”。最后它跟踪“chatbot”容器的日志。
## 🚀 源代码本地部署
python >= 3.10
直接从源代码运行机器人而不使用docker克隆仓库
```bash
git clone --recurse-submodules --depth 1 -b main --quiet https://github.com/yym68686/ChatGPT-Telegram-Bot.git
```
安装依赖项:
```bash
pip install -r requirements.txt
```
设置环境变量:
```bash
export BOT_TOKEN=
export API=
```
运行:
```bash
python bot.py
```
## 🧩 插件
机器人支持多种插件包括DuckDuckGo 和 Google 搜索、URL 摘要、ArXiv 论文摘要、DALLE-3 画图和代码解释器等。您可以通过设置环境变量来启用或禁用这些插件。
- 如何开发插件?
插件相关的代码全部在本仓库git 子模块ModelMerge里面ModelMerge是我开发的一个独立的仓库用于处理API请求对话历史记录管理等功能。当你使用git clone的--recurse-submodules参数克隆本仓库后ModelMerge会自动下载到本地。插件所有的代码在本仓库中的相对路径为 `ModelMerge/src/ModelMerge/plugins`。你可以在这个目录下添加自己的插件代码。插件开发的流程如下:
1.`ModelMerge/src/ModelMerge/plugins` 目录下创建一个新的 Python 文件,例如 `myplugin.py`。在 `ModelMerge/src/ModelMerge/plugins/__init__.py` 文件中导入你的插件,例如 `from .myplugin import MyPlugin`
2.`ModelMerge/src/ModelMerge/tools/chatgpt.py` 里面的 `function_call_list` 变量中添加你的插件OpenAI tool格式详细的请求体。Claude Gemini tool 不需要额外编写你仅需要填写OpenAI格式的tool请求体程序在请求Gemini或者Claude API的时候会自动转换为Claude/Gemini tool格式。`function_call_list` 是一个字典,键是插件的名称,值是插件的请求体。请保证`function_call_list` 字典的键名保证唯一性,不能和已有的插件键名重复。
3.`ModelMerge/src/ModelMerge/plugins/config.py` 里面的 `PLUGINS` 字典里面添加键值对,键是插件的名称,值是插件的环境变量及其默认值。这个默认值是插件的开关,如果默认值是`True`,那么插件默认是开启的,如果默认值是 `False`,那么插件默认是关闭的,需要在用户在 `/info` 命令里面手动开启。
4. 最后,在 `ModelMerge/src/ModelMerge/plugins/config.py` 里面的函数 `get_tools_result_async` 添加插件调用的代码,当机器人需要调用插件的时候,会调用这个函数。你需要在这个函数里面添加插件的调用代码。
完成上面的步骤,你的插件就可以在机器人中使用了。🎉
## 📄 常见问题
- WEB_HOOK 环境变量有什么用?应该如何使用?
WEB_HOOK 是一个 webhook 地址。具体来说,当 Telegram 机器人收到用户消息时,它会将消息发送到 Telegram 服务器,然后 Telegram 服务器将消息转发到机器人设置的 WEB_HOOK 地址的服务器。因此,当消息发送到机器人时,机器人几乎立即执行处理程序。通过 WEB_HOOK 接收消息比未设置 WEB_HOOK 时的响应时间更快。
当使用 Zeabur、Replit 或 Koyeb 等平台部署机器人时,这些平台会提供一个域名,你需要将其填写在 WEB_HOOK 中,以便机器人接收用户消息。当然,不设置 WEB_HOOK 也是可以的,但机器人的响应时间会稍长一些,虽然差别不大,所以一般来说不需要设置 WEB_HOOK。
当在服务器上部署一个机器人时你需要使用像nginx或caddy这样的反向代理工具将Telegram服务器发送的消息转发到你的服务器这样机器人才能接收到用户消息。因此你需要将WEB_HOOK设置为你服务器的域名并将请求WEB_HOOK的流量转发到机器人所在的服务器和相应端口。例如在caddy中你可以在caddy配置文件/etc/caddy/Caddyfile中这样配置
```caddy
your_webhook_domain.com {
reverse_proxy localhost:8082
}
```
- 为什么我不能使用谷歌搜索?
默认情况下提供DuckDuckGo搜索。Google搜索的官方API需要用户申请。它可以提供GPT之前无法回答的实时信息例如今天微博的热门话题、特定地点的今日天气以及某个人或新闻事件的进展。
- 为什么即使我添加了Google搜索API我还是不能使用搜索功能
有两种可能性:
1. 只有支持工具使用的大型语言模型LLMAPI才能使用搜索功能。目前本项目仅支持 OpenAI、Claude 和 Gemini 系列模型的 API 进行搜索功能。其他模型提供商的 API 目前不支持在本项目中使用工具。如果您有希望适配的模型提供商,可以联系维护者。
2. 如果您使用了 OpenAI、Claude 和 Gemini 系列模型的 API但无法使用搜索功能可能是因为搜索功能未启用。您可以通过 `/info` 命令点击偏好设置来检查搜索功能是否启用。
3. 如果您使用了 OpenAI、Claude 和 Gemini 系列模型的 API请确保你使用的是官方 API如果你使用的是第三方中转 API提供商可能通过网页逆向的方式向你提供 API通过网页逆向的方式提供 API 无法使用 tools use即不能使用本项目所有的插件。如果你确认你使用的是官方 API仍然无法成功搜索请联系开发人员。
- 我如何切换模型?
您可以在聊天窗口中使用 "/info" 命令在 GPT3.5/4/4o 和其他模型之间切换。
- 它可以在一个群组中部署吗?
是的,它支持群组白名单以防止滥用和信息泄露。
- 为什么我把机器人添加到群组后它不能说话?
如果这是您第一次将机器人添加到群聊中您需要在botfather中将群组隐私设置为禁用然后将机器人从群聊中移除并重新添加以便正常使用。
第二种方法是将机器人设置为管理员,这样机器人就可以正常使用了。然而,如果你想将机器人添加到你不是管理员的群聊中,第一种方法更为合适。
另一种可能性是 GROUP_LIST 集不是当前的群聊 ID。请检查是否设置了 GROUP_LISTGROUP_LIST 是群 ID而不是群名称。群 ID 以减号开头,后跟一串数字。
- GROUP_LIST、ADMIN_LIST 和白名单的设置如何影响机器人的行为?
如果未设置白名单所有人都可以使用机器人。如果设置了白名单只有白名单中的用户可以使用机器人。如果设置了GROUP_LIST只有GROUP_LIST中的群组可以使用机器人。如果同时设置了白名单和GROUP_LIST群组中的所有人都可以使用机器人但只有白名单中的用户可以私聊机器人。如果设置了ADMIN_LIST只有ADMIN_LIST中的用户可以使用/info命令来更改机器人的设置。如果未设置ADMIN_LIST所有人都可以使用/info命令来更改机器人的配置。GROUP_LIST 也可以包含频道频道ID以减号开头后跟一串数字。
- 我应该如何设置 API_URL
API_URL 支持所有后缀包括https://api.openai.com/v1/chat/completions、https://api.openai.com/v1 和 https://api.openai.com/。机器人将根据不同的用途自动分配不同的端点。
- 是否有必要配置 web_hook 环境变量?
web_hook 不是强制性的环境变量。你只需要设置域名(必须与 WEB_HOOK 一致)和其他根据你的应用功能所需的环境变量。
- 我用docker compose部署了一个机器人。如果文档放在本地服务器上应该挂载到哪个目录才能生效我需要设置额外的配置和修改代码吗
您可以直接通过聊天框将文档发送给机器人,机器人会自动解析文档。要使用文档对话功能,您需要启用历史对话功能。无需对文档进行额外处理。
- 我还是无法让它正常工作……我想在一个群组中使用它,我已经将 ADMIN_LIST 设置为我自己,并将 GROUP_LIST 设置为那个群组,白名单留空。但是,只有我可以在那个群组中使用它,群组中的其他成员被提示没有权限,这是怎么回事?
这是一个故障排除指南:请仔细检查 GROUP_LIST 是否正确。Telegram 群组的 ID 以负号开头,后跟一系列数字。如果不是,请使用此机器人 [bot](https://t.me/getidsbot) 重新获取群组 ID。
- 我上传了一个文档,但它没有根据文档的内容做出响应。怎么回事?
要使用文档问答功能,您必须先启用历史记录。您可以通过 `/info` 命令开启历史记录,或者通过将环境变量 `PASS_HISTORY` 设置为大于2来默认启用历史记录。请注意启用历史记录将会产生额外费用因此该项目默认不启用历史记录。这意味着在默认设置下无法使用问答功能。在使用此功能之前您需要手动启用历史记录。
- 设置 `NICK` 后,当我 @ 机器人时没有响应,它只在消息以昵称开头时才回复。我怎样才能让它同时响应昵称和 @机器人名
在群聊场景中,如果环境变量 `NICK` 未设置,机器人将接收所有群消息并回应所有消息。因此,有必要设置 `NICK`。设置 `NICK` 后,机器人只会回应以 `NICK` 开头的消息。所以,如果你想 @ 机器人以获得回应,你只需将 NICK 设置为 @botname。这样,当你在群里 @ 机器人时,机器人会检测到消息是以 @botname 开头的,并会回应该消息。
- 历史会保留多少条消息?
所有其他模型使用官方上下文长度设置,例如,`gpt-3.5-turbo-16k` 的上下文是 16k`gpt-4o` 的上下文是 128k`Claude3/3.5` 的上下文是 200k。此限制是为了节省用户成本因为大多数场景不需要高上下文。
- 如何从模型列表中删除默认模型名称?
你可以使用 `CUSTOM_MODELS` 环境变量来完成它。例如,如果你想添加 gpt-4o 并从模型列表中移除 gpt-3.5 模型,请将 `CUSTOM_MODELS` 设置为 `gpt-4o,-gpt-3.5`。如果你想一次性删除所有默认模型,你可以将 `CUSTOM_MODELS` 设置为 `-all,gpt-4o`
- 对话隔离具体是如何工作的?
对话总是基于不同的窗口隔离而不是不同的用户。这意味着在同一个群聊窗口、同一个主题和同一个私聊窗口内都会被视为同一个对话。CHAT_MODE 只影响配置是否隔离。在多用户模式下,每个用户的插件配置、偏好等都是独立的,互不影响。在单用户模式下,所有用户共享相同的插件配置和偏好。然而,对话历史总是隔离的。对话隔离是为了保护用户隐私,确保用户的对话历史、插件配置、偏好等不被其他用户看到。
- 为什么 Docker 镜像很久没有更新了?
Docker 镜像只存储程序的运行环境。目前,程序的运行环境是稳定的,环境依赖几乎没有变化,所以 Docker 镜像没有更新。每次重新部署 Docker 镜像时,它会拉取最新的代码,因此不需要担心 Docker 镜像更新的问题。
- 为什么容器在启动后报告错误 "http connect error or telegram.error.TimedOut: Timed out"?
此问题可能是由于部署 Docker 的服务器无法连接到 Telegram 服务器或 Telegram 服务器的不稳定性引起的。
1. 在大多数情况下,重新启动服务,检查服务器网络环境,或等待 Telegram 服务恢复即可。
2. 此外您可以尝试通过网络钩子与Telegram服务器进行通信这可能会解决问题。
- 如何让 docker 无限重试而不是一开始就停止?
Docker 中的 `--restart unless-stopped` 参数设置容器的重启策略。具体来说:
1. unless-stopped: 这个策略意味着容器如果停止了会自动重启除非它是被手动停止的。换句话说如果容器由于错误或系统重启而停止它会自动重启。然而如果你手动停止了容器例如使用docker stop命令它将不会自行重启。
此参数对于需要连续运行的服务特别有用,因为它确保服务能够在意外中断后自动恢复,而无需手动干预。
2. 示例:假设你有一个运行 web 服务器的 Docker 容器,并且你希望它在崩溃或系统重启时自动重启,但在你手动停止它时不重启。你可以使用以下命令:
```shell
docker run -d --name my-web-server -p 80:80 --restart unless-stopped my-web-server-image
```
在此示例中,名为 my-web-server 的 web 服务器容器将自动重新启动,除非您手动停止它。
- 切换模型,我需要重新输入提示吗?
是的,因为切换模型会重置历史记录,所以您需要重新输入提示。
- PASS_HISTORY 的适当值是什么?
PASS_HISTORY的数量严格等于对话历史中的消息数量。推荐值是2因为系统提示占用了一个消息计数。如果设置为0PASS_HISTORY将自动重置为2以确保对话正常进行。当PASS_HISTORY小于或等于2时机器人的行为可以被视为只记住当前对话即一个问题和一个答案并且下次不会记住之前的问答内容。PASS_HISTORY的最大值没有限制但请注意对话历史中的消息越多每次对话的成本就越高。当未设置PASS_HISTORY时默认值为9999表示对话历史中的消息数量为9999。
- 机器人令牌可以有多个令牌吗?
不,将来它会支持多个机器人令牌。
- 如何使用机器人命令?
1. `/info`: 机器人 `/info` 命令可以查看机器人的配置信息包括当前使用的模型、API URL、API 密钥等。它还可以更改机器人的显示语言、偏好设置和插件设置。
2. `/start`:机器人 `/start` 命令可以查看机器人的使用说明、使用方法和功能介绍。您可以使用 `/start` 命令设置 API 密钥。如果您有官方的 OpenAI API 密钥,请使用以下命令:`/start your_api_key`。如果您使用的是第三方 API 密钥,请使用以下命令:`/start https://your_api_url your_api_key`
3. `/reset`:机器人 `/reset` 命令可以清除机器人的对话消息,并强制机器人停止生成回复。如果你想重置系统提示,请使用以下命令:`/reset your_system_prompt`。但是,`/reset` 命令永远不会恢复机器人的显示语言、偏好设置、插件设置、使用中的模型、API URL、API 密钥、系统提示等。
- 如果 Koyeb 部署失败怎么办?
Koyeb 的免费服务可能有点不稳定,所以部署失败是很常见的。你可以尝试重新部署,如果还是不行的话,考虑换到另一个平台。😊
- 为什么我使用 CUSTOM_MODELS 删除默认模型名称后,再使用 /info 命令检查时它又重新出现了?
如果你使用 `docker-compose.yml` 部署,不要在 `CUSTOM_MODELS` 的值周围添加引号。错误用法:`CUSTOM_MODELS="gpt-4o,-gpt-3.5"`,否则会导致环境变量解析错误,导致默认模型名称再次出现。错误的方式会被解析为删除 `gpt-3.5"` 模型,这将导致默认模型名称 `gpt-3.5` 未被删除。正确的写法是:`CUSTOM_MODELS=gpt-4o,-gpt-3.5`
## 参考文献
https://core.telegram.org/bots/api
https://github.com/acheong08/ChatGPT
https://github.com/franalgaba/chatgpt-telegram-bot-serverless
https://github.com/gpchelkin/scdlbot/blob/d64d14f6c6d357ba818e80b8a0a9291c2146d6fe/scdlbot/__main__.py#L8
消息使用的markdown渲染是我的另一个[项目](https://github.com/yym68686/md2tgmd)。
duckduckgo AI: https://github.com/mrgick/duck_chat
## 赞助商
我们感谢以下赞助商的支持:
<!-- $300+$380+¥1200+¥300+$30+$25+$20+¥50 -->
- @fasizhuanqian: 300 USDT
- @ZETA: $380
- @yuerbujin: ¥1200
- @RR5AM: ¥300
- @IKUNONHK: 30 USDT
- @miya0v0: 30 USDT
- [@Zeabur](https://zeabur.com?referralCode=yym68686&utm_source=yym68686&utm_campaign=oss): $25
- @Bill_ZKE: 20 USDT
- @wagon_look¥50
<!-- [![Deployed on Zeabur](https://zeabur.com/deployed-on-zeabur-dark.svg)](https://zeabur.com?referralCode=yym68686&utm_source=yym68686&utm_campaign=oss) -->
## 如何赞助我们
如果您想支持我们的项目,您可以通过以下方式赞助我们:
1. [PayPal](https://www.paypal.me/yym68686)
2. [USDT-TRC20](https://pb.yym68686.top/~USDT-TRC20)USDT-TRC20 钱包地址:`TLFbqSv5pDu5he43mVmK1dNx7yBMFeN7d8`
3. [微信](https://pb.yym68686.top/~wechat)
4. [支付宝](https://pb.yym68686.top/~alipay)
感谢您的支持!
## 星星历史
<a href="https://github.com/yym68686/ChatGPT-Telegram-Bot/stargazers">
<img width="500" alt="星历史图表" src="https://api.star-history.com/svg?repos=yym68686/ChatGPT-Telegram-Bot&type=Date">
</a>
## 许可证
本项目根据 GPLv3 许可证授权,这意味着您可以自由复制、分发和修改该软件,只要所有修改和衍生作品也以相同的许可证发布。

View File

@@ -0,0 +1,22 @@
version: "3.5"
services:
chatgptbot:
container_name: chatgptbot
image: yym68686/chatgpt:latest
environment:
- BOT_TOKEN=7908126551:AAE8VhwwfcZ3ru-ecJJo_bMADYADgh1Shzs
- whitelist=6868680170
- ADMIN_LIST=6868680170
- temperature=0.7
- LANGUAGE=Simplified Chinese
- PASS_HISTORY=5
- GOOGLE_AI_API_KEY=AIzaSyBv2JN5aY_OKDI5e1aVEf6uDQli65X9NZM
- API_URL=https://api.x.ai/v1/chat/completions
- API=xai-pQCto8hXbSLey5rHjohMZGjqaOlSwgFhofEckr5a7q9wQaJbpAV5xyEVGoq8JbhBoX1QVgUm5GzK2DkG
- GROQ_API_KEY=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
- CUSTOM_MODELS=grok-2-latest
- GPT_ENGINE=grok-2-latest
volumes:
- ./user_configs:/home/user_configs
ports:
- 3080:8080

View File

@@ -0,0 +1,2 @@
https://github.com/yym68686/ChatGPT-Telegram-Bot

View File

@@ -1,5 +1,3 @@
# https://hub.docker.com/r/bitnami/gitea
version: '2' version: '2'
services: services:
postgresql: postgresql:
@@ -12,7 +10,7 @@ services:
- POSTGRESQL_PASSWORD=Superwdd.12 - POSTGRESQL_PASSWORD=Superwdd.12
# ALLOW_EMPTY_PASSWORD is recommended only for development. # ALLOW_EMPTY_PASSWORD is recommended only for development.
gitea: gitea:
image: docker.io/bitnami/gitea:1.19.3-debian-11-r0 image: docker.io/bitnami/gitea:1.24.5-debian-12-r0
volumes: volumes:
- '/data/gitea/gitea_data:/bitnami/gitea' - '/data/gitea/gitea_data:/bitnami/gitea'
environment: environment:
@@ -24,11 +22,11 @@ services:
- GITEA_ADMIN_PASSWORD=lovemm.23 - GITEA_ADMIN_PASSWORD=lovemm.23
- GITEA_ADMIN_EMAIL=wdd@107421.xyz - GITEA_ADMIN_EMAIL=wdd@107421.xyz
- GITEA_HTTP_PORT=3000 - GITEA_HTTP_PORT=3000
# - GITEA_DOMAIN=gitea.107421.xyz
# - GITEA_ROOT_URL=gitea.107421.xyz
- GITEA_SSH_LISTEN_PORT=22222 - GITEA_SSH_LISTEN_PORT=22222
- GITEA_APP_NAME=Gitea-闲下来就喝杯茶吧 - ARCHIVE_CLEANUP_ENABLED = true
- GITEA_DOMAIN=192.168.35.70 - ARCHIVE_CLEANUP_TIMEOUT = 168h #设置归档文件过期时间默认7天
- GITEA_PROTOCOL=http
- GITEA_RUN_MODE=prod
ports: ports:
- '3000:3000' - '3000:3000'
- '22222:22222' - '22222:22222'

View File

@@ -1,16 +0,0 @@
#!/bin/bash
export MIOIO_DATA_PATH=/var/lib/docker/minio-pv
mkdir -p ${MIOIO_DATA_PATH}
chown -R 1001:1001 ${MIOIO_DATA_PATH}
docker run -d \
--env MINIO_ACCESS_KEY="cmii" \
--env MINIO_SECRET_KEY="boge14@Level5" \
--volume ${MIOIO_DATA_PATH}:/data \
--network host \
--name minio-server \
bitnami/minio:2021.11.24-debian-10-r0

View File

@@ -1,14 +0,0 @@
#!/bin/bash
docker run -d \
-e MODE=standalone \
-e MYSQL_SERVICE_HOST=localhost \
-e MYSQL_SERVICE_PORT=33306 \
-e MYSQL_SERVICE_DB_NAME=nacos_config \
-e MYSQL_SERVICE_USER=root \
-e MYSQL_SERVICE_PASSWORD=boge14@Level5 \
--name nacos-server \
--network host \
nacos/nacos-server:2.0.2

View File

@@ -1,8 +1,14 @@
#!/bin/bash #!/bin/bash
curl -X GET "https://api.cloudflare.com/client/v4/user/tokens/verify" \
-H "Authorization: Bearer T7LxBemfe8SNGWkT9uz2XIc1e22ifAbBv_POJvDP" \
-H "Content-Type:application/json"
#export DOMAIN_NAME=chat.107421.xyz #export DOMAIN_NAME=chat.107421.xyz
export DOMAIN_NAME=push.107421.xyz export DOMAIN_NAME=push.107421.xyz
# 可以操作DNS的API Token
export CF_Token="oXJRP5XI8Zhipa_PtYtB_jy6qWL0I9BosrJEYE8p" export CF_Token="oXJRP5XI8Zhipa_PtYtB_jy6qWL0I9BosrJEYE8p"
export CF_Account_ID="dfaadeb83406ef5ad35da02617af9191" export CF_Account_ID="dfaadeb83406ef5ad35da02617af9191"
export CF_Zone_ID="511894a4f1357feb905e974e16241ebb" export CF_Zone_ID="511894a4f1357feb905e974e16241ebb"

View File

@@ -69,7 +69,7 @@ sudo sysctl -p /etc/sysctl.d/proxy-wdd.conf
sysctl net.ipv4.tcp_congestion_control sysctl net.ipv4.tcp_congestion_control
sudo ethtool -K enp0s3 gro on sudo ethtool -K enp3s0 gro on
sudo ethtool -K enp0s3 gso on sudo ethtool -K enp3s0 gso on
sudo ethtool -K enp0s3 tso on sudo ethtool -K enp3s0 tso on

View File

@@ -1,38 +0,0 @@
{
"listen": "0.0.0.0",
"port": 29999,
"protocol": "vless",
"settings": {
"clients": [
{
"id": "RoMoH00dOl3zaQjdUKB6W0SS-wDYENgI3I7cREYwp1M",
"flow": "xtls-rprx-vision"
}
],
"decryption": "none"
},
"streamSettings": {
"network": "tcp",
"security": "reality",
"realitySettings": {
"dest": "speed.cloudflare.com",
"serverNames": [
"speed.cloudflare.com"
],
"privateKey": "yNsDptp-3i-KqhLHA-RBLrVlJuiYeDUekirp-fkerQA",
"shortIds": [
"abc124cc",
"666asdcd"
]
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls",
"quic"
],
"routeOnly": true
}
}
}

View File

@@ -0,0 +1,82 @@
{
"log": {
"loglevel": "info"
},
"inbounds": [
{
"port": 24443,
"protocol": "vless",
"tag": "proxy",
"settings": {
"clients": [
{
"id": "f8702759-f402-4e85-92a6-8540d577de22",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "freedom"
},
{
"protocol": "freedom",
"tag": "proxy"
}
],
"routing": {
"domainStrategy": "AsIs",
"domainMatcher": "hybrid",
"rules": [
{
"type": "field",
"inboundTag": [
"proxy"
],
"outboundTag": "proxy"
}
]
}
}

View File

@@ -0,0 +1,84 @@
{
"inbounds": [
{
"port": 24444,
"protocol": "vless",
"tag": "fv-ge-frk",
"settings": {
"clients": [
{
"id": "6055eac4-dee7-463b-b575-d30ea94bb768",
"flow": "xtls-rprx-vision",
"email": "franklin@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fv-ge-frk",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "de-01.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fv-ge-frk"
],
"outboundTag": "fv-ge-frk"
}
]
}
}

View File

@@ -0,0 +1,84 @@
{
"inbounds": [
{
"port": 24445,
"protocol": "vless",
"tag": "fv-kr-sel",
"settings": {
"clients": [
{
"id": "1cd284b2-d3d8-4165-b773-893f836c2b51",
"flow": "xtls-rprx-vision",
"email": "seoul@fastestvpn.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fv-kr-sel",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "kr.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fv-kr-sel"
],
"outboundTag": "fv-kr-sel"
}
]
}
}

View File

@@ -0,0 +1,84 @@
{
"inbounds": [
{
"port": 24446,
"protocol": "vless",
"tag": "fv-jp-tyk",
"settings": {
"clients": [
{
"id": "bf0e9c35-84a9-460e-b5bf-2fa9f2fb3bca",
"flow": "xtls-rprx-vision",
"email": "seoul@fastestvpn.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fv-jp-tyk",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "jpjp.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fv-jp-tyk"
],
"outboundTag": "fv-jp-tyk"
}
]
}
}

View File

@@ -0,0 +1,85 @@
{
"inbounds": [
{
"port": 24447,
"protocol": "vless",
"tag": "fv-uk-lon",
"settings": {
"clients": [
{
"id": "adc19390-373d-4dfc-b0f6-19fab1b6fbf6",
"flow": "xtls-rprx-vision",
"email": "london@fastestvpn.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fv-uk-lon",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "uk-02.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fv-uk-lon"
],
"outboundTag": "fv-uk-lon"
}
]
}
}

View File

@@ -0,0 +1,85 @@
{
"inbounds": [
{
"port": 24448,
"protocol": "vless",
"tag": "fv-sgp",
"settings": {
"clients": [
{
"id": "e31bc28e-8ebd-4d72-a98e-9227f26dfac3",
"flow": "xtls-rprx-vision",
"email": "singapore@fastestvpn.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fv-sgp",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "sg-01.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fv-sgp"
],
"outboundTag": "fv-sgp"
}
]
}
}

View File

@@ -0,0 +1,85 @@
{
"inbounds": [
{
"port": 24452,
"protocol": "vless",
"tag": "fastestvpm-hongkong",
"settings": {
"clients": [
{
"id": "cdf0b19a-9524-48d5-b697-5f10bb567734",
"flow": "xtls-rprx-vision",
"email": "hongkong@fastestvpn.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "wireguard",
"tag": "fastestvpm-hongkong",
"settings": {
"secretKey": "2CAHWJu6+lHWf3teVHLuXoF4Vad6xknSY/qLWPvgoGY=",
"address": ["172.16.145.79/32"],
"peers": [
{
"publicKey": "658QxufMbjOTmB61Z7f+c7Rjg7oqWLnepTalqBERjF0=",
"endpoint": "hk.jumptoserver.com:51820"
}
]
}
}
],
"routing": {
"rules": [
{
"type": "field",
"inboundTag": [
"fastestvpm-hongkong"
],
"outboundTag": "fastestvpm-hongkong"
}
]
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -4,12 +4,13 @@
}, },
"inbounds": [ "inbounds": [
{ {
"port": 443, "port": 24443,
"protocol": "vless", "protocol": "vless",
"tag": "proxy",
"settings": { "settings": {
"clients": [ "clients": [
{ {
"id": "b4bdf874-8c03-5bd8-8fd7-5e409dfd82c0", "id": "f8702759-f402-4e85-92a6-8540d577de22",
"flow": "xtls-rprx-vision", "flow": "xtls-rprx-vision",
"email": "cc@vless.com", "email": "cc@vless.com",
"level": 0 "level": 0
@@ -17,11 +18,6 @@
], ],
"decryption": "none", "decryption": "none",
"fallbacks": [ "fallbacks": [
{
"name": "xx.tc.hk.go.107421.xyz",
"alpn": "h2",
"dest": "@trojan-h2"
},
{ {
"dest": "/dev/shm/h2c.sock", "dest": "/dev/shm/h2c.sock",
"xver": 2, "xver": 2,
@@ -42,11 +38,6 @@
"ocspStapling": 3600, "ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer", "certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key" "keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
},
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/xx.tc.hk.go.107421.xyz.key"
} }
], ],
"minVersion": "1.2", "minVersion": "1.2",
@@ -64,31 +55,28 @@
"tls" "tls"
] ]
} }
},
{
"listen": "@trojan-h2",
"protocol": "trojan",
"settings": {
"clients": [
{
"email": "ice@qq.com",
"password": "Vad3.123a)asd1234-asdasd.asdazzS.123",
"level": 0
}
]
},
"streamSettings": {
"network": "h2",
"security": "none",
"httpSettings": {
"path": "/status"
}
}
} }
], ],
"outbounds": [ "outbounds": [
{ {
"protocol": "freedom" "protocol": "freedom"
},
{
"protocol": "freedom",
"tag": "proxy"
}
],
"routing": {
"domainStrategy": "AsIs",
"domainMatcher": "hybrid",
"rules": [
{
"type": "field",
"inboundTag": [
"proxy"
],
"outboundTag": "proxy"
} }
] ]
}
} }

View File

@@ -1,126 +0,0 @@
{
"log": {
"loglevel": "debug"
},
"inbounds": [
{
"port": 443,
"protocol": "vless",
"settings": {
"clients": [
{
"id": "b4bdf874-8c03-5bd8-8fd7-5e409dfd82c0",
"flow": "xtls-rprx-vision"
}
],
"decryption": "none",
"fallbacks": [
{
"name": "xx.tc.hk.go.107421.xyz",
"path": "/status",
"dest": 5000,
"xver": 1
},
{
"name": "book.107421.xyz",
"dest": 5003,
"xver": 1
},
{
"name": "book.107421.xyz",
"alpn": "h2",
"dest": 5004,
"xver": 1
},
{
"dest": 5001,
"xver": 1
},
{
"alpn": "h2",
"dest": 5002,
"xver": 1
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"alpn": ["h2", "http/1.1"],
"certificates": [
{
"certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key"
}
]
}
}
},
{
"port": 5000,
"listen": "127.0.0.1",
"protocol": "vless",
"settings": {
"clients": [
{
"id": "481d1403-de9a-5ae1-b921-18c04a4a9da0",
"level": 0,
"email": "dd@qq.com"
}
],
"decryption": "none"
},
"streamSettings": {
"network": "ws",
"security": "tls",
"wsSettings": {
"acceptProxyProtocol": true,
"path": "/status"
},
"tlsSettings": {
"alpn": ["h2", "http/1.1"],
"certificates": [
{
"certificateFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/xx.tc.hk.go.107421.xyz.key"
}
]
}
}
},
{
"listen": "0.0.0.0",
"port": 29999,
"protocol": "trojan",
"settings": {
"clients": [
{
"password": "V2ryStr0ngP0ss"
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"alpn": [
"h2",
"http/1.1"
],
"certificates": [
{
"certificateFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/xx.tc.hk.go.107421.xyz.key"
}
]
}
}
}
],
"outbounds": [
{
"protocol": "freedom"
}
]
}

View File

@@ -20,6 +20,13 @@
{ {
"name": "pan.107421.xyz", "name": "pan.107421.xyz",
"dest": 5003, "dest": 5003,
"alpn": "h2",
"xver": 2
},
{
"name": "push.107421.xyz",
"dest": 5004,
"alpn": "h2",
"xver": 2 "xver": 2
}, },
{ {
@@ -62,6 +69,11 @@
"ocspStapling": 3600, "ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/pan.107421.xyz_ecc/fullchain.cer", "certificateFile": "/root/.acme.sh/pan.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/pan.107421.xyz_ecc/pan.107421.xyz.key" "keyFile": "/root/.acme.sh/pan.107421.xyz_ecc/pan.107421.xyz.key"
},
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/push.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/push.107421.xyz_ecc/push.107421.xyz.key"
} }
], ],
"minVersion": "1.2", "minVersion": "1.2",

View File

@@ -0,0 +1,308 @@
{
"log": {
"loglevel": "error"
},
"inbounds": [
{
"port": 443,
"tag": "Seoul-amd64-01",
"protocol": "vless",
"settings": {
"clients": [
{
"id": "1089cc14-557e-47ac-ac85-c07957b3cce3",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.s4.cc.hh.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.s4.cc.hh.107421.xyz_ecc/xx.s4.cc.hh.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
},
{
"port": 20443,
"protocol": "vless",
"tag": "Seoul-amd64-01->Tokyo-amd64-02",
"settings": {
"clients": [
{
"id": "21dab95b-088e-47bd-8351-609fd23cb33c",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.t2.ll.c0.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.t2.ll.c0.107421.xyz_ecc/xx.t2.ll.c0.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
},
{
"port": 21443,
"protocol": "vless",
"tag": "Seoul-amd64-01->Osaka-amd64-01",
"settings": {
"clients": [
{
"id": "4c2dd763-56e5-408f-bc8f-dbf4c1fe41f9",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.o1.vl.s4.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.o1.vl.s4.107421.xyz_ecc/xx.o1.vl.s4.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
},
{
"port": 22443,
"protocol": "vless",
"tag": "Seoul-amd64-01->Phoenix-amd64-02",
"settings": {
"clients": [
{
"id": "de576486-e254-4d9d-949a-37088358ec23",
"flow": "xtls-rprx-vision",
"email": "phoneix@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.p2.vl.s4.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.p2.vl.s4.107421.xyz_ecc/xx.p2.vl.s4.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "freedom"
},
{
"protocol": "freedom",
"tag": "Seoul-amd64-01"
},
{
"protocol": "blackhole",
"tag": "block"
},
{
"tag": "Seoul-amd64-01->Tokyo-amd64-02",
"protocol": "socks",
"settings": {
"servers": [
{
"address": "140.238.52.228",
"port": 1234
}
]
}
},
{
"tag": "Seoul-amd64-01->Phoenix-amd64-02",
"protocol": "socks",
"settings": {
"servers": [
{
"address": "129.146.171.163",
"port": 1234
}
]
}
},
{
"tag": "Seoul-amd64-01->Osaka-amd64-01",
"protocol": "socks",
"settings": {
"servers": [
{
"address": "140.83.84.142",
"port": 1234
}
]
}
}
],
"routing": {
"domainStrategy": "IPIfNonMatch",
"rules": [
{
"type": "field",
"inboundTag": [
"Seoul-amd64-01"
],
"outboundTag": "Seoul-amd64-01"
},
{
"type": "field",
"inboundTag": [
"Seoul-amd64-01->Tokyo-amd64-02"
],
"outboundTag": "Seoul-amd64-01->Tokyo-amd64-02"
},
{
"type": "field",
"inboundTag": [
"Seoul-amd64-01->Phoenix-amd64-02"
],
"outboundTag": "Seoul-amd64-01->Phoenix-amd64-02"
},
{
"type": "field",
"inboundTag": [
"Seoul-amd64-01->London-amd64-01"
],
"outboundTag": "Seoul-amd64-01->London-amd64-01"
},
{
"type": "field",
"inboundTag": [
"Seoul-amd64-01->Osaka-amd64-01"
],
"outboundTag": "Seoul-amd64-01->Osaka-amd64-01"
}
]
}
}

View File

@@ -0,0 +1,106 @@
# -----------------------------------------------------------------------------
# Clash 优化配置文件 (Optimized Clash Configuration File)
#
# 本配置旨在解决国内网站访问慢、DNS 解析超时等常见问题。
# 核心思想:通过精细化的 DNS 配置,实现国内外域名智能分流解析,
# 配合高效的规则集,达到最佳的网络访问体验。
# -----------------------------------------------------------------------------
# [通用设置] General Settings
# 混合端口:同时支持 HTTP 和 SOCKS5 代理协议。
mixed-port: 7890
# 允许局域网连接:允许同一局域网下的其他设备通过此 Clash 实例上网。
allow-lan: true
# 模式rule规则模式根据规则进行分流。
mode: rule
# 日志级别info。记录一般信息和错误方便排查问题。可选silent, error, warning, info, debug。
log-level: info
# 外部控制器:用于连接 Dashboard 面板进行图形化管理。
external-controller: '127.0.0.1:9090'
# --- 这是解决所有问题的关键部分 ---
dns:
# [总开关] 启用 Clash 内置的 DNS 服务器。必须为 true 才能使后续所有 DNS 设置生效。
enable: true
# [监听地址] DNS 服务器监听的地址和端口。'0.0.0.0:53' 表示允许局域网内其他设备使用 Clash 作为 DNS 服务器。
# 如果你只希望本机使用,可以改为 '127.0.0.1:53'。
listen: 0.0.0.0:53
# [IPv6 解析] 禁用 IPv6 解析。在国内大部分网络环境下,禁用可以避免不必要的解析延迟和连接问题。
ipv6: false
# [增强模式] 强烈推荐使用 fake-ip 模式,尤其是在开启 TUN 模式时。
# 它能通过返回虚假 IP 地址来接管所有应用的 DNS 请求,从而实现基于域名的精细化规则代理。
# 这是解决非代理感知程序(如命令行工具、某些桌面应用)代理问题的最佳方案。
enhanced-mode: fake-ip
# [Fake IP 地址池] fake-ip 模式使用的虚假 IP 地址范围。通常无需修改。
fake-ip-range: 198.18.0.1/16
# 仅用于解析下方 nameserver 和 fallback 中的 DoH/DoT 域名。必须使用纯 IP 地址。
# 这里选用可靠的国内公共 DNS。
default-nameserver:
- 223.5.5.5
- 119.29.29.29
# 用于解析国内域名。并发请求,谁快用谁。
# 使用国内的 DoH (DNS over HTTPS) 服务可以有效防止运营商劫持,且解析国内 CDN 准确、迅速。
nameserver:
- https://doh.pub/dns-query # 腾讯 DNSPod (DoH)
- https://dns.alidns.com/dns-query # 阿里 DNS (DoH)
# 当 nameserver 的解析结果被 fallback-filter 判定为污染时,将使用此组 DNS。
# 必须使用国外的、无污染的加密 DNS 服务,以确保能正确解析被 GFW 干扰的域名。
fallback:
- https://dns.google/dns-query # Google DNS (DoH)
- https://1.1.1.1/dns-query # Cloudflare DNS (DoH)
- tls://8.8.4.4:853 # Google DNS (DoT)
# [抗污染过滤器] 这是实现国内外智能分流的核心。
fallback-filter:
# [启用 GeoIP 过滤] 必须为 true。
geoip: true
# [GeoIP 信任代码] 仅当 nameserver 解析出的 IP 地址地理位置为中国 (CN) 时,才信任该结果。
# 如果解析出的 IP 在国外,则判定为 DNS 污染,转而使用 fallback 组的结果。
geoip-code: CN
# [代理节点] Proxies
# 此处请填写你自己的代理服务器信息。以下为示例格式。
proxies:
- name: "My-Proxy-Server-01"
type: ss
server: server_address
port: 443
cipher: aes-256-gcm
password: "password"
udp: true
# [代理组] Proxy Groups
# 用于组织代理节点,实现负载均衡、自动故障切换等策略。
proxy-groups:
- name: "PROXY"
type: select
proxies:
- "My-Proxy-Server-01"
- DIRECT
# [规则集] Rules
# 规则按从上到下的顺序进行匹配。
rules:
# 广告拦截
- DOMAIN-SUFFIX,ad.com,REJECT
# 常用国内网站直连
- DOMAIN-SUFFIX,cn,DIRECT
- DOMAIN-SUFFIX,163.com,DIRECT
- DOMAIN-SUFFIX,126.com,DIRECT
- DOMAIN-SUFFIX,qq.com,DIRECT
- DOMAIN-SUFFIX,tencent.com,DIRECT
- DOMAIN-SUFFIX,baidu.com,DIRECT
- DOMAIN-SUFFIX,taobao.com,DIRECT
- DOMAIN-SUFFIX,alipay.com,DIRECT
- DOMAIN-SUFFIX,jd.com,DIRECT
- DOMAIN-SUFFIX,zhihu.com,DIRECT
- DOMAIN-SUFFIX,weibo.com,DIRECT
- DOMAIN-SUFFIX,bilibili.com,DIRECT
# 局域网地址直连
- IP-CIDR,192.168.0.0/16,DIRECT
- IP-CIDR,10.0.0.0/8,DIRECT
- IP-CIDR,172.16.0.0/12,DIRECT
- IP-CIDR,127.0.0.0/8,DIRECT
# 中国大陆 IP 地址直连
- GEOIP,CN,DIRECT
# 剩余所有流量走代理
- MATCH,PROXY

View File

@@ -0,0 +1,667 @@
#--------------------------------------------------------------------------------#
# Clash 专家级配置文件 (适配 Clash.Meta 核心) #
#--------------------------------------------------------------------------------#
#
# 本配置文件专为在中国大陆网络环境中使用而设计,旨在提供一套自动化、智能化、高可用性
# 的网络流量管理方案。
#
# 核心特性:
# 1. TUN 模式: 接管系统所有网络流量,实现真正的全局透明代理。
# 2. 规则集 (Rule Providers): 动态从网络加载和更新分流规则,免去手动维护烦恼。
# 3. 分割 DNS (Split DNS): 智能区分国内外域名解析,有效抗 DNS 污染,兼顾速度与准确性。
# 4. 逻辑化规则排序: 通过精心设计的规则匹配顺序,实现精确的流量控制。
#
#--------------------------------------------------------------------------------#
#----------------#
# 常规配置 #
#----------------#
# HTTP 代理端口
port: 7890
# SOCKS5 代理端口
socks-port: 7891
# 允许局域网连接,设为 true 后,局域网内其他设备可将本机作为网关使用
allow-lan: true
# 代理模式rule 表示规则模式,是本配置的核心
mode: rule
# 日志级别info 级别提供了足够的信息且不过于冗长
log-level: info
# 外部控制器,用于让 GUI 客户端 (如 Clash Verge) 或 WebUI (如 yacd) 控制 Clash 核心
external-controller: '127.0.0.1:9090'
# 外部 UI指定一个 WebUI 面板的目录,'dashboard' 是一个常见的选择
# external-ui: dashboard
#----------------#
# DNS 配置 #
#----------------#
# DNS 模块是实现智能分流和抗污染的关键
dns: # 启用 DNS 服务器
enable: true
# 监听地址,'0.0.0.0:53' 使 Clash DNS 可为局域网内其他设备服务
# 如果只为本机服务,可设为 '127.0.0.1:53'
listen: 0.0.0.0:53
# 优先使用 IPv4 DNS 解析
ipv6: false
# 增强模式fake-ip 是 TUN 模式下实现域名路由的基石
# 它会为域名分配一个虚假的 IP 地址,使 Clash 能在 IP 层识别出原始域名
enhanced-mode: fake-ip
# Fake-IP 地址池,使用 IETF 保留的地址段,避免与公网地址冲突
fake-ip-range: 198.18.0.1/16
# Fake-IP 例外名单对于这些域名Clash 将返回其真实的 IP 地址
# 这对于一些无法处理 Fake-IP 的内网服务或特定应用至关重要
fake-ip-filter:
- '*.lan'
- '*.local'
- '*.arpa'
- time.*.com
- ntp.*.com
- time.*.com
- +.market.xiaomi.com
- localhost.ptlogin2.qq.com
- '*.msftncsi.com'
- www.msftconnecttest.com
# [核心优化] 默认 DNS 服务器 (IP 格式)
# 用于解析 nameserver 和 fallback 中的 DNS 服务器域名,以及代理节点的域名。
# 必须使用纯 IP 地址,这是打破解析死锁、解决 DNS 超时问题的关键。
default-nameserver:
- 223.5.5.5
- 180.76.76.76 # 百度DNS
- 119.29.29.29
# [优化] 主 DNS 服务器列表 (国内,加密 DoH)
# 会与 Fallback DNS 并发请求,如果返回的 IP 是国内 IP则立即采用速度快
# 使用加密 DNS 替代传统 UDP DNS增强解析的稳定性和抗干扰性。
nameserver:
- 223.5.5.5 # 阿里云
- 180.76.76.76 # 百度DNS
- 180.184.1.1 # 字节跳动
- 1.2.4.8 # CNNIC DNS
# - https://dns.alidns.com/dns-query # 阿里 DoH DNS
# 备用 DNS 服务器列表 (国外,加密)
# 用于解析国外域名。当主 DNS 返回国外 IP 时Clash 会认为可能被污染,
# 并采用 Fallback DNS 的解析结果,以确保准确性
fallback:
- https://dns.google/dns-query # Google DNS (DoH)
- https://dns.cloudflare.com/dns-query # Cloudflare DNS (DoH)
- https://dns.quad9.net/dns-query # IBM quad9
- https://dns.opendns.com/dns-query # CISCO OpenDNS
- https://dns.adguard-dns.com/dns-query # AdGuard DNS
- tls://8.8.4.4:853 # Google DNS (DoT)
# Fallback DNS 例外名单,匹配此列表的域名将只使用主 DNS 解析
fallback-filter:
geoip: true
geoip-code: CN # 如果是国内的网址使用nameserver解析到的地址
rule-set: direct
# Lookup domains via specific nameservers
# 以下规则强制所有已知国内域名走最快的 IP DNS彻底解决国内域名解析超时。
nameserver-policy:
'rule-set:direct':
- 223.5.5.5
- 180.184.1.1
- 119.29.29.29
'rule-set:apple':
- 119.29.29.29
- 223.5.5.5
'rule-set:icloud':
- 119.29.29.29
- 223.5.5.5
+.hq.cmcc:
- '192.168.78.39'
+.ops.uavcmlc.com:
- '192.168.34.40'
+.uavcmlc.com:
- '192.168.34.40'
ir.hq.cmcc:
- '192.168.78.39'
oa.cdcyy.cn:
- '192.168.78.39'
# 使用系统的hosts文件
use-system-hosts: true
# 请求DoH的DNS时 使用http3访问
prefer-h3: false
# DNS也遵循规则进行解析
respect-rules: false
# 代理的DNS解析地址
proxy-server-nameserver:
- 'https://dns.google/dns-query'
- 'https://1.1.1.1/dns-query'
# 直连模式下的DNS服务器
direct-nameserver:
- 119.29.29.29 # 腾讯 DNSPod
- 114.114.114.114 # 114 DNS
- 223.5.5.5 # 阿里 DNS
# 禁止远程调试
external-controller-cors: { }
##----------------#
## TUN 模式配置 #
##----------------#
## TUN 模式通过创建虚拟网卡,在系统网络层接管所有流量
#tun:
# # 启用 TUN 模式
# enable: true
# # 协议栈,'system' 在大多数系统上性能最佳
# # 在 macOS 上或遇到兼容性问题时可尝试 'gvisor'
# stack: system
# # DNS 劫持,将所有发往 53 端口的 DNS 请求重定向到 Clash 的 DNS 服务器
# # 这是强制所有应用使用 Clash DNS 的关键
# dns-hijack:
# - 'any:53'
# # 自动路由Clash 会自动配置系统路由表,将全局流量导向 TUN 网卡
# # 开启此项后,无需再进行任何手动网络设置
# auto-route: true
# # 自动检测出口网卡,适用于大多数单网卡设备
# # 如果设备有多个物理网卡,建议关闭此项并手动指定 interface-name
# auto-detect-interface: true
# [优化] 严格路由模式
# 开启后可防止 DNS 泄露,并解决在某些系统上 DNS 劫持不生效的问题。
# 注意:此设置会使局域网内的其他设备无法访问本机。如果不需要共享代理,建议开启。
# strict-route: true
#------------------------------------------------------------------#
# 代理节点 (Proxies) 和策略组 (Proxy Groups) - 用户需自行填充 #
#------------------------------------------------------------------#
#
# 请将您的订阅链接转换后,将 proxies 和 proxy-groups 的内容粘贴到此处
proxies:
- type: vless
name: TC-HongKong
server: 43.154.83.213
port: 24443
uuid: f8702759-f402-4e85-92a6-8540d577de22
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: BFC-LosAngles
server: 154.40.34.106
port: 443
uuid: 302fbcb8-e096-46a1-906f-e879ec5ab0c5
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: xx.l4.ca.bg.107421.xyz
tls: true
udp: true
- type: vless
name: CF-HongKong-R-TCHK
server: 43.154.83.213
port: 24453
uuid: 93be1d17-8e02-449d-bb99-683ed46fbe50
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: FV-HongKong
server: 43.154.83.213
port: 24452
uuid: cdf0b19a-9524-48d5-b697-5f10bb567734
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: Care-DEU-Dusseldorf-R-TCHK
server: 43.154.83.213
port: 24451
uuid: 9fa9b4e7-d76d-4890-92cf-ce9251a76f59
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: Care-DEU-Dusseldorf
server: 45.134.50.233
port: 443
uuid: b1417d92-998d-410b-a5f3-cf144b6f043e
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: bingo.107421.xyz
tls: true
udp: true
- type: vless
name: Oracle-KOR-Seoul
server: 140.238.14.103
port: 443
uuid: 1089cc14-557e-47ac-ac85-c07957b3cce3
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: xx.s4.cc.hh.107421.xyz
tls: true
udp: true
- type: vless
name: FV-DEU-Frankfurt
server: 43.154.83.213
port: 24444
uuid: 6055eac4-dee7-463b-b575-d30ea94bb768
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: FV-KOR-Seoul
server: 43.154.83.213
port: 24445
uuid: 1cd284b2-d3d8-4165-b773-893f836c2b51
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: FV-JPN-Tokyo
server: 43.154.83.213
port: 24446
uuid: bf0e9c35-84a9-460e-b5bf-2fa9f2fb3bca
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: FV-GBR-London
server: 43.154.83.213
port: 24447
uuid: adc19390-373d-4dfc-b0f6-19fab1b6fbf6
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: FV-SGP
server: 43.154.83.213
port: 24448
uuid: e31bc28e-8ebd-4d72-a98e-9227f26dfac3
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: Oracle-KOR-Seoul-R-TCHK
server: 43.154.83.213
port: 24449
uuid: 7e27da0c-3013-4ed4-817b-50cc76a0bf81
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: Oracle-JPN-Tokyo-R-TCHK
server: 43.154.83.213
port: 25000
uuid: c751811a-404f-4a05-bc41-5d572e741398
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: Oracle-USA-Phoenix-R-TCHK
server: 43.154.83.213
port: 25001
uuid: fce2a9c6-1380-4ffa-ba84-6b9ec9ee2eea
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- type: vless
name: FV-USA-LosAngles
server: 43.154.83.213
port: 24450
uuid: 56fb312c-bdb0-48ca-bf66-4a2dd34040c6
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: book.107421.xyz
tls: true
udp: true
- name: CF_VIDEO_1
type: vless
server: bingo.pp.icederce.ip-ddns.com
port: 8443
uuid: 86c50e3a-5b87-49dd-bd20-03c7f2735e40
udp: false
tls: true
network: ws
servername: pp.icederce.ip-ddns.com
ws-opts:
path: "/?ed=2560"
headers:
Host: pp.icederce.ip-ddns.com
- name: CF_VIDEO_2
type: vless
server: bingo.icederce.ip-ddns.com
port: 8443
uuid: 86c50e3a-5b87-49dd-bd20-03c7f2735e40
udp: false
tls: true
network: ws
servername: pp.icederce.ip-ddns.com
ws-opts:
path: "/?ed=2560"
headers:
Host: pp.icederce.ip-ddns.com
- type: socks5
name: TC-CHN-Shanghai
server: 42.192.52.227
port: 22887
username: zeaslity
password: a1f090ea-e39c-49e7-a3be-9af26b6ce563
udp: true
- type: vless
name: Oracle-JPN-Tokyo-R-OSel
server: 140.238.14.103
port: 20443
uuid: 21dab95b-088e-47bd-8351-609fd23cb33c
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: xx.t2.ll.c0.107421.xyz
tls: true
udp: true
- type: vless
name: Oracle-JPN-Osaka-R-OSel
server: 140.238.14.103
port: 21443
uuid: 4c2dd763-56e5-408f-bc8f-dbf4c1fe41f9
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: xx.o1.vl.s4.107421.xyz
tls: true
udp: true
- type: vless
name: Oracle-USA-Phoneix-R-OSel
server: 140.238.14.103
port: 22443
uuid: de576486-e254-4d9d-949a-37088358ec23
skip-cert-verify: false
network: tcp
flow: xtls-rprx-vision
servername: xx.p2.vl.s4.107421.xyz
tls: true
udp: true
- { "type": "socks5","name": "onetools-35-71","server": "192.168.35.71","port": 22888,"username": "zeaslity","password": "password","udp": true }
proxy-groups:
- name: 🚀 节点选择
type: select
proxies:
- TC-HongKong
- BFC-LosAngles
- FV-HongKong
- Care-DEU-Dusseldorf-R-TCHK
- Oracle-KOR-Seoul-R-TCHK
- Oracle-JPN-Tokyo-R-TCHK
- Oracle-USA-Phoenix-R-TCHK
- Care-DEU-Dusseldorf
- Oracle-KOR-Seoul
- FV-DEU-Frankfurt
- FV-KOR-Seoul
- FV-JPN-Tokyo
- FV-GBR-London
- FV-USA-LosAngles
- CF-HongKong-R-TCHK
- FV-SGP
- CF_VIDEO_1
- CF_VIDEO_2
- Oracle-JPN-Tokyo-R-OSel
- Oracle-JPN-Osaka-R-OSel
- Oracle-USA-Phoneix-R-OSel
- TC-CHN-Shanghai
- ♻️ 自动选择
- DIRECT
- name: ♻️ 自动选择
type: url-test
url: https://www.gstatic.com/generate_204
interval: 300
tolerance: 50
proxies:
- BFC-LosAngles
- TC-HongKong
- Oracle-JPN-Tokyo-R-TCHK
- Oracle-USA-Phoenix-R-TCHK
- Oracle-KOR-Seoul
- Care-DEU-Dusseldorf
- Oracle-JPN-Tokyo-R-OSel
- Oracle-JPN-Osaka-R-OSel
- Oracle-USA-Phoneix-R-OSel
- name: 🌍 国外媒体
type: select
proxies:
- 🚀 节点选择
- ♻️ 自动选择
- 🎯 全球直连
- name: 📲 电报信息
type: select
proxies:
- 🚀 节点选择
- ♻️ 自动选择
- 🎯 全球直连
- name: Ⓜ️ 微软服务
type: select
proxies:
- 🎯 全球直连
- 🚀 节点选择
- name: 🍎 苹果服务
type: select
proxies:
- 🎯 全球直连
- 🚀 节点选择
- name: 💩 工作直连
type: select
proxies:
- DIRECT
- onetools-35-71
- name: 💩 工作代理
type: select
proxies:
- onetools-35-71
- DIRECT
- name: 🎯 全球直连
type: select
proxies:
- DIRECT
- 🚀 节点选择
- ♻️ 自动选择
- name: 🛑 全球拦截
type: select
proxies:
- REJECT
- DIRECT
- name: 🍃 应用净化
type: select
proxies:
- REJECT
- DIRECT
- name: 🐟 漏网之鱼
type: select
proxies:
- 🚀 节点选择
- 🎯 全球直连
- ♻️ 自动选择
- TC-HongKong
- Oracle-KOR-Seoul
#----------------#
# 规则集定义 #
#----------------#
# Rule Providers 用于从网络动态加载规则列表,实现规则的自动更新
rule-providers: # 广告、追踪器、恶意域名规则集
reject:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/reject.txt"
path: ./ruleset/reject.yaml
interval: 604800 # 更新间隔: 7天
# iCloud 服务规则集
icloud:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/icloud.txt"
path: ./ruleset/icloud.yaml
interval: 604800
# 苹果服务规则集
apple:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/apple.txt"
path: ./ruleset/apple.yaml
interval: 604800
# 谷歌服务规则集
google:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/google.txt"
path: ./ruleset/google.yaml
interval: 604800
# 需要代理的域名规则集
proxy:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/proxy.txt"
path: ./ruleset/proxy.yaml
interval: 604800
# 需要直连的域名规则集
direct:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/direct.txt"
path: ./ruleset/direct.yaml
interval: 604800
# 私有网络域名规则集
private:
type: http
behavior: domain
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/private.txt"
path: ./ruleset/private.yaml
interval: 604800
# 中国大陆 IP 段规则集
cncidr:
type: http
behavior: ipcidr
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/cncidr.txt"
path: ./ruleset/cncidr.yaml
interval: 604800
# 局域网 IP 段规则集
lancidr:
type: http
behavior: ipcidr
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/lancidr.txt"
path: ./ruleset/lancidr.yaml
interval: 604800
# Telegram 服务器 IP 段规则集
telegramcidr:
type: http
behavior: ipcidr
url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/telegramcidr.txt"
path: ./ruleset/telegramcidr.yaml
interval: 604800
#----------------#
# 分流规则 #
#----------------#
# 规则按从上到下的顺序进行匹配,一旦匹配成功,后续规则将不再执行
rules: # 1. 广告、追踪器拦截规则 (最高优先级)
# 直接拒绝连接,提升网页加载速度和隐私保护
- RULE-SET,reject,REJECT
# [优化] 核心国内流量直连规则 (IP 维度)
# 将中国大陆的 IP 地址段置于高优先级。这是解决国内网站访问缓慢和超时的关键。
# 任何目标地址在此列表内的连接都会被立即直连,无需进行 DNS 查询和 GEOIP 判断。
- RULE-SET,cncidr,DIRECT
- # 5. 基于地理位置的补充规则
- # 所有目标 IP 位于中国大陆的流量都直连
- # 这条规则作为对域名规则的补充,确保国内 IP 流量的直连
- GEOIP,CN,DIRECT
# 工作代理模式
- DOMAIN-SUFFIX,cdcyy.cn,💩 工作直连
- DOMAIN-SUFFIX,hq.cmcc,💩 工作直连
- DOMAIN-SUFFIX,wdd.io,💩 工作直连
- DOMAIN-SUFFIX,harbor.cdcyy.com.cn,💩 工作直连
- DOMAIN-SUFFIX,ecs.io,💩 工作直连
- DOMAIN-SUFFIX,uavcmlc.com,💩 工作直连
# 2. 本地/内网流量直连规则
# 确保局域网设备和服务的访问不受代理影响
- RULE-SET,lancidr,DIRECT
- RULE-SET,private,DIRECT
# 3. 明确的国内服务直连规则
# 优先匹配已知需要直连的域名和服务 (Apple, iCloud 等)
- RULE-SET,icloud,🍎 苹果服务
- RULE-SET,apple,🍎 苹果服务
- RULE-SET,direct,🎯 全球直连
# 4. 明确的代理规则
# 匹配已知需要代理的服务 (Google, Telegram, 以及其他国际服务)
- RULE-SET,google,🌍 国外媒体
- RULE-SET,telegramcidr,📲 电报信息
- RULE-SET,proxy,🌍 国外媒体
# 6. 最终的兜底规则 (最低优先级)
# 所有未匹配到以上任何规则的流量,都走代理
# 这是确保未知的新网站或国外服务能正常访问的关键
- MATCH,🐟 漏网之鱼

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,31 @@
请查阅clash的配置规格修改上述的配置文件要求对配置进行详细的中文注释说明。
## 参考配置
- https://en.clash.wiki/configuration/getting-started.html
## 修改功能说明,配置最终需要满足如下条件
- 开启TUN模式规则模式下在GFW之内使用
- 访问CN的IP及网址是直连状态,无需任何代理,使用223.5.5.5 119.29.29.29作为DNS
- 访问特殊网址规则,按照clash中规则定义进行访问
- 访问规则之外的CN之外的IP及网址,走代理访问,使用8.8.8.8 1.1.1.1作为DNS
- 规则模式下,在GFW之内使用
- 同上述规则
请查阅clash的配置规格修改上述的配置文件要求对配置进行详细的中文注释说明。
## 参考配置
- https://en.clash.wiki/configuration/getting-started.html
请分析上述的配置文件,无论是否开启TUN模式,在规则模式之后
访问国内的网址出现大量错误,典型的错误如下
[TCP] dial DIRECT (match RuleSet/cncidr) 127.0.0.1:50173 --> www.zhihu.com:443 error: dns resolve failed: context deadline exceeded
[TCP] dial 🎯 全球直连 (match RuleSet/direct) 127.0.0.1:56064 --> static.zhihu.com:443 error: dns resolve failed: context deadline exceeded
访问网址非常卡顿,请给出解决方案.考虑自建DNS服务器是否能够解决问题

View File

@@ -0,0 +1,277 @@
# AdGuard Home 配置文件
# 官方文档参考: https://github.com/AdguardTeam/AdGuardHome/wiki/Configuration
# HTTP/Web 界面相关设置
#http:
# # pprof (性能分析) 相关设置
# pprof:
# port: 6060 # pprof 服务的端口
# enabled: false # 是否启用 pprof默认为 false建议保持禁用
# address: https://xx.tc.hk.go.107421.xyz # Web 界面的监听地址和端口
# session_ttl: 720h # Web 界面登录会话的有效时间 (720小时 = 30天)
# 用户认证设置
users:
# 在这里添加您的用户。您必须手动生成密码的 bcrypt 哈希值。
# 例如,在 Linux 系统中,您可以使用 htpasswd 工具生成:
# htpasswd -nb your_username your_password
# 然后将输出的整行 (例如: your_username:$apr1$....) 替换掉下面的内容。
- name: zeaslity # 您的用户名
password: "$2y$05$b8Vbq3FrGqFNDceFTPFf.eRVYznIineyqtVr60hURTmFKLwdvadCi" # 将这里替换为您生成的密码哈希值
# 认证尝试次数与锁定时间
auth_attempts: 5 # 允许的最大登录失败次数
block_auth_min: 15 # 登录失败次数过多后,锁定登录的分钟数
http_proxy: "" # HTTP 代理地址,通常留空
language: "zh-cn" # Web 界面语言,留空则自动检测浏览器语言
theme: auto # Web 界面主题 (auto, light, dark)
# DNS 服务器相关设置
dns:
bind_hosts:
- 0.0.0.0 # DNS 服务器监听的 IP 地址127.0.0.1 表示只允许本机访问
port: 53 # DNS 服务器监听的端口53是标准DNS端口
anonymize_client_ip: false # 是否在将 EDNS Client Subnet (ECS) 信息转发给上游时匿名化客户端 IP
# DNS 请求速率限制
ratelimit: 40 # 每个客户端每秒允许的最大 DNS 请求数
ratelimit_subnet_len_ipv4: 24 # 用于速率限制的 IPv4 子网掩码长度 (24表示C类地址)
ratelimit_subnet_len_ipv6: 56 # 用于速率限制的 IPv6 子网掩码长度
ratelimit_whitelist: [] # 不受速率限制的 IP 地址列表
refuse_any: true # 是否拒绝类型为 ANY 的 DNS 请求,以防止被用于 DNS 放大攻击
# 上游 DNS 服务器设置
upstream_dns:
- https://dns.google/dns-query # Google DNS (DoH)
- https://1.1.1.1/dns-query # Cloudflare DNS (DoH)
- tls://8.8.4.4:853 # Google DNS (DoT)
upstream_dns_file: "" # 从文件中加载上游 DNS 服务器列表,留空则不使用
# 引导 DNS 服务器 (用于解析上游 DoH/DoT/DoQ 的域名)
bootstrap_dns:
- 1.1.1.1
- 8.8.8.8
# 备用 DNS 服务器,当所有上游服务器都不可用时使用,可以留空
fallback_dns:
- 1.1.1.1
- 8.8.8.8
# 上游服务器查询模式
upstream_mode: load_balance # "load_balance": 负载均衡, "parallel": 并行请求, "fastest_ip": 最快IP模式
fastest_timeout: 1s # 在 "fastest_ip" 模式下,等待响应的超时时间
# 访问控制
allowed_clients: [] # 允许访问的客户端列表,留空表示允许所有
disallowed_clients: [] # 禁止访问的客户端列表
# 默认拦截的域名
blocked_hosts:
- version.bind
- id.server
- hostname.bind
# 信任的反向代理 IP 地址范围
trusted_proxies:
- 127.0.0.0/8
- ::1/128
# DNS 缓存设置
cache_enabled: true # 是否启用 DNS 缓存
cache_size: 419430400 # 缓存大小 (字节, 这里是 400MB)
cache_ttl_min: 0 # 覆盖 DNS 记录的最小 TTL (秒)0 表示不覆盖
cache_ttl_max: 0 # 覆盖 DNS 记录的最大 TTL (秒)0 表示不覆盖
cache_optimistic: false # 是否启用乐观缓存 (返回过期的缓存记录并异步刷新)
bogus_nxdomain: [] # 将指定的 IP 地址的 NXDOMAIN 响应视为伪造响应
aaaa_disabled: false # 是否禁用对 IPv6 (AAAA) 记录的解析
enable_dnssec: false # 是否启用 DNSSEC 支持
# EDNS Client Subnet (ECS) 设置
edns_client_subnet:
custom_ip: "" # 自定义发送给上游的 IP 地址
enabled: false # 是否启用 ECS
use_custom: false # 是否使用上面定义的 custom_ip
max_goroutines: 300 # 处理 DNS 请求的最大并发协程数
handle_ddr: true # 是否处理 Discovery of Designated Resolvers (DDR)
# IPSet 设置 (需要内核支持)
ipset: []
ipset_file: ""
bootstrap_prefer_ipv6: false # 引导DNS是否优先使用 IPv6
upstream_timeout: 10s # 上游 DNS 请求的超时时间
private_networks: [] # 自定义的私有网络范围
use_private_ptr_resolvers: true # 是否为私有地址使用私有反向DNS解析器
local_ptr_upstreams: [] # 用于PTR请求的本地上游DNS
# DNS64 设置 (用于 NAT64)
use_dns64: false
dns64_prefixes: []
# HTTP/3 相关
serve_http3: false # 是否通过 HTTP/3 提供 DoH 服务
use_http3_upstreams: true # 是否使用 HTTP/3 连接到上游 DoH 服务器
serve_plain_dns: false # 是否为 DoH 和 DoT 客户端提供普通DNS (53端口)
hostsfile_enabled: true # 是否使用操作系统的 hosts 文件
# 待处理请求队列 (防止重复向上游请求)
pending_requests:
enabled: true
# TLS (加密) 相关设置
tls:
enabled: true # 是否启用 TLS (HTTPS, DoH, DoT, DoQ)
server_name: xx.tc.hk.go.107421.xyz # 您的服务器域名
force_https: true # 是否强制将 HTTP 请求重定向到 HTTPS
port_https: 443 # HTTPS 端口
port_dns_over_tls: 253 # DNS-over-TLS (DoT) 端口
port_dns_over_quic: 253 # DNS-over-QUIC (DoQ) 端口
port_dnscrypt: 0 # DNSCrypt 端口0表示禁用
dnscrypt_config_file: "" # DNSCrypt 配置文件路径
allow_unencrypted_doh: false # 是否允许通过未加密的 HTTP 接收 DoH 请求
# 证书和私钥设置
certificate_chain: "" # 证书链内容 (如果直接粘贴内容)
private_key: "" # 私钥内容 (如果直接粘贴内容)
certificate_path: /root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/fullchain.cer # 证书文件路径
private_key_path: /root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/xx.tc.hk.go.107421.xyz.key # 私钥文件路径
strict_sni_check: true # 是否为 DoT 和 DoH 启用严格的 SNI 检查
# 查询日志设置
querylog:
dir_path: "" # 日志文件存储目录,留空为 AdGuard Home 工作目录
ignored: [] # 不记录日志的域名列表
interval: 2160h # 日志轮转周期 (90天)
size_memory: 1000 # 在内存中保留的最新日志条数
enabled: true # 是否启用查询日志
file_enabled: false # 是否将日志写入文件
# 统计信息设置
statistics:
dir_path: "" # 统计数据存储目录
ignored: [] # 不计入统计的域名列表
interval: 24h # 统计信息保留时长
enabled: true # 是否启用统计功能
# 过滤规则列表设置
filters:
# AdGuard DNS 过滤器
- enabled: true
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_1.txt
name: AdGuard DNS filter
id: 1
# AdAway 默认黑名单 (已禁用)
- enabled: false
url: https://adguardteam.github.io/HostlistsRegistry/assets/filter_2.txt
name: AdAway Default Blocklist
id: 2
whitelist_filters: [] # 白名单过滤列表
user_rules: [] # 用户自定义过滤规则
# DHCP 服务器设置 (当前禁用)
dhcp:
enabled: false
interface_name: ""
local_domain_name: lan
dhcpv4:
gateway_ip: ""
subnet_mask: ""
range_start: ""
range_end: ""
lease_duration: 86400
icmp_timeout_msec: 1000
options: []
dhcpv6:
range_start: ""
lease_duration: 86400
ra_slaac_only: false
ra_allow_slaac: false
# 内容过滤总设置
filtering:
blocking_ipv4: "" # 当域名被拦截时,返回的 IPv4 地址 (留空为默认)
blocking_ipv6: "" # 当域名被拦截时,返回的 IPv6 地址 (留空为默认)
# 按服务拦截
blocked_services:
schedule:
time_zone: Local # 时间表使用的时区
ids: [] # 要拦截的服务 ID 列表
protection_disabled_until: null # 临时禁用保护直至指定时间
# 安全搜索设置
safe_search:
enabled: false # 是否为搜索引擎强制启用安全搜索
bing: true
duckduckgo: true
ecosia: true
google: true
pixabay: true
yandex: true
youtube: true
# 拦截模式
blocking_mode: default # default: 默认模式; nxdomain: 返回 NXDOMAIN; null_ip: 返回 0.0.0.0; custom_ip: 返回自定义 IP
parental_block_host: family-block.dns.adguard.com # 家长控制拦截主机
safebrowsing_block_host: standard-block.dns.adguard.com # 安全浏览拦截主机
rewrites: [] # DNS 重写规则
safe_fs_patterns: [] # 文件系统安全模式
# 各种功能的缓存大小 (字节)
safebrowsing_cache_size: 104857600 # 安全浏览 (100MB)
safesearch_cache_size: 104857600 # 安全搜索 (100MB)
parental_cache_size: 1048576 # 家长控制 (1MB)
cache_time: 180 # 缓存时间 (秒)
filters_update_interval: 24 # 过滤器自动更新间隔 (小时)
blocked_response_ttl: 10 # 被拦截域名的 DNS 响应 TTL (秒)
# 总开关
filtering_enabled: true # 是否启用广告过滤
parental_enabled: false # 是否启用家长控制
safebrowsing_enabled: false # 是否启用安全浏览
protection_enabled: true # AdGuard 总保护开关
# 客户端设置
clients:
runtime_sources:
whois: true
arp: true
rdns: false
dhcp: true
hosts: true
persistent: [] # 持久化客户端设置
# 一般日志设置
log:
enabled: true # 是否启用 AdGuard Home 本身的日志记录
file: "" # 日志文件路径,留空为 stdout
max_backups: 0 # 保留的旧日志文件数量
max_size: 100 # 每个日志文件的最大大小 (MB)
max_age: 3 # 旧日志文件保留天数
compress: false # 是否压缩旧日志文件
local_time: false # 是否使用本地时间记录日志
verbose: false # 是否启用详细日志模式
# 操作系统相关设置
os:
group: ""
user: ""
rlimit_nofile: 0
schema_version: 30 # 配置文件架构版本,请勿手动修改

View File

@@ -0,0 +1,23 @@
请详细参考 AdGuard 官方的教程:
- https://github.com/AdguardTeam/AdGuardHome/wiki/Configuration
基于上文给出的配置文件,做出如下的修改
- 针对每一行的配置,给出中文的注释说明
- 实现用户名密码访问控制台,禁止非登录访问
htpasswd -nb -B zeaslity MSuper@123.IO9
zeaslity:$2y$05$b8Vbq3FrGqFNDceFTPFf.eRVYznIineyqtVr60hURTmFKLwdvadCi
dnslookup www.youtube.com https://xx.tc.hk.go.107421.xyz/dns-query
bitsflowcx1@outlook.com
urh!ude9zdf5njy0ZJN
aaa20250822

View File

@@ -18,7 +18,32 @@
"accounts": [ "accounts": [
{ {
"user": "zeaslity", "user": "zeaslity",
"pass": "lovemm.23" "pass": "a1f090ea-e39c-49e7-a3be-9af26b6ce563"
}
],
"udp": true,
"allowTransparent": false
}
},
{
"tag": "proxy-germany",
"port": 22889,
"listen": "0.0.0.0",
"protocol": "socks",
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
],
"routeOnly": false
},
"settings": {
"auth": "password",
"accounts": [
{
"user": "zeaslity",
"pass": "a1f090ea-e39c-49e7-a3be-9af26b6ce563"
} }
], ],
"udp": true, "udp": true,
@@ -43,7 +68,7 @@
"accounts": [ "accounts": [
{ {
"user": "zeaslity", "user": "zeaslity",
"pass": "lovemm.23" "pass": "a1f090ea-e39c-49e7-a3be-9af26b6ce563"
} }
], ],
"udp": true, "udp": true,
@@ -62,7 +87,7 @@
"port": 443, "port": 443,
"users": [ "users": [
{ {
"id": "717c40e7-efeb-45bc-8f5e-4e6e7d9eea18", "id": "0c5741d0-76a9-4945-9c1d-14647afcce24",
"email": "t@t.tt", "email": "t@t.tt",
"security": "auto", "security": "auto",
"encryption": "none", "encryption": "none",
@@ -89,6 +114,43 @@
"concurrency": -1 "concurrency": -1
} }
}, },
{
"tag": "proxy-germany",
"protocol": "vless",
"settings": {
"vnext": [
{
"address": "45.134.50.233",
"port": 443,
"users": [
{
"id": "b1417d92-998d-410b-a5f3-cf144b6f043e",
"email": "t@t.tt",
"security": "auto",
"encryption": "none",
"flow": "xtls-rprx-vision"
}
]
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"allowInsecure": false,
"serverName": "bingo.107421.xyz",
"alpn": [
"h2"
],
"fingerprint": "firefox"
}
},
"mux": {
"enabled": false,
"concurrency": -1
}
},
{ {
"tag": "direct", "tag": "direct",
"protocol": "freedom", "protocol": "freedom",
@@ -123,6 +185,13 @@
"proxy-socks" "proxy-socks"
] ]
}, },
{
"type": "field",
"outboundTag": "proxy-germany",
"inboundTag": [
"proxy-germany"
]
},
{ {
"type": "field", "type": "field",
"outboundTag": "direct", "outboundTag": "direct",

View File

@@ -0,0 +1,4 @@
export all_proxy=socks5://zeaslity:a1f090ea-e39c-49e7-a3be-9af26b6ce563@42.192.52.227:22888

View File

@@ -0,0 +1,70 @@
{
"log": {
"loglevel": "error"
},
"inbounds": [
{
"port": 443,
"protocol": "vless",
"tag": "proxy",
"settings": {
"clients": [
{
"id": "302fbcb8-e096-46a1-906f-e879ec5ab0c5",
"flow": "xtls-rprx-vision",
"email": "bfc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/xx.l4.ca.bg.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/xx.l4.ca.bg.107421.xyz_ecc/xx.l4.ca.bg.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "freedom"
},
{
"protocol": "freedom",
"tag": "proxy"
}
]
}

View File

@@ -0,0 +1,105 @@
#!/bin/bash
#
# UFW firewall configuration script for Ubuntu 22.04 LTS.
#
# What it does:
#   1. Resets UFW to a clean state.
#   2. Sets default policies: deny all inbound, allow all outbound.
#   3. Opens TCP/UDP ports 443, 22333 and the 25000-26000 range to everyone.
#   4. Grants full access (all protocols/ports, including ICMP) to a
#      whitelist of trusted IPs/CIDRs.
#   5. Enables the firewall non-interactively and prints the final status.
#
# NOTE(review): SSH (22) is NOT opened globally. After `ufw enable`, only
# the whitelisted IPs can reach SSH — confirm you are connecting from one
# of them before running this, or you will lock yourself out.
#
# Fail fast on errors, unset variables, and pipeline failures.
set -euo pipefail

# --- Script start: announce what is about to happen ---
echo "================================================="
echo " UFW 防火墙自动配置脚本即将开始... "
echo "================================================="
echo ""

# --- Step 1: reset UFW ---
# Reset to the initial state so stale rules cannot conflict with the new
# set. --force skips the interactive confirmation prompt.
echo "--- 步骤 1: 重置UFW防火墙清除所有现有规则 ---"
echo "执行命令: sudo ufw --force reset"
sudo ufw --force reset
echo "-------------------------------------------------"
echo ""

# --- Step 2: default policies ---
# Baseline security posture: deny anything inbound that is not explicitly
# allowed; let the host initiate any outbound connection.
echo "--- 步骤 2: 设置默认防火墙策略 ---"
echo "设置默认拒绝所有进入流量..."
echo "执行命令: sudo ufw default deny incoming"
sudo ufw default deny incoming
echo "设置默认允许所有出口流量..."
echo "执行命令: sudo ufw default allow outgoing"
sudo ufw default allow outgoing
echo "-------------------------------------------------"
echo ""

# --- Step 3: open public service ports (to every IP) ---
echo "--- 步骤 3: 向所有IP开放指定的TCP/UDP端口 ---"
# HTTPS (443), tcp + udp.
echo "开放端口: 443/tcp 和 443/udp"
echo "执行命令: sudo ufw allow 443"
sudo ufw allow 443
# Custom service port 22333, tcp + udp.
echo "开放端口: 22333/tcp 和 22333/udp"
echo "执行命令: sudo ufw allow 22333"
sudo ufw allow 22333
# Custom port range 25000-26000; ranges require an explicit protocol,
# so tcp and udp are added separately.
echo "开放端口范围: 25000:26000/tcp"
echo "执行命令: sudo ufw allow 25000:26000/tcp"
sudo ufw allow 25000:26000/tcp
echo "开放端口范围: 25000:26000/udp"
echo "执行命令: sudo ufw allow 25000:26000/udp"
sudo ufw allow 25000:26000/udp
echo "-------------------------------------------------"
echo ""

# --- Step 4: IP whitelist ---
# Trusted sources get unrestricted access (all ports and protocols,
# which also permits ICMP/ping from these addresses).
echo "--- 步骤 4: 为白名单IP开放所有协议和端口 ---"
readonly WHITELIST_IPS=(
  "42.192.52.227/32"
  "43.154.83.213/32"
  "144.24.164.121/32"
  "132.145.87.10/32"
  "140.238.0.0/16"
)
# Add one allow-all rule per whitelisted source.
for ip in "${WHITELIST_IPS[@]}"; do
  echo "添加白名单IP: ${ip}"
  echo "执行命令: sudo ufw allow from ${ip} to any"
  sudo ufw allow from "${ip}" to any
done
echo "-------------------------------------------------"
echo ""

# --- Step 5: ICMP (ping) handling ---
# No extra rule is needed: the default deny-incoming policy already drops
# ICMP from non-whitelisted sources, while the allow-all whitelist rules
# above let trusted IPs ping the host.
echo "--- 步骤 5: ICMP (Ping) 请求说明 ---"
echo "无需额外规则。默认的'deny incoming'策略已阻止非白名单IP的ping请求。"
echo "-------------------------------------------------"
echo ""

# --- Step 6: enable UFW ---
# --force answers the "may disrupt existing ssh connections" prompt, so
# the script never blocks waiting for interactive input.
echo "--- 步骤 6: 启用UFW防火墙 ---"
echo "执行命令: sudo ufw --force enable"
sudo ufw --force enable
echo "-------------------------------------------------"
echo ""

# --- Step 7: show final state for manual verification ---
echo "--- 步骤 7: 显示当前防火墙状态 ---"
echo "执行命令: sudo ufw status verbose"
sudo ufw status verbose
echo "-------------------------------------------------"
echo ""

echo "================================================="
echo " 防火墙配置完成!请检查上面的状态。 "
echo "================================================="

View File

@@ -0,0 +1,14 @@
你是一个精通ubuntu22.04系统下ufw使用的计算机高手请实现一个shell脚本实现如下的功能
- 所有的命令均有清晰的中文注释
- 所有执行的命令均使用echo进行打印输出
- 开放访问目的地端口为443 22333 25000-26000的tcp udp到0.0.0.0/0
- 对以下IP开放全部协议及端口
- 42.192.52.227/32
- 43.154.83.213/32
- 144.24.164.121/32
- 132.145.87.10/32
- 140.238.0.0/16
- 禁止其他端口的访问流量
- 禁止非白名单IP的ICMP请求
- 允许全部的出口流量

View File

@@ -9,7 +9,7 @@
"settings": { "settings": {
"clients": [ "clients": [
{ {
"id": "717c40e7-efeb-45bc-8f5e-4e6e7d9eea18", "id": "0c5741d0-76a9-4945-9c1d-14647afcce24",
"flow": "xtls-rprx-vision", "flow": "xtls-rprx-vision",
"email": "cc@gg.com", "email": "cc@gg.com",
"level": 0 "level": 0
@@ -72,7 +72,7 @@
"clients": [ "clients": [
{ {
"email": "ice@qq.com", "email": "ice@qq.com",
"password": "Vad3.123a)asd@1234-as.dasd.asdazzS.123", "password": "Vad3.123acasd-1234-as.dAsd.asdazzS.123",
"level": 0 "level": 0
} }
] ]

View File

@@ -0,0 +1,72 @@
{
"inbounds": [
{
"protocol": "http",
"port": 2234,
"listen": "0.0.0.0",
"tag": "proxy-http"
},
{
"tag": "proxy",
"protocol": "socks",
"listen": "0.0.0.0",
"port": 1234,
"settings": {
"auth": "noauth",
"udp": true,
"ip": "127.0.0.1",
"userLevel": 0
}
},
{
"protocol": "socks",
"tag": "cloudflare",
"listen": "0.0.0.0",
"port": 1235,
"settings": {
"auth": "noauth",
"udp": true,
"userLevel": 0
}
}
],
"outbounds": [
{
"tag": "cloudflare",
"protocol": "socks",
"settings": {
"servers": [
{
"address": "127.0.0.1",
"port": 40000,
"level": 0
}
]
}
},
{
"tag": "proxy",
"protocol": "freedom"
}
],
"routing": {
"domainStrategy": "IPIfNonMatch",
"rules": [
{
"type": "field",
"inboundTag": [
"cloudflare",
"proxy-http"
],
"outboundTag": "cloudflare"
},
{
"type": "field",
"inboundTag": [
"proxy"
],
"outboundTag": "proxy"
}
]
}
}

View File

@@ -9,7 +9,7 @@
"settings": { "settings": {
"clients": [ "clients": [
{ {
"id": "12491d80-745c-4e26-a58b-edf584afb208", "id": "f1335f03-8c67-43c4-ac47-88697e917cc0",
"flow": "xtls-rprx-vision", "flow": "xtls-rprx-vision",
"email": "cc@Phoenix-arm02.com", "email": "cc@Phoenix-arm02.com",
"level": 0 "level": 0

View File

@@ -0,0 +1,28 @@
{
"log": {
"loglevel": "warning"
},
"inbounds": [
{
"listen": "0.0.0.0",
"port": 31234,
"protocol": "vmess",
"settings": {
"clients": [
{
"id": "7d390fdf-0a48-4a3e-b18c-b18db36c6f23"
}
]
},
"streamSettings": {
"network": "tcp"
}
}
],
"outbounds": [
{
"protocol": "freedom",
"tag": "direct"
}
]
}

View File

@@ -0,0 +1,64 @@
{
"log": {
"loglevel": "warning"
},
"inbounds": [
{
"port": 443,
"protocol": "vless",
"settings": {
"clients": [
{
"id": "b1417d92-998d-410b-a5f3-cf144b6f043e",
"flow": "xtls-rprx-vision",
"email": "cc@vless.com",
"level": 0
}
],
"decryption": "none",
"fallbacks": [
{
"dest": "/dev/shm/h2c.sock",
"xver": 2,
"alpn": "h2"
},
{
"dest": "/dev/shm/h1.sock",
"xver": 2
}
]
},
"streamSettings": {
"network": "tcp",
"security": "tls",
"tlsSettings": {
"certificates": [
{
"ocspStapling": 3600,
"certificateFile": "/root/.acme.sh/bingo.107421.xyz_ecc/fullchain.cer",
"keyFile": "/root/.acme.sh/bingo.107421.xyz_ecc/bingo.107421.xyz.key"
}
],
"minVersion": "1.2",
"cipherSuites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"alpn": [
"h2",
"http/1.1"
]
}
},
"sniffing": {
"enabled": true,
"destOverride": [
"http",
"tls"
]
}
}
],
"outbounds": [
{
"protocol": "freedom"
}
]
}

View File

@@ -0,0 +1 @@
当前 IP45.134.50.233来自于:罗马尼亚 蒂米什县 蒂米什瓦拉 bunea.eu

View File

@@ -9,7 +9,7 @@
"settings": { "settings": {
"clients": [ "clients": [
{ {
"id": "1dde748d-32ee-4ed7-b70b-f2376d34e7e5", "id": "1089cc14-557e-47ac-ac85-c07957b3cce3",
"flow": "xtls-rprx-vision", "flow": "xtls-rprx-vision",
"email": "cc@vless.com", "email": "cc@vless.com",
"level": 0 "level": 0
@@ -82,7 +82,7 @@
"clients": [ "clients": [
{ {
"email": "general@trojan-h2-tokyo2", "email": "general@trojan-h2-tokyo2",
"password": "ADasfsaad12.21312@113.adsaddasds.112321", "password": "ADaSfsaad12.21312-.1Ac13.adsCCddasds.112321",
"level": 0 "level": 0
} }
] ]

Binary file not shown.

View File

@@ -0,0 +1,58 @@
# Restrict access to the website (by IP or wrong domain name) and return 400.
# Catch-all HTTP/2 listener on the Unix socket: any request whose Host/SNI
# does not match a named server below lands here and is rejected.
server {
listen unix:/dev/shm/h2c.sock proxy_protocol default_server;
# listen 5000;
http2 on;
# Trust the PROXY protocol header from the local Unix-socket peer so the
# real client address (forwarded by the fronting proxy, e.g. xray) is used.
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name _;
return 400 "not allowed";
}
# Catch-all HTTP/1.x listener on the second Unix socket; same rejection.
server {
# listen 5001;
listen unix:/dev/shm/h1.sock proxy_protocol default_server;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name _;
return 400 "not allowed";
}
# HTTP1 UDS listener
# Named vhost: serves the decoy static site for the expected domain.
server {
listen unix:/dev/shm/h1.sock proxy_protocol;
# listen 5001;
server_name bingo.107421.xyz;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
location / {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; # enable HSTS
root /var/www/html/;
index index.html index.htm;
}
}
# HTTP2 UDS listener
# Named vhost for HTTP/2 traffic on the h2c socket; serves the same site.
server {
listen unix:/dev/shm/h2c.sock proxy_protocol;
http2 on;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name bingo.107421.xyz;
# grpc settings
# grpc_read_timeout 1h;
# grpc_send_timeout 1h;
# grpc_set_header X-Real-IP $remote_addr;
# Decoy website
location / {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; # enable HSTS
root /var/www/html;
index index.html index.htm;
}
}

View File

@@ -0,0 +1,57 @@
# Restrict access to the website (by IP or wrong domain name) and return 400.
# Variant of the UDS vhost config using the legacy `listen ... http2` flag
# (nginx < 1.25.1) instead of the standalone `http2 on;` directive.
# Catch-all HTTP/2 listener: unmatched Host/SNI requests are rejected here.
server {
listen unix:/dev/shm/h2c.sock http2 proxy_protocol default_server;
# listen 5000;
# http2 on;
# Trust the PROXY protocol header from the local Unix-socket peer so the
# real client address (forwarded by the fronting proxy) is used.
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name _;
return 400 "not allowed";
}
# Catch-all HTTP/1.x listener; same rejection for unknown hosts.
server {
# listen 5001;
listen unix:/dev/shm/h1.sock proxy_protocol default_server;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name _;
return 400 "not allowed";
}
# HTTP1 UDS listener
# Named vhost: serves the decoy static site for the expected domain.
server {
listen unix:/dev/shm/h1.sock proxy_protocol;
# listen 5001;
server_name bingo.107421.xyz;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
location / {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; # enable HSTS
root /var/www/html/;
index index.html index.htm;
}
}
# HTTP2 UDS listener
# Named vhost for HTTP/2 traffic on the h2c socket; serves the same site.
server {
listen unix:/dev/shm/h2c.sock http2 proxy_protocol;
set_real_ip_from unix:;
real_ip_header proxy_protocol;
server_name bingo.107421.xyz;
# grpc settings
# grpc_read_timeout 1h;
# grpc_send_timeout 1h;
# grpc_set_header X-Real-IP $remote_addr;
# Decoy website
location / {
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; # enable HSTS
root /var/www/html;
index index.html index.htm;
}
}

View File

@@ -17,3 +17,26 @@ echo -e "Package: *\nPin: origin nginx.org\nPin: release o=nginx\nPin-Priority:
sudo apt update sudo apt update
sudo apt install -y nginx sudo apt install -y nginx
mkdir -p /var/www/html/
# 上传文件
mv dist.zip /var/www/html/
cd /var/www/html/
unzip dist.zip
chown -R www-data:www-data /var/www/html/
chmod -R 755 /var/www/html/
# nginx配置
systemctl restart nginx
systemctl enable nginx
systemctl restart xray
systemctl enable xray
journalctl -u nginx -n 100 -f
journalctl -u xray -n 100 -f

File diff suppressed because it is too large Load Diff

View File

@@ -1,11 +1,28 @@
vmess://eyJ2IjoiMiIsInBzIjoidXMtY2VudGUtZnJlZSIsImFkZCI6Im5vcnRoZmxhbmsuMTA3NDIxLnh5eiIsInBvcnQiOjQ0MywiaWQiOiJkZTA0YWRkOS01YzY4LThiYWItOTUwYy0wOGNkNTMyMGRmMTgiLCJhaWQiOjAsInNjeSI6ImF1dG8iLCJuZXQiOiJ3cyIsInBhdGgiOiIvdm1lc3MiLCJ0bHMiOiJ0bHMifQ== vless://f8702759-f402-4e85-92a6-8540d577de22@43.154.83.213:24443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#TC-HongKong
trojan://Vad3.123a%29asd1234-asdasd.asdazzS.123@43.154.83.213:443?flow=xtls-rprx-vision&security=tls&sni=xx.tc.hk.go.107421.xyz&alpn=h2&fp=firefox&type=http&path=trh2#TC-HK-Trojan vless://302fbcb8-e096-46a1-906f-e879ec5ab0c5@154.40.34.106:443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=xx.l4.ca.bg.107421.xyz#BFC-LosAngles
vless://b4bdf874-8c03-5bd8-8fd7-5e409dfd82c0@43.154.83.213:443?encryption=none&flow=xtls-rprx-vision&security=tls&sni=book.107421.xyz&alpn=h2%2Chttp%2F1.1&fp=firefox&type=tcp&headerType=none#TC-HK-Vless vless://9fa9b4e7-d76d-4890-92cf-ce9251a76f59@43.154.83.213:24451?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#Care-DEU-Dusseldorf-R-TCHK
vless://1dde748d-32ee-4ed7-b70b-f2376d34e7e5@132.145.87.10:443?encryption=none&flow=xtls-rprx-vision&security=tls&sni=xx.s0.yy.ac.107421.xyz&alpn=h2&fp=firefox&type=tcp&headerType=none&host=xx.s0.yy.ac.107421.xyz#Oracle-Seoul-ARM01-Vless
vless://1dde748d-32ee-4ed7-b70b-f2376d34e7e5@140.238.14.103:443?encryption=none&flow=xtls-rprx-vision&security=tls&sni=xx.s4.cc.hh.107421.xyz&alpn=h2&fp=firefox&type=tcp&headerType=none&host=xx.s4.cc.hh.107421.xyz#Oracle-Seoul-Vless
socks://emVhc2xpdHk6bG92ZW1tLjIz@42.192.52.227:22888#TC-SH-LosA-BanH
vless://717c40e7-efeb-45bc-8f5e-4e6e7d9eea18@89.208.251.209:443?encryption=none&flow=xtls-rprx-vision&security=tls&sni=octopus.107421.xyz&alpn=h2&fp=firefox&type=tcp&headerType=none#BanH-LosA-Vless
trojan://Vad3.123a%29asd%401234-as.dasd.asdazzS.123@89.208.251.209:443?flow=xtls-rprx-vision&security=tls&sni=xx.l4.cc.nn.107421.xyz&alpn=h2&fp=firefox&type=http&host=xx.l4.cc.nn.107421.xyz&path=status#BanH-LosA-Trojan
trojan://ADasfsaad12.21312%40113.adsaddasds.112321@140.238.14.103:443?flow=xtls-rprx-vision&security=tls&sni=xx.t2.ll.c0.107421.xyz&alpn=h2&fp=firefox&type=http&host=xx.t2.ll.c0.107421.xyz&path=vlh2tokyo2#Oracle-Tokyo-Trojan
vless://12491d80-745c-4e26-a58b-edf584afb208@129.146.57.94:443?encryption=none&flow=xtls-rprx-vision&security=tls&sni=zc.p4.cc.xx.107421.xyz&alpn=h2&fp=firefox&type=tcp&headerType=none#Oracle-Pheonix-ARM02-Vless
vless://7e27da0c-3013-4ed4-817b-50cc76a0bf81@43.154.83.213:24449?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#Oracle-KOR-Seoul-R-TCHK
vless://c751811a-404f-4a05-bc41-5d572e741398@43.154.83.213:25000?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#Oracle-JPN-Tokyo-R-TCHK
vless://fce2a9c6-1380-4ffa-ba84-6b9ec9ee2eea@43.154.83.213:25001?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#Oracle-USA-Phoenix-R-TCHK
vless://93be1d17-8e02-449d-bb99-683ed46fbe50@43.154.83.213:24453?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#CF-HongKong-R-TCHK
vless://cdf0b19a-9524-48d5-b697-5f10bb567734@43.154.83.213:24452?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-HongKong
vless://b1417d92-998d-410b-a5f3-cf144b6f043e@45.134.50.233:443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=bingo.107421.xyz#Care-DEU-Dusseldorf
vless://1089cc14-557e-47ac-ac85-c07957b3cce3@140.238.14.103:443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=xx.s4.cc.hh.107421.xyz#Oracle-KOR-Seoul
vless://6055eac4-dee7-463b-b575-d30ea94bb768@43.154.83.213:24444?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-DEU-Frankfurt
vless://1cd284b2-d3d8-4165-b773-893f836c2b51@43.154.83.213:24445?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-KOR-Seoul
vless://bf0e9c35-84a9-460e-b5bf-2fa9f2fb3bca@43.154.83.213:24446?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-JPN-Tokyo
vless://adc19390-373d-4dfc-b0f6-19fab1b6fbf6@43.154.83.213:24447?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-GBR-London
vless://e31bc28e-8ebd-4d72-a98e-9227f26dfac3@43.154.83.213:24448?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-SGP
vless://56fb312c-bdb0-48ca-bf66-4a2dd34040c6@43.154.83.213:24450?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=book.107421.xyz#FV-USA-LosAngles
vless://86c50e3a-5b87-49dd-bd20-03c7f2735e40@bingo.pp.icederce.ip-ddns.com:8443?encryption=none&security=tls&type=ws&sni=pp.icederce.ip-ddns.com&host=pp.icederce.ip-ddns.com&path=/?ed=2560#CF_VIDEO_1
vless://86c50e3a-5b87-49dd-bd20-03c7f2735e40@bingo.icederce.ip-ddns.com:8443?encryption=none&security=tls&type=ws&sni=pp.icederce.ip-ddns.com&host=pp.icederce.ip-ddns.com&path=/?ed=2560#CF_VIDEO_2
vless://21dab95b-088e-47bd-8351-609fd23cb33c@140.238.14.103:20443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=xx.t2.ll.c0.107421.xyz#Oracle-JPN-Tokyo-R-OSel
vless://4c2dd763-56e5-408f-bc8f-dbf4c1fe41f9@140.238.14.103:21443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=xx.o1.vl.s4.107421.xyz#Oracle-JPN-Osaka-R-OSel
vless://de576486-e254-4d9d-949a-37088358ec23@140.238.14.103:22443?type=tcp&encryption=none&security=tls&path=%2f&flow=xtls-rprx-vision&sni=xx.p2.vl.s4.107421.xyz#Oracle-USA-Phoenix-R-OSel

View File

@@ -1,896 +0,0 @@
#---------------------------------------------------#
## 更新2022-09-28 09:02:50
## 感谢https://github.com/Hackl0us/SS-Rule-Snippet
## 链接https://link.oness.xyz/link/0fHPiayjsMIn6BUC?clash=1
#---------------------------------------------------#
# HTTP 代理端口
port: 7890
# SOCKS5 代理端口
socks-port: 7891
# Linux 和 macOS 的 redir 代理端口
redir-port: 7892
# 允许局域网的连接
allow-lan: true
# 规则模式Rule规则 / Global全局代理/ Direct全局直连
mode: Rule
# 设置日志输出级别 (默认级别silent即不输出任何内容以避免因日志内容过大而导致程序内存溢出
# 5 个级别silent / info / warning / error / debug。级别越高日志输出量越大越倾向于调试若需要请自行开启。
log-level: info
# Clash 的 RESTful API
external-controller: '0.0.0.0:9090'
# RESTful API 的口令
secret: ''
# 您可以将静态网页资源(如 clash-dashboard放置在一个目录中clash 将会服务于 `RESTful API/ui`
# 参数应填写配置目录的相对路径或绝对路径。
# external-ui: folder
proxies:
# vmess
# cipher support auto/aes-128-gcm/chacha20-poly1305/none
- name: "Tencent-Shanghai-Relay"
type: vmess
server: 42.192.52.227
port: 19999
uuid: 7318178c-5583-40dd-996c-a0add1f8fc1e
alterId: 0
cipher: auto
# udp: true
tls: false
skip-cert-verify: true
# servername: example.com # priority over wss host
network: http
http-opts:
host:
path:
- /v2ice-vmess-tcp-seoul
# headers:
# Host: v2ray.com
# max-early-data: 2048
# early-data-header-name: Sec-WebSocket-Protocol
proxy-groups:
- name: Proxy
type: select
# disable-udp: true
proxies:
- Tencent-Shanghai-Relay
- name: Direct
type: select
proxies:
- DIRECT
- name: Domestic
type: select
proxies:
- DIRECT
- Proxy
- name: Others
type: select
proxies:
- Proxy
- DIRECT
# 规则
rules:
# anti-ads
- DOMAIN-KEYWORD,adservice,REJECT
- DOMAIN-SUFFIX,adcolony.com,REJECT
- DOMAIN-SUFFIX,adinall.com,REJECT
- DOMAIN-SUFFIX,admaster.com.cn,REJECT
- DOMAIN-SUFFIX,admob.com,REJECT
- DOMAIN-SUFFIX,adnxs.com,REJECT
- DOMAIN-SUFFIX,adnyg.com,REJECT
- DOMAIN-SUFFIX,adsensor.org,REJECT
- DOMAIN-SUFFIX,adsymptotic.com,REJECT
- DOMAIN-SUFFIX,adthor.com,REJECT
- DOMAIN-SUFFIX,adwhirl.com,REJECT
- DOMAIN-SUFFIX,amazon-adsystem.com,REJECT
- DOMAIN-SUFFIX,amobee.com,REJECT
- DOMAIN-SUFFIX,app-adforce.jp,REJECT
- DOMAIN-SUFFIX,appads.com,REJECT
- DOMAIN-SUFFIX,appcpi.net,REJECT
- DOMAIN-SUFFIX,appier.net,REJECT
- DOMAIN-SUFFIX,applift.com,REJECT
- DOMAIN-SUFFIX,applovin.com,REJECT
- DOMAIN-SUFFIX,applvn.com,REJECT
- DOMAIN-SUFFIX,apsalar.com,REJECT
- DOMAIN-SUFFIX,apxadtracking.net,REJECT
- DOMAIN-SUFFIX,axonix.com,REJECT
- DOMAIN-SUFFIX,bayimob.com,REJECT
- DOMAIN-SUFFIX,bjvvqu.cn,REJECT
- DOMAIN-SUFFIX,bulldogcpi.com,REJECT
- DOMAIN-SUFFIX,clotfun.mobi,REJECT
- DOMAIN-SUFFIX,clotfun.online,REJECT
- DOMAIN-SUFFIX,cloudmobi.net,REJECT
- DOMAIN-SUFFIX,crwdcntrl.net,REJECT
- DOMAIN-SUFFIX,ctrmi.com,REJECT
- DOMAIN-SUFFIX,exosrv.com,REJECT
- DOMAIN-SUFFIX,go2cloud.org,REJECT
- DOMAIN-SUFFIX,growingio.com,REJECT
- DOMAIN-SUFFIX,haloapps.com,REJECT
- DOMAIN-SUFFIX,hypers.com,REJECT
- DOMAIN-SUFFIX,idealads.net,REJECT
- DOMAIN-SUFFIX,inmobi.cn,REJECT
- DOMAIN-SUFFIX,inmobi.com,REJECT
- DOMAIN-SUFFIX,inmobi.net,REJECT
- DOMAIN-SUFFIX,inmobicdn.cn,REJECT
- DOMAIN-SUFFIX,inmobicdn.net,REJECT
- DOMAIN-SUFFIX,inner-active.mobi,REJECT
- DOMAIN-SUFFIX,insurads.com,REJECT
- DOMAIN-SUFFIX,ironsrc.com,REJECT
- DOMAIN-SUFFIX,irs01.com,REJECT
- DOMAIN-SUFFIX,iskyworker.com,REJECT
- DOMAIN-SUFFIX,juicyads.com,REJECT
- DOMAIN-SUFFIX,kochava.com,REJECT
- DOMAIN-SUFFIX,leadboltmobile.net,REJECT
- DOMAIN-SUFFIX,lenzmx.com,REJECT
- DOMAIN-SUFFIX,liveadvert.com,REJECT
- DOMAIN-SUFFIX,lnk0.com,REJECT
- DOMAIN-SUFFIX,lnk8.cn,REJECT
- DOMAIN-SUFFIX,localytics.com,REJECT
- DOMAIN-SUFFIX,mdfull.com,REJECT
- DOMAIN-SUFFIX,measurementapi.com,REJECT
- DOMAIN-SUFFIX,medialytics.com,REJECT
- DOMAIN-SUFFIX,meetrics.com,REJECT
- DOMAIN-SUFFIX,meetrics.net,REJECT
- DOMAIN-SUFFIX,miaozhen.com,REJECT
- DOMAIN-SUFFIX,mmstat.com,REJECT
- DOMAIN-SUFFIX,moatads.com,REJECT
- DOMAIN-SUFFIX,mobclix.com,REJECT
- DOMAIN-SUFFIX,mopub.com,REJECT
- DOMAIN-SUFFIX,okjhb.xyz,REJECT
- DOMAIN-SUFFIX,openx.net,REJECT
- DOMAIN-SUFFIX,outbrain.com,REJECT
- DOMAIN-SUFFIX,pubmatic.com,REJECT
- DOMAIN-SUFFIX,qchannel01.cn,REJECT
- DOMAIN-SUFFIX,rayjump.com,REJECT
- DOMAIN-SUFFIX,rtbasia.com,REJECT
- DOMAIN-SUFFIX,rubiconproject.com,REJECT
- DOMAIN-SUFFIX,scorecardresearch.com,REJECT
- DOMAIN-SUFFIX,sdkclick.com,REJECT
- DOMAIN-SUFFIX,shuzilm.cn,REJECT
- DOMAIN-SUFFIX,smaato.net,REJECT
- DOMAIN-SUFFIX,smartadserver.com,REJECT
- DOMAIN-SUFFIX,smartnews-ads.com,REJECT
- DOMAIN-SUFFIX,supersonic.com,REJECT
- DOMAIN-SUFFIX,supersonicads.com,REJECT
- DOMAIN-SUFFIX,tagtic.cn,REJECT
- DOMAIN-SUFFIX,tanv.com,REJECT
- DOMAIN-SUFFIX,tanx.com,REJECT
- DOMAIN-SUFFIX,tapjoy.com,REJECT
- DOMAIN-SUFFIX,trafficjunky.net,REJECT
- DOMAIN-SUFFIX,turn.com,REJECT
- DOMAIN-SUFFIX,uri6.com,REJECT
- DOMAIN-SUFFIX,vidoomy.com,REJECT
- DOMAIN-SUFFIX,vungle.com,REJECT
- DOMAIN-SUFFIX,wedolook.com,REJECT
- DOMAIN-SUFFIX,xdrig.com,REJECT
- DOMAIN-SUFFIX,yumimobi.com,REJECT
- DOMAIN-SUFFIX,zu08e.cn,REJECT
- DOMAIN-SUFFIX,ad.cmvideo.cn,REJECT
- DOMAIN-SUFFIX,ad.daum.net,REJECT
- DOMAIN,abema-adx.ameba.jp,REJECT
- DOMAIN,ad.12306.cn,REJECT
- DOMAIN,ad.360in.com,REJECT
- DOMAIN,ad.51wnl-cq.com,REJECT
- DOMAIN,ad.caiyunapp.com,REJECT
- DOMAIN,ad.huajiao.com,REJECT
- DOMAIN,ad.hzyoka.com,REJECT
- DOMAIN,ad.jiemian.com,REJECT
- DOMAIN,ad.qingting.fm,REJECT
- DOMAIN,ad.wappalyzer.com,REJECT
- DOMAIN,ad-cn.jovcloud.com,REJECT
- DOMAIN,adextra.51wnl-cq.com,REJECT
- DOMAIN,api.adnet.mob.com,REJECT
- DOMAIN,ads.adadapted.com,REJECT
- DOMAIN,ads.chinadaily.com.cn,REJECT
- DOMAIN,ads.daydaycook.com.cn,REJECT
- DOMAIN,ads.weilitoutiao.net,REJECT
- DOMAIN,adsapi.manhuaren.com,REJECT
- DOMAIN,adsdk.dmzj.com,REJECT
- DOMAIN,adserver.pandora.com,REJECT
- DOMAIN,adshow.58.com,REJECT
- DOMAIN,adui.tg.meitu.com,REJECT
- DOMAIN,adv.bandi.so,REJECT
- DOMAIN,app-ad.variflight.com,REJECT
- DOMAIN,appnext.hs.llnwd.net,REJECT
- DOMAIN,appnext-a.akamaihd.net,REJECT
- DOMAIN,ggs.myzaker.com,REJECT
- DOMAIN,itad.linetv.tw,REJECT
- DOMAIN,ja.chushou.tv,REJECT
- DOMAIN,mads.suning.com,REJECT
- DOMAIN,mobileads.msn.com,REJECT
- DOMAIN,mopnativeadv.037201.com,REJECT
- DOMAIN,nativeadv.dftoutiao.com,REJECT
- DOMAIN-SUFFIX,iadsdk.apple.com,REJECT
- DOMAIN-SUFFIX,ads.internal.unity3d.com,REJECT
- DOMAIN-SUFFIX,ads.prd.ie.internal.unity3d.com,REJECT
- DOMAIN-SUFFIX,unityads.unity3d.com,REJECT
- DOMAIN,optimus-ads.amap.com,REJECT
- DOMAIN,optimus-ads.amap.com.w.alikunlun.com,REJECT
- DOMAIN,tunion-api.m.taobao.com,REJECT
- DOMAIN,adproxy.autohome.com.cn,REJECT
- DOMAIN,rd.autohome.com.cn,REJECT
- DOMAIN,al.autohome.com.cn,REJECT
- DOMAIN,applogapi.autohome.com.cn,REJECT
- DOMAIN-SUFFIX,cpro.baidu.com,REJECT
- DOMAIN-SUFFIX,pos.baidu.com,REJECT
- DOMAIN,afd.baidu.com,REJECT
- DOMAIN,als.baidu.com,REJECT
- DOMAIN,duclick.baidu.com,REJECT
- DOMAIN,mobads.baidu.com,REJECT
- DOMAIN,mobads-logs.baidu.com,REJECT
- DOMAIN,nsclick.baidu.com,REJECT
- DOMAIN,ad.toutiao.com,REJECT
- DOMAIN,adx.yiche.com,REJECT
- DOMAIN,log.ycapp.yiche.com,REJECT
- DOMAIN,advertise.baicizhan.com,REJECT
- DOMAIN,advertise.baicizhan.org,REJECT
- DOMAIN,galaxy.bjcathay.com,REJECT
- DOMAIN,mdrecv.app.cntvwb.cn,REJECT
- DOMAIN,sdapprecv.app.cntvwb.cn,REJECT
- DOMAIN,vdapprecv.app.cntvwb.cn,REJECT
- DOMAIN,ad.21cn.com,REJECT
- DOMAIN,ad.k.21cn.com,REJECT
- DOMAIN,admarket.21cn.com,REJECT
- DOMAIN,adshows.21cn.com,REJECT
- DOMAIN,atrace.chelaile.net.cn,REJECT
- DOMAIN,logs.chelaile.net.cn,REJECT
- DOMAIN-SUFFIX,doubleclick.net,REJECT
- DOMAIN-SUFFIX,googleadservices.com,REJECT
- DOMAIN-SUFFIX,googleadsserving.cn,REJECT
- DOMAIN-SUFFIX,googlesyndication.com,REJECT
- DOMAIN-SUFFIX,da.mgtv.com,REJECT
- DOMAIN-SUFFIX,da.hunantv.com,REJECT
- DOMAIN,adx.hupu.com,REJECT
- DOMAIN,adx-api.hupu.com,REJECT
- DOMAIN,goblin.hupu.com,REJECT
- DOMAIN,t7z.cupid.iqiyi.com,REJECT
- IP-CIDR,101.227.97.240/32,REJECT,no-resolve
- IP-CIDR,101.227.200.11/32,REJECT,no-resolve
- IP-CIDR,101.227.200.28/32,REJECT,no-resolve
- IP-CIDR,124.192.153.42/32,REJECT,no-resolve
- DOMAIN-SUFFIX,deliver.ifeng.com,REJECT
- DOMAIN,api.newad.ifeng.com,REJECT
- DOMAIN,ifengad.3g.ifeng.com,REJECT
- DOMAIN,adserviceretry.kugou.com,REJECT
- DOMAIN,ads.service.kugou.com,REJECT
- DOMAIN,adsfile.bssdlbig.kugou.com,REJECT
- DOMAIN,g.koowo.com,REJECT
- DOMAIN,kgmobilestat.kugou.com,REJECT
- DOMAIN,kgmobilestatbak.kugou.com,REJECT
- DOMAIN,mobilelog.kugou.com,REJECT
- DOMAIN,mobilead.kuwo.cn,REJECT
- DOMAIN,rich.kuwo.cn,REJECT
- DOMAIN,ad-stat.ksosoft.com,REJECT
- DOMAIN,img.auction-ads.wpscdn.cn,REJECT
- DOMAIN,counter.kingsoft.com,REJECT
- DOMAIN,counter.ksosoft.com,REJECT
- DOMAIN,minfo.wps.cn,REJECT
- DOMAIN,mobad.ijinshan.com,REJECT
- DOMAIN,ups.ksmobile.net,REJECT
- DOMAIN,ws.ksmobile.net,REJECT
- DOMAIN-SUFFIX,webp2p.letv.com,REJECT
- DOMAIN,ark.letv.com,REJECT
- DOMAIN,emma-414870e223.huodonghezi.com,REJECT
- DOMAIN,g3.letv.com,REJECT
- DOMAIN,n.mark.letv.com,REJECT
- DOMAIN,ad.hpplay.cn,REJECT
- DOMAIN,adcdn.hpplay.cn,REJECT
- DOMAIN,adeng.hpplay.cn,REJECT
- DOMAIN,rp.hpplay.cn,REJECT
- DOMAIN-SUFFIX,ad.intl.xiaomi.com,REJECT
- DOMAIN-SUFFIX,ad.xiaomi.com,REJECT
- DOMAIN-SUFFIX,admob.xiaomi.com,REJECT
- DOMAIN,adv.sec.intl.miui.com,REJECT
- DOMAIN,adv.sec.miui.com,REJECT
- DOMAIN,ad.api.moji.com,REJECT
- DOMAIN,adlaunch.moji.com,REJECT
- DOMAIN,ads.mojicdn.com,REJECT
- DOMAIN,v1.log.moji.com,REJECT
- DOMAIN,ad.bn.netease.com,REJECT
- DOMAIN,ad.yixin.im,REJECT
- DOMAIN,admusicpic.music.126.net,REJECT
- DOMAIN,gorgon.youdao.com,REJECT
- DOMAIN,iadmat.nosdn.127.net,REJECT
- DOMAIN,iadmusicmat.music.126.net,REJECT
- DOMAIN,iadmusicmatvideo.music.126.net,REJECT
- DOMAIN,impservice.dictapp.youdao.com,REJECT
- DOMAIN,impservice.youdao.com,REJECT
- DOMAIN,log.yex.youdao.com,REJECT
- DOMAIN,log-yex.youdao.com,REJECT
- DOMAIN,n.3g.163.com,REJECT
- DOMAIN,nex.163.com,REJECT
- DOMAIN,yt-adp.nosdn.127.net,REJECT
- DOMAIN,yt-adp.ws.126.net,REJECT
- DOMAIN,ads.aplus.pptv.com,REJECT
- DOMAIN,ads.aplusapi.pptv.com,REJECT
- DOMAIN,asimgs.pplive.cn,REJECT
- DOMAIN,de.as.pptv.com,REJECT
- DOMAIN,regist.fotoable.com,REJECT
- DOMAIN,cdn.adapi.fotoable.com,REJECT
- DOMAIN,adnew.wifi8.com,REJECT
- DOMAIN,adfile.wifi8.com,REJECT
- DOMAIN-SUFFIX,beacon.sina.com.cn,REJECT
- DOMAIN,adimg.vue.weibo.com,REJECT
- DOMAIN,u1.img.mobile.sina.cn,REJECT
- DOMAIN,sax.sina.com.cn,REJECT
- DOMAIN,saxs.sina.com.cn,REJECT
- DOMAIN,saxn.sina.com.cn,REJECT
- DOMAIN-SUFFIX,ad.sohu.com,REJECT
- DOMAIN-SUFFIX,ads.sohu.com,REJECT
- DOMAIN-SUFFIX,aty.sohu.com,REJECT
- DOMAIN,imp.optaim.com,REJECT
- DOMAIN,v2.reachmax.cn,REJECT
- DOMAIN,track.sohu.com,REJECT
- DOMAIN,hui.sohu.com,REJECT
- DOMAIN-SUFFIX,e.qq.com,REJECT
- DOMAIN-SUFFIX,gdt.qq.com,REJECT
- DOMAIN-SUFFIX,l.qq.com,REJECT
- DOMAIN,adsmind.apdcdn.tc.qq.com,REJECT
- DOMAIN,adsmind.gdtimg.com,REJECT
- DOMAIN,adsmind.tc.qq.com,REJECT
- DOMAIN,pgdt.gtimg.cn,REJECT
- DOMAIN,pgdt.gtimg.com,REJECT
- DOMAIN,pgdt.ugdtimg.com,REJECT
- DOMAIN,splashqqlive.gtimg.com,REJECT
- DOMAIN,wa.gtimg.com,REJECT
- DOMAIN,wxsnsdy.wxs.qq.com,REJECT
- DOMAIN,wxsnsdythumb.wxs.qq.com,REJECT
- DOMAIN,admonitor.thepaper.cn,REJECT
- DOMAIN,adpai.thepaper.cn,REJECT
- DOMAIN,imgadpai.thepaper.cn,REJECT
- DOMAIN,adsp.xunlei.com,REJECT
- DOMAIN,etl.xlmc.sandai.net,REJECT
- DOMAIN,adm.10jqka.com.cn,REJECT
- DOMAIN,stat.10jqka.com.cn,REJECT
- DOMAIN,ad-analysis.pconline.com.cn,REJECT
- DOMAIN,iad0ssl.pcauto.com.cn,REJECT
- DOMAIN,iad0ssl.pconline.com.cn,REJECT
- DOMAIN,imgad0.pcauto.com.cn,REJECT
- DOMAIN,imgad0.pconline.com.cn,REJECT
- DOMAIN,ivy.pchouse.com.cn,REJECT
- DOMAIN,a.wkanx.com,REJECT
- DOMAIN,cwx.lianwangtech.com,REJECT
- DOMAIN,c1wx.lianwangtech.com,REJECT
- DOMAIN,ad.ximalaya.com,REJECT
- DOMAIN,adbs.ximalaya.com,REJECT
- DOMAIN,adse.ximalaya.com,REJECT
- DOMAIN,adse.wsa.ximalaya.com,REJECT
- DOMAIN,adbehavior.wsa.ximalaya.com,REJECT
- DOMAIN,adsebs.ximalaya.com,REJECT
- DOMAIN,ads-img-qc.xhscdn.com,REJECT
- DOMAIN,ads-video-qc.xhscdn.com,REJECT
- DOMAIN,t-ads.xiaohongshu.com,REJECT
- DOMAIN-SUFFIX,atm.youku.com,REJECT
- DOMAIN,ad.mobile.youku.com,REJECT
- DOMAIN,iyes.youku.com,REJECT
- DOMAIN,apppv.zol.com.cn,REJECT
- DOMAIN,pvnapp.zol.com.cn,REJECT
# (DNS Cache Pollution Protection)
# > Google
- DOMAIN-SUFFIX,appspot.com,Proxy
- DOMAIN-SUFFIX,blogger.com,Proxy
- DOMAIN-SUFFIX,getoutline.org,Proxy
- DOMAIN-SUFFIX,gvt0.com,Proxy
- DOMAIN-SUFFIX,gvt1.com,Proxy
- DOMAIN-SUFFIX,gvt3.com,Proxy
- DOMAIN-SUFFIX,xn--ngstr-lra8j.com,Proxy
- DOMAIN-KEYWORD,google,Proxy
- DOMAIN-KEYWORD,blogspot,Proxy
# > Facebook
- DOMAIN-SUFFIX,cdninstagram.com,Proxy
- DOMAIN-SUFFIX,fb.com,Proxy
- DOMAIN-SUFFIX,fb.me,Proxy
- DOMAIN-SUFFIX,fbaddins.com,Proxy
- DOMAIN-SUFFIX,fbcdn.net,Proxy
- DOMAIN-SUFFIX,fbsbx.com,Proxy
- DOMAIN-SUFFIX,fbworkmail.com,Proxy
- DOMAIN-SUFFIX,instagram.com,Proxy
- DOMAIN-SUFFIX,m.me,Proxy
- DOMAIN-SUFFIX,messenger.com,Proxy
- DOMAIN-SUFFIX,oculus.com,Proxy
- DOMAIN-SUFFIX,oculuscdn.com,Proxy
- DOMAIN-SUFFIX,rocksdb.org,Proxy
- DOMAIN-SUFFIX,whatsapp.com,Proxy
- DOMAIN-SUFFIX,whatsapp.net,Proxy
- DOMAIN-KEYWORD,facebook,Proxy
# > Twitter
- DOMAIN-SUFFIX,pscp.tv,Proxy
- DOMAIN-SUFFIX,periscope.tv,Proxy
- DOMAIN-SUFFIX,t.co,Proxy
- DOMAIN-SUFFIX,twimg.co,Proxy
- DOMAIN-SUFFIX,twimg.com,Proxy
- DOMAIN-SUFFIX,twitpic.com,Proxy
- DOMAIN-SUFFIX,vine.co,Proxy
- DOMAIN-KEYWORD,twitter,Proxy
# > Telegram
- DOMAIN-SUFFIX,t.me,Proxy
- DOMAIN-SUFFIX,tdesktop.com,Proxy
- DOMAIN-SUFFIX,telegra.ph,Proxy
- DOMAIN-SUFFIX,telegram.me,Proxy
- DOMAIN-SUFFIX,telegram.org,Proxy
# > Line
- DOMAIN-SUFFIX,line.me,Proxy
- DOMAIN-SUFFIX,line-apps.com,Proxy
- DOMAIN-SUFFIX,line-scdn.net,Proxy
- DOMAIN-SUFFIX,naver.jp,Proxy
# > Other
- DOMAIN-SUFFIX,4shared.com,Proxy
- DOMAIN-SUFFIX,881903.com,Proxy
- DOMAIN-SUFFIX,abc.net.au,Proxy
- DOMAIN-SUFFIX,abebooks.com,Proxy
- DOMAIN-SUFFIX,amazon.co.jp,Proxy
- DOMAIN-SUFFIX,apigee.com,Proxy
- DOMAIN-SUFFIX,apk-dl.com,Proxy
- DOMAIN-SUFFIX,apkmirror.com,Proxy
- DOMAIN-SUFFIX,apkmonk.com,Proxy
- DOMAIN-SUFFIX,apkpure.com,Proxy
- DOMAIN-SUFFIX,aptoide.com,Proxy
- DOMAIN-SUFFIX,archive.is,Proxy
- DOMAIN-SUFFIX,archive.org,Proxy
- DOMAIN-SUFFIX,arte.tv,Proxy
- DOMAIN-SUFFIX,ask.com,Proxy
- DOMAIN-SUFFIX,avgle.com,Proxy
- DOMAIN-SUFFIX,badoo.com,Proxy
- DOMAIN-SUFFIX,bandwagonhost.com,Proxy
- DOMAIN-SUFFIX,bbc.com,Proxy
- DOMAIN-SUFFIX,behance.net,Proxy
- DOMAIN-SUFFIX,bibox.com,Proxy
- DOMAIN-SUFFIX,biggo.com.tw,Proxy
- DOMAIN-SUFFIX,binance.com,Proxy
- DOMAIN-SUFFIX,bitcointalk.org,Proxy
- DOMAIN-SUFFIX,bitfinex.com,Proxy
- DOMAIN-SUFFIX,bitmex.com,Proxy
- DOMAIN-SUFFIX,bit-z.com,Proxy
- DOMAIN-SUFFIX,bloglovin.com,Proxy
- DOMAIN-SUFFIX,bloomberg.cn,Proxy
- DOMAIN-SUFFIX,bloomberg.com,Proxy
- DOMAIN-SUFFIX,book.com.tw,Proxy
- DOMAIN-SUFFIX,booklive.jp,Proxy
- DOMAIN-SUFFIX,books.com.tw,Proxy
- DOMAIN-SUFFIX,box.com,Proxy
- DOMAIN-SUFFIX,brookings.edu,Proxy
- DOMAIN-SUFFIX,businessinsider.com,Proxy
- DOMAIN-SUFFIX,bwh1.net,Proxy
- DOMAIN-SUFFIX,castbox.fm,Proxy
- DOMAIN-SUFFIX,cbc.ca,Proxy
- DOMAIN-SUFFIX,cdw.com,Proxy
- DOMAIN-SUFFIX,change.org,Proxy
- DOMAIN-SUFFIX,ck101.com,Proxy
- DOMAIN-SUFFIX,clarionproject.org,Proxy
- DOMAIN-SUFFIX,clyp.it,Proxy
- DOMAIN-SUFFIX,cna.com.tw,Proxy
- DOMAIN-SUFFIX,comparitech.com,Proxy
- DOMAIN-SUFFIX,conoha.jp,Proxy
- DOMAIN-SUFFIX,crucial.com,Proxy
- DOMAIN-SUFFIX,cts.com.tw,Proxy
- DOMAIN-SUFFIX,cw.com.tw,Proxy
- DOMAIN-SUFFIX,cyberctm.com,Proxy
- DOMAIN-SUFFIX,dailymotion.com,Proxy
- DOMAIN-SUFFIX,dailyview.tw,Proxy
- DOMAIN-SUFFIX,daum.net,Proxy
- DOMAIN-SUFFIX,daumcdn.net,Proxy
- DOMAIN-SUFFIX,dcard.tw,Proxy
- DOMAIN-SUFFIX,deepdiscount.com,Proxy
- DOMAIN-SUFFIX,deezer.com,Proxy
- DOMAIN-SUFFIX,depositphotos.com,Proxy
- DOMAIN-SUFFIX,disconnect.me,Proxy
- DOMAIN-SUFFIX,discordapp.com,Proxy
- DOMAIN-SUFFIX,discordapp.net,Proxy
- DOMAIN-SUFFIX,disqus.com,Proxy
- DOMAIN-SUFFIX,dns2go.com,Proxy
- DOMAIN-SUFFIX,dropbox.com,Proxy
- DOMAIN-SUFFIX,dropboxusercontent.com,Proxy
- DOMAIN-SUFFIX,duckduckgo.com,Proxy
- DOMAIN-SUFFIX,dw.com,Proxy
- DOMAIN-SUFFIX,dynu.com,Proxy
- DOMAIN-SUFFIX,earthcam.com,Proxy
- DOMAIN-SUFFIX,ebookservice.tw,Proxy
- DOMAIN-SUFFIX,economist.com,Proxy
- DOMAIN-SUFFIX,edgecastcdn.net,Proxy
- DOMAIN-SUFFIX,edu,Proxy
- DOMAIN-SUFFIX,elpais.com,Proxy
- DOMAIN-SUFFIX,enanyang.my,Proxy
- DOMAIN-SUFFIX,euronews.com,Proxy
- DOMAIN-SUFFIX,feedly.com,Proxy
- DOMAIN-SUFFIX,files.wordpress.com,Proxy
- DOMAIN-SUFFIX,flickr.com,Proxy
- DOMAIN-SUFFIX,flitto.com,Proxy
- DOMAIN-SUFFIX,foreignpolicy.com,Proxy
- DOMAIN-SUFFIX,friday.tw,Proxy
- DOMAIN-SUFFIX,gate.io,Proxy
- DOMAIN-SUFFIX,getlantern.org,Proxy
- DOMAIN-SUFFIX,getsync.com,Proxy
- DOMAIN-SUFFIX,globalvoices.org,Proxy
- DOMAIN-SUFFIX,goo.ne.jp,Proxy
- DOMAIN-SUFFIX,goodreads.com,Proxy
- DOMAIN-SUFFIX,gov.tw,Proxy
- DOMAIN-SUFFIX,gumroad.com,Proxy
- DOMAIN-SUFFIX,hbg.com,Proxy
- DOMAIN-SUFFIX,hightail.com,Proxy
- DOMAIN-SUFFIX,hk01.com,Proxy
- DOMAIN-SUFFIX,hkbf.org,Proxy
- DOMAIN-SUFFIX,hkbookcity.com,Proxy
- DOMAIN-SUFFIX,hkej.com,Proxy
- DOMAIN-SUFFIX,hket.com,Proxy
- DOMAIN-SUFFIX,hkgolden.com,Proxy
- DOMAIN-SUFFIX,hootsuite.com,Proxy
- DOMAIN-SUFFIX,hudson.org,Proxy
- DOMAIN-SUFFIX,huobi.pro,Proxy
- DOMAIN-SUFFIX,initiummall.com,Proxy
- DOMAIN-SUFFIX,ipfs.io,Proxy
- DOMAIN-SUFFIX,issuu.com,Proxy
- DOMAIN-SUFFIX,japantimes.co.jp,Proxy
- DOMAIN-SUFFIX,jiji.com,Proxy
- DOMAIN-SUFFIX,jinx.com,Proxy
- DOMAIN-SUFFIX,jkforum.net,Proxy
- DOMAIN-SUFFIX,joinmastodon.org,Proxy
- DOMAIN-SUFFIX,kakao.com,Proxy
- DOMAIN-SUFFIX,lihkg.com,Proxy
- DOMAIN-SUFFIX,live.com,Proxy
- DOMAIN-SUFFIX,mail.ru,Proxy
- DOMAIN-SUFFIX,matters.news,Proxy
- DOMAIN-SUFFIX,medium.com,Proxy
- DOMAIN-SUFFIX,mega.nz,Proxy
- DOMAIN-SUFFIX,mil,Proxy
- DOMAIN-SUFFIX,mobile01.com,Proxy
- DOMAIN-SUFFIX,naver.com,Proxy
- DOMAIN-SUFFIX,nikkei.com,Proxy
- DOMAIN-SUFFIX,nofile.io,Proxy
- DOMAIN-SUFFIX,now.com,Proxy
- DOMAIN-SUFFIX,nyt.com,Proxy
- DOMAIN-SUFFIX,nytchina.com,Proxy
- DOMAIN-SUFFIX,nytcn.me,Proxy
- DOMAIN-SUFFIX,nytco.com,Proxy
- DOMAIN-SUFFIX,nytimes.com,Proxy
- DOMAIN-SUFFIX,nytimg.com,Proxy
- DOMAIN-SUFFIX,nytlog.com,Proxy
- DOMAIN-SUFFIX,nytstyle.com,Proxy
- DOMAIN-SUFFIX,ok.ru,Proxy
- DOMAIN-SUFFIX,okex.com,Proxy
- DOMAIN-SUFFIX,pcloud.com,Proxy
- DOMAIN-SUFFIX,pinimg.com,Proxy
- DOMAIN-SUFFIX,pixiv.net,Proxy
- DOMAIN-SUFFIX,pornhub.com,Proxy
- DOMAIN-SUFFIX,pureapk.com,Proxy
- DOMAIN-SUFFIX,quora.com,Proxy
- DOMAIN-SUFFIX,quoracdn.net,Proxy
- DOMAIN-SUFFIX,rakuten.co.jp,Proxy
- DOMAIN-SUFFIX,reddit.com,Proxy
- DOMAIN-SUFFIX,redditmedia.com,Proxy
- DOMAIN-SUFFIX,resilio.com,Proxy
- DOMAIN-SUFFIX,reuters.com,Proxy
- DOMAIN-SUFFIX,scmp.com,Proxy
- DOMAIN-SUFFIX,scribd.com,Proxy
- DOMAIN-SUFFIX,seatguru.com,Proxy
- DOMAIN-SUFFIX,shadowsocks.org,Proxy
- DOMAIN-SUFFIX,slideshare.net,Proxy
- DOMAIN-SUFFIX,soundcloud.com,Proxy
- DOMAIN-SUFFIX,startpage.com,Proxy
- DOMAIN-SUFFIX,steamcommunity.com,Proxy
- DOMAIN-SUFFIX,steemit.com,Proxy
- DOMAIN-SUFFIX,t66y.com,Proxy
- DOMAIN-SUFFIX,teco-hk.org,Proxy
- DOMAIN-SUFFIX,teco-mo.org,Proxy
- DOMAIN-SUFFIX,teddysun.com,Proxy
- DOMAIN-SUFFIX,theinitium.com,Proxy
- DOMAIN-SUFFIX,tineye.com,Proxy
- DOMAIN-SUFFIX,torproject.org,Proxy
- DOMAIN-SUFFIX,tumblr.com,Proxy
- DOMAIN-SUFFIX,turbobit.net,Proxy
- DOMAIN-SUFFIX,twitch.tv,Proxy
- DOMAIN-SUFFIX,udn.com,Proxy
- DOMAIN-SUFFIX,unseen.is,Proxy
- DOMAIN-SUFFIX,upmedia.mg,Proxy
- DOMAIN-SUFFIX,uptodown.com,Proxy
- DOMAIN-SUFFIX,ustream.tv,Proxy
- DOMAIN-SUFFIX,uwants.com,Proxy
- DOMAIN-SUFFIX,v2ray.com,Proxy
- DOMAIN-SUFFIX,viber.com,Proxy
- DOMAIN-SUFFIX,videopress.com,Proxy
- DOMAIN-SUFFIX,vimeo.com,Proxy
- DOMAIN-SUFFIX,voxer.com,Proxy
- DOMAIN-SUFFIX,vzw.com,Proxy
- DOMAIN-SUFFIX,w3schools.com,Proxy
- DOMAIN-SUFFIX,wattpad.com,Proxy
- DOMAIN-SUFFIX,whoer.net,Proxy
- DOMAIN-SUFFIX,wikimapia.org,Proxy
- DOMAIN-SUFFIX,wikipedia.org,Proxy
- DOMAIN-SUFFIX,wire.com,Proxy
- DOMAIN-SUFFIX,worldcat.org,Proxy
- DOMAIN-SUFFIX,wsj.com,Proxy
- DOMAIN-SUFFIX,wsj.net,Proxy
- DOMAIN-SUFFIX,xboxlive.com,Proxy
- DOMAIN-SUFFIX,xvideos.com,Proxy
- DOMAIN-SUFFIX,yahoo.com,Proxy
- DOMAIN-SUFFIX,yesasia.com,Proxy
- DOMAIN-SUFFIX,yes-news.com,Proxy
- DOMAIN-SUFFIX,yomiuri.co.jp,Proxy
- DOMAIN-SUFFIX,you-get.org,Proxy
- DOMAIN-SUFFIX,zb.com,Proxy
- DOMAIN-SUFFIX,zello.com,Proxy
- DOMAIN-SUFFIX,zeronet.io,Proxy
- DOMAIN,cdn-images.mailchimp.com,Proxy
- DOMAIN,id.heroku.com,Proxy
- DOMAIN-KEYWORD,github,Proxy
- DOMAIN-KEYWORD,jav,Proxy
- DOMAIN-KEYWORD,pinterest,Proxy
- DOMAIN-KEYWORD,porn,Proxy
- DOMAIN-KEYWORD,wikileaks,Proxy
# (Region-Restricted Access Denied)
- DOMAIN-SUFFIX,apartmentratings.com,Proxy
- DOMAIN-SUFFIX,apartments.com,Proxy
- DOMAIN-SUFFIX,bankmobilevibe.com,Proxy
- DOMAIN-SUFFIX,bing.com,Proxy
- DOMAIN-SUFFIX,booktopia.com.au,Proxy
- DOMAIN-SUFFIX,centauro.com.br,Proxy
- DOMAIN-SUFFIX,clearsurance.com,Proxy
- DOMAIN-SUFFIX,costco.com,Proxy
- DOMAIN-SUFFIX,crackle.com,Proxy
- DOMAIN-SUFFIX,depositphotos.cn,Proxy
- DOMAIN-SUFFIX,dish.com,Proxy
- DOMAIN-SUFFIX,dmm.co.jp,Proxy
- DOMAIN-SUFFIX,dmm.com,Proxy
- DOMAIN-SUFFIX,dnvod.tv,Proxy
- DOMAIN-SUFFIX,esurance.com,Proxy
- DOMAIN-SUFFIX,extmatrix.com,Proxy
- DOMAIN-SUFFIX,fastpic.ru,Proxy
- DOMAIN-SUFFIX,flipboard.com,Proxy
- DOMAIN-SUFFIX,fnac.be,Proxy
- DOMAIN-SUFFIX,fnac.com,Proxy
- DOMAIN-SUFFIX,funkyimg.com,Proxy
- DOMAIN-SUFFIX,fxnetworks.com,Proxy
- DOMAIN-SUFFIX,gettyimages.com,Proxy
- DOMAIN-SUFFIX,jcpenney.com,Proxy
- DOMAIN-SUFFIX,kknews.cc,Proxy
- DOMAIN-SUFFIX,nationwide.com,Proxy
- DOMAIN-SUFFIX,nbc.com,Proxy
- DOMAIN-SUFFIX,nordstrom.com,Proxy
- DOMAIN-SUFFIX,nordstromimage.com,Proxy
- DOMAIN-SUFFIX,nordstromrack.com,Proxy
- DOMAIN-SUFFIX,read01.com,Proxy
- DOMAIN-SUFFIX,superpages.com,Proxy
- DOMAIN-SUFFIX,target.com,Proxy
- DOMAIN-SUFFIX,thinkgeek.com,Proxy
- DOMAIN-SUFFIX,tracfone.com,Proxy
- DOMAIN-SUFFIX,uploader.jp,Proxy
- DOMAIN-SUFFIX,vevo.com,Proxy
- DOMAIN-SUFFIX,viu.tv,Proxy
- DOMAIN-SUFFIX,vk.com,Proxy
- DOMAIN-SUFFIX,vsco.co,Proxy
- DOMAIN-SUFFIX,xfinity.com,Proxy
- DOMAIN-SUFFIX,zattoo.com,Proxy
- DOMAIN,abc.com,Proxy
- DOMAIN,abc.go.com,Proxy
- DOMAIN,abc.net.au,Proxy
- DOMAIN,wego.here.com,Proxy
# > Telegram
- DOMAIN-SUFFIX,t.me,Proxy
- DOMAIN-SUFFIX,tdesktop.com,Proxy
- DOMAIN-SUFFIX,telegra.ph,Proxy
- DOMAIN-SUFFIX,telegram.me,Proxy
- DOMAIN-SUFFIX,telegram.org,Proxy
- IP-CIDR,91.108.0.0/16,Proxy,no-resolve
- IP-CIDR,109.239.140.0/24,Proxy,no-resolve
- IP-CIDR,149.154.160.0/20,Proxy,no-resolve
- IP-CIDR6,2001:67c:4e8::/48,Proxy,no-resolve
- IP-CIDR6,2001:b28:f23d::/48,Proxy,no-resolve
- IP-CIDR6,2001:b28:f23f::/48,Proxy,no-resolve
#USER-AGENT,Roam*,Proxy
# (The Most Popular Sites)
# > Apple
# > Apple URL Shortener
- DOMAIN-SUFFIX,appsto.re,Proxy
# > TestFlight
- DOMAIN,beta.itunes.apple.com,Proxy
# > iBooks Store download
- DOMAIN,books.itunes.apple.com,Proxy
# > iTunes Store Movies Trailers
- DOMAIN,hls.itunes.apple.com,Proxy
# > App Store Preview
- DOMAIN,itunes.apple.com,Proxy
# > Spotlight
- DOMAIN,api-glb-sea.smoot.apple.com,Proxy
# > Dictionary
- DOMAIN,lookup-api.apple.com,Proxy
#PROCESS-NAME,LookupViewService,Proxy
# > Google
- DOMAIN-SUFFIX,abc.xyz,Proxy
- DOMAIN-SUFFIX,android.com,Proxy
- DOMAIN-SUFFIX,androidify.com,Proxy
- DOMAIN-SUFFIX,dialogflow.com,Proxy
- DOMAIN-SUFFIX,autodraw.com,Proxy
- DOMAIN-SUFFIX,capitalg.com,Proxy
- DOMAIN-SUFFIX,certificate-transparency.org,Proxy
- DOMAIN-SUFFIX,chrome.com,Proxy
- DOMAIN-SUFFIX,chromeexperiments.com,Proxy
- DOMAIN-SUFFIX,chromestatus.com,Proxy
- DOMAIN-SUFFIX,chromium.org,Proxy
- DOMAIN-SUFFIX,creativelab5.com,Proxy
- DOMAIN-SUFFIX,debug.com,Proxy
- DOMAIN-SUFFIX,deepmind.com,Proxy
- DOMAIN-SUFFIX,firebaseio.com,Proxy
- DOMAIN-SUFFIX,getmdl.io,Proxy
- DOMAIN-SUFFIX,ggpht.com,Proxy
- DOMAIN-SUFFIX,gmail.com,Proxy
- DOMAIN-SUFFIX,gmodules.com,Proxy
- DOMAIN-SUFFIX,godoc.org,Proxy
- DOMAIN-SUFFIX,golang.org,Proxy
- DOMAIN-SUFFIX,gstatic.com,Proxy
- DOMAIN-SUFFIX,gv.com,Proxy
- DOMAIN-SUFFIX,gwtproject.org,Proxy
- DOMAIN-SUFFIX,itasoftware.com,Proxy
- DOMAIN-SUFFIX,madewithcode.com,Proxy
- DOMAIN-SUFFIX,material.io,Proxy
- DOMAIN-SUFFIX,polymer-project.org,Proxy
- DOMAIN-SUFFIX,admin.recaptcha.net,Proxy
- DOMAIN-SUFFIX,recaptcha.net,Proxy
- DOMAIN-SUFFIX,shattered.io,Proxy
- DOMAIN-SUFFIX,synergyse.com,Proxy
- DOMAIN-SUFFIX,tensorflow.org,Proxy
- DOMAIN-SUFFIX,tiltbrush.com,Proxy
- DOMAIN-SUFFIX,waveprotocol.org,Proxy
- DOMAIN-SUFFIX,waymo.com,Proxy
- DOMAIN-SUFFIX,webmproject.org,Proxy
- DOMAIN-SUFFIX,webrtc.org,Proxy
- DOMAIN-SUFFIX,whatbrowser.org,Proxy
- DOMAIN-SUFFIX,widevine.com,Proxy
- DOMAIN-SUFFIX,x.company,Proxy
- DOMAIN-SUFFIX,youtu.be,Proxy
- DOMAIN-SUFFIX,yt.be,Proxy
- DOMAIN-SUFFIX,ytimg.com,Proxy
# > Steam
- DOMAIN,media.steampowered.com,Proxy
- DOMAIN,store.steampowered.com,Proxy
# > Other
- DOMAIN-SUFFIX,0rz.tw,Proxy
- DOMAIN-SUFFIX,4bluestones.biz,Proxy
- DOMAIN-SUFFIX,9bis.net,Proxy
- DOMAIN-SUFFIX,allconnected.co,Proxy
- DOMAIN-SUFFIX,amazonaws.com,Proxy
- DOMAIN-SUFFIX,aol.com,Proxy
- DOMAIN-SUFFIX,bcc.com.tw,Proxy
- DOMAIN-SUFFIX,bit.ly,Proxy
- DOMAIN-SUFFIX,bitshare.com,Proxy
- DOMAIN-SUFFIX,blog.jp,Proxy
- DOMAIN-SUFFIX,blogimg.jp,Proxy
- DOMAIN-SUFFIX,blogtd.org,Proxy
- DOMAIN-SUFFIX,broadcast.co.nz,Proxy
- DOMAIN-SUFFIX,camfrog.com,Proxy
- DOMAIN-SUFFIX,cfos.de,Proxy
- DOMAIN-SUFFIX,citypopulation.de,Proxy
- DOMAIN-SUFFIX,cloudfront.net,Proxy
- DOMAIN-SUFFIX,ctitv.com.tw,Proxy
- DOMAIN-SUFFIX,cuhk.edu.hk,Proxy
- DOMAIN-SUFFIX,cusu.hk,Proxy
- DOMAIN-SUFFIX,discuss.com.hk,Proxy
- DOMAIN-SUFFIX,dropboxapi.com,Proxy
- DOMAIN-SUFFIX,edditstatic.com,Proxy
- DOMAIN-SUFFIX,flickriver.com,Proxy
- DOMAIN-SUFFIX,focustaiwan.tw,Proxy
- DOMAIN-SUFFIX,free.fr,Proxy
- DOMAIN-SUFFIX,ftchinese.com,Proxy
- DOMAIN-SUFFIX,gigacircle.com,Proxy
- DOMAIN-SUFFIX,gov,Proxy
- DOMAIN-SUFFIX,hk-pub.com,Proxy
- DOMAIN-SUFFIX,hosting.co.uk,Proxy
- DOMAIN-SUFFIX,hwcdn.net,Proxy
- DOMAIN-SUFFIX,jtvnw.net,Proxy
- DOMAIN-SUFFIX,linksalpha.com,Proxy
- DOMAIN-SUFFIX,manyvids.com,Proxy
- DOMAIN-SUFFIX,myactimes.com,Proxy
- DOMAIN-SUFFIX,newsblur.com,Proxy
- DOMAIN-SUFFIX,now.im,Proxy
- DOMAIN-SUFFIX,redditlist.com,Proxy
- DOMAIN-SUFFIX,signal.org,Proxy
- DOMAIN-SUFFIX,sparknotes.com,Proxy
- DOMAIN-SUFFIX,streetvoice.com,Proxy
- DOMAIN-SUFFIX,ttvnw.net,Proxy
- DOMAIN-SUFFIX,tv.com,Proxy
- DOMAIN-SUFFIX,twitchcdn.net,Proxy
- DOMAIN-SUFFIX,typepad.com,Proxy
- DOMAIN-SUFFIX,udnbkk.com,Proxy
- DOMAIN-SUFFIX,whispersystems.org,Proxy
- DOMAIN-SUFFIX,wikia.com,Proxy
- DOMAIN-SUFFIX,wn.com,Proxy
- DOMAIN-SUFFIX,wolframalpha.com,Proxy
- DOMAIN-SUFFIX,x-art.com,Proxy
- DOMAIN-SUFFIX,yimg.com,Proxy
- DOMAIN-KEYWORD,dlercloud,Proxy
- DOMAIN-SUFFIX,dler.cloud,Proxy
# Local Area Network
- DOMAIN-KEYWORD,announce,DIRECT
- DOMAIN-KEYWORD,torrent,DIRECT
- DOMAIN-KEYWORD,tracker,DIRECT
- DOMAIN-SUFFIX,smtp,DIRECT
- DOMAIN-SUFFIX,local,DIRECT
- IP-CIDR,192.168.0.0/16,DIRECT
- IP-CIDR,10.0.0.0/8,DIRECT
- IP-CIDR,172.16.0.0/12,DIRECT
- IP-CIDR,127.0.0.0/8,DIRECT
- IP-CIDR,100.64.0.0/10,DIRECT
# # > IQIYI
# - IP-CIDR,101.227.0.0/16,Bilibili|iQIYI|NeteaseMusic|TencentVideo
# - IP-CIDR,101.224.0.0/13,Bilibili|iQIYI|NeteaseMusic|TencentVideo
# - IP-CIDR,119.176.0.0/12,Bilibili|iQIYI|NeteaseMusic|TencentVideo
# # > Youku
# - IP-CIDR,106.11.0.0/16,Bilibili|iQIYI|NeteaseMusic|TencentVideo
# > Telegram
- IP-CIDR,67.198.55.0/24,Proxy
- IP-CIDR,91.108.4.0/22,Proxy
- IP-CIDR,91.108.8.0/22,Proxy
- IP-CIDR,91.108.12.0/22,Proxy
- IP-CIDR,91.108.16.0/22,Proxy
- IP-CIDR,91.108.56.0/22,Proxy
- IP-CIDR,109.239.140.0/24,Proxy
- IP-CIDR,149.154.160.0/20,Proxy
- IP-CIDR,205.172.60.0/22,Proxy
# (Extra IP-CIDR)
# > Google
- IP-CIDR,35.190.247.0/24,Proxy
- IP-CIDR,64.233.160.0/19,Proxy
- IP-CIDR,66.102.0.0/20,Proxy
- IP-CIDR,66.249.80.0/20,Proxy
- IP-CIDR,72.14.192.0/18,Proxy
- IP-CIDR,74.125.0.0/16,Proxy
- IP-CIDR,108.177.8.0/21,Proxy
- IP-CIDR,172.217.0.0/16,Proxy
- IP-CIDR,173.194.0.0/16,Proxy
- IP-CIDR,209.85.128.0/17,Proxy
- IP-CIDR,216.58.192.0/19,Proxy
- IP-CIDR,216.239.32.0/19,Proxy
# > Facebook
- IP-CIDR,31.13.24.0/21,Proxy
- IP-CIDR,31.13.64.0/18,Proxy
- IP-CIDR,45.64.40.0/22,Proxy
- IP-CIDR,66.220.144.0/20,Proxy
- IP-CIDR,69.63.176.0/20,Proxy
- IP-CIDR,69.171.224.0/19,Proxy
- IP-CIDR,74.119.76.0/22,Proxy
- IP-CIDR,103.4.96.0/22,Proxy
- IP-CIDR,129.134.0.0/17,Proxy
- IP-CIDR,157.240.0.0/17,Proxy
- IP-CIDR,173.252.64.0/19,Proxy
- IP-CIDR,173.252.96.0/19,Proxy
- IP-CIDR,179.60.192.0/22,Proxy
- IP-CIDR,185.60.216.0/22,Proxy
- IP-CIDR,204.15.20.0/22,Proxy
# > Twitter
- IP-CIDR,69.195.160.0/19,Proxy
- IP-CIDR,104.244.42.0/21,Proxy
- IP-CIDR,192.133.76.0/22,Proxy
- IP-CIDR,199.16.156.0/22,Proxy
- IP-CIDR,199.59.148.0/22,Proxy
- IP-CIDR,199.96.56.0/21,Proxy
- IP-CIDR,202.160.128.0/22,Proxy
- IP-CIDR,209.237.192.0/19,Proxy
# GeoIP China
- GEOIP,CN,Domestic
- MATCH,Others

View File

@@ -1,28 +0,0 @@
mixed-port: 7890
allow-lan: false
external-controller: 127.0.0.1:61889
secret: 5c090877-21bb-4006-a97c-0bd4bfbb9be9
log-level: info
ipv6: false
proxy-groups:
- name: PROXY-ALL
type: select
proxies:
- tc-sh
proxies:
- name: proxy-server
type: socks5
server: 192.168.11.19
port: 22999
username: zeaslity
password: password
- name: tc-sh
type: socks5
server: 42.192.52.227
port: 28888
username: zeaslity
password: lovemm.23
- name: tc-sh-2
type: socks5
server: 42.192.52.227
port: 28889

View File

@@ -1,2 +1,14 @@
bash -c "$(curl -L https://github.com/XTLS/Xray-install/raw/main/install-release.sh)" @ install --without-geodata
sed -i "s/nobody/root/g" /etc/systemd/system/xray.service
systemctl daemon-reload
systemctl restart xray
systemctl enable xray
bash -c "$(curl -L https://github.com/XTLS/Xray-install/raw/main/install-release.sh)" @ upgrade
journalctl -u xray -n 100 -f
xx.l4.ca.bg.107421.xyz

View File

@@ -1,744 +0,0 @@
#!/usr/bin/env bash
#====================================================
# System Request:Debian 9+/Ubuntu 18.04+/Centos 7+
# Author: wulabing
# Description: Xray onekey Management
# email: admin@wulabing.com
#====================================================
# Force a predictable PATH so binaries resolve the same way regardless of
# the invoking environment.
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# Map the DEL key to backspace for the interactive `read` prompts below.
stty erase ^?
# Re-anchor the working directory to the directory containing this script.
cd "$(
cd "$(dirname "$0")" || exit
pwd
)" || exit
# Terminal color configuration (raw ANSI escapes, rendered by `echo -e`).
Green="\033[32m"
Red="\033[31m"
Yellow="\033[33m"
Blue="\033[36m"
Font="\033[0m"
GreenBG="\033[42;37m"
RedBG="\033[41;37m"
OK="${Green}[OK]${Font}"
ERROR="${Red}[ERROR]${Font}"
# Global variables and filesystem paths used throughout the script.
shell_version="1.3.7"
github_branch="main"
xray_conf_dir="/usr/local/etc/xray"
website_dir="/www/xray_web/"
xray_access_log="/var/log/xray/access.log"
xray_error_log="/var/log/xray/error.log"
cert_dir="/usr/local/etc/xray"
domain_tmp_dir="/usr/local/etc/xray"
cert_group="nobody"
# Random length (4..15) for the generated WebSocket path component.
random_num=$((RANDOM % 12 + 4))
# NOTE(review): VERSION is transformed here *before* /etc/os-release is
# sourced in system_check, so this only reshapes an inherited env var,
# if any — TODO confirm intent.
VERSION=$(echo "${VERSION}" | awk -F "[()]" '{print $2}')
# Random hidden WebSocket path, e.g. /3f5a9c.../
WS_PATH="/$(head -n 10 /dev/urandom | md5sum | head -c ${random_num})/"
# Detect which deployment flavor an existing Xray config represents.
# Sets the global `shell_mode` to "ws" (WebSocket mixed mode), "tcp",
# or "None" when no config file is present.
function shell_mode_check() {
  local conf="${xray_conf_dir}/config.json"
  if [[ ! -f "$conf" ]]; then
    shell_mode="None"
    return
  fi
  if grep -q "wsSettings" "$conf"; then
    shell_mode="ws"
  else
    shell_mode="tcp"
  fi
}
# Print an informational message prefixed with the green [OK] tag.
# $1 - message text; embedded escape sequences are rendered (%b).
function print_ok() {
  printf '%b\n' "${OK} ${Blue} $1 ${Font}"
}
# Print an error message prefixed with the red [ERROR] tag.
# $1 - message text; embedded escape sequences are rendered (%b).
function print_error() {
  printf '%b\n' "${ERROR} ${RedBG} $1 ${Font}"
}
# Abort unless the script is being executed by root (UID 0).
function is_root() {
  if [[ "$UID" -ne 0 ]]; then
    print_error "当前用户不是 root 用户,请切换到 root 用户后重新执行脚本"
    exit 1
  fi
  print_ok "当前用户是 root 用户,开始安装流程"
}
# Report success/failure of the immediately preceding command.
# $1 - description of the step; on failure the whole script exits with 1.
judge() {
  local rv=$?
  if (( rv == 0 )); then
    print_ok "$1 完成"
    sleep 1
  else
    print_error "$1 失败"
    exit 1
  fi
}
# Identify the distro, set the package-install command ($INS), register the
# nginx repository for the detected package manager, and disable any active
# firewall. Exits when the distro is unsupported.
# Globals set: INS, cert_group; sources /etc/os-release (ID, VERSION_ID, ...).
function system_check() {
source '/etc/os-release'
if [[ "${ID}" == "centos" && ${VERSION_ID} -ge 7 ]]; then
print_ok "当前系统为 Centos ${VERSION_ID} ${VERSION}"
INS="yum install -y"
wget -N -P /etc/yum.repos.d/ https://raw.githubusercontent.com/wulabing/Xray_onekey/${github_branch}/basic/nginx.repo
elif [[ "${ID}" == "ol" ]]; then
print_ok "当前系统为 Oracle Linux ${VERSION_ID} ${VERSION}"
INS="yum install -y"
wget -N -P /etc/yum.repos.d/ https://raw.githubusercontent.com/wulabing/Xray_onekey/${github_branch}/basic/nginx.repo
elif [[ "${ID}" == "debian" && ${VERSION_ID} -ge 9 ]]; then
print_ok "当前系统为 Debian ${VERSION_ID} ${VERSION}"
INS="apt install -y"
# Clean up possible leftovers from an earlier run
rm -f /etc/apt/sources.list.d/nginx.list
$INS lsb-release gnupg2
echo "deb http://nginx.org/packages/debian $(lsb_release -cs) nginx" >/etc/apt/sources.list.d/nginx.list
curl -fsSL https://nginx.org/keys/nginx_signing.key | apt-key add -
apt update
elif [[ "${ID}" == "ubuntu" && $(echo "${VERSION_ID}" | cut -d '.' -f1) -ge 18 ]]; then
print_ok "当前系统为 Ubuntu ${VERSION_ID} ${UBUNTU_CODENAME}"
INS="apt install -y"
# Clean up possible leftovers from an earlier run
rm -f /etc/apt/sources.list.d/nginx.list
$INS lsb-release gnupg2
echo "deb http://nginx.org/packages/ubuntu $(lsb_release -cs) nginx" >/etc/apt/sources.list.d/nginx.list
curl -fsSL https://nginx.org/keys/nginx_signing.key | apt-key add -
apt update
else
print_error "当前系统为 ${ID} ${VERSION_ID} 不在支持的系统列表内"
exit 1
fi
# Debian/Ubuntu use "nogroup" rather than "nobody" as the unprivileged group.
if [[ $(grep "nogroup" /etc/group) ]]; then
cert_group="nogroup"
fi
$INS dbus
# Stop and disable every common firewall so the proxy ports stay reachable.
systemctl stop firewalld
systemctl disable firewalld
systemctl stop nftables
systemctl disable nftables
systemctl stop ufw
systemctl disable ufw
}
# Install Nginx via the distro package manager (re-running the install when
# it is already present) and ensure the conf.d drop-in directory exists.
function nginx_install() {
if ! command -v nginx >/dev/null 2>&1; then
${INS} nginx
judge "Nginx 安装"
else
print_ok "Nginx 已存在"
${INS} nginx
fi
# Legacy cleanup: some installs lack the drop-in dir used by configure_nginx.
mkdir -p /etc/nginx/conf.d >/dev/null 2>&1
}
# Install every dependency the installer needs (wget, lsof, tar, cron, unzip,
# curl, systemd, TLS libraries, jq), with per-distro package-name handling,
# and enable the cron daemon.
function dependency_install() {
${INS} wget lsof tar
judge "安装 wget lsof tar"
if [[ "${ID}" == "centos" || "${ID}" == "ol" ]]; then
${INS} crontabs
else
${INS} cron
fi
judge "安装 crontab"
# Ensure root's crontab file exists with safe permissions and start the
# appropriate cron daemon (crond on RHEL-family, cron elsewhere).
if [[ "${ID}" == "centos" || "${ID}" == "ol" ]]; then
touch /var/spool/cron/root && chmod 600 /var/spool/cron/root
systemctl start crond && systemctl enable crond
else
touch /var/spool/cron/crontabs/root && chmod 600 /var/spool/cron/crontabs/root
systemctl start cron && systemctl enable cron
fi
judge "crontab 自启动配置 "
${INS} unzip
judge "安装 unzip"
${INS} curl
judge "安装 curl"
# upgrade systemd
${INS} systemd
judge "安装/升级 systemd"
# Nginx comes from packages now - no compilation, so build tools are no longer needed.
# if [[ "${ID}" == "centos" || "${ID}" == "ol" ]]; then
# yum -y groupinstall "Development tools"
# else
# ${INS} build-essential
# fi
# judge "编译工具包 安装"
if [[ "${ID}" == "centos" ]]; then
${INS} pcre pcre-devel zlib-devel epel-release openssl openssl-devel
elif [[ "${ID}" == "ol" ]]; then
${INS} pcre pcre-devel zlib-devel openssl openssl-devel
# Oracle Linux VERSION_IDs vary by release date; just enable both EPEL repos.
yum-config-manager --enable ol7_developer_EPEL >/dev/null 2>&1
yum-config-manager --enable ol8_developer_EPEL >/dev/null 2>&1
else
${INS} libpcre3 libpcre3-dev zlib1g-dev openssl libssl-dev
fi
${INS} jq
# Fall back to a bundled jq binary when the package manager provides none.
if ! command -v jq; then
wget -P /usr/bin https://raw.githubusercontent.com/wulabing/Xray_onekey/${github_branch}/binary/jq && chmod +x /usr/bin/jq
judge "安装 jq"
fi
# Some systems lack xray's default bin directory; create it up front.
mkdir /usr/local/bin >/dev/null 2>&1
}
# Raise the open-file-descriptor limits and permanently disable SELinux on
# RHEL-family systems.
function basic_optimization() {
# Maximum number of open files: drop any existing nofile lines, then append
# fresh soft/hard limits.
sed -i '/^\*\ *soft\ *nofile\ *[[:digit:]]*/d' /etc/security/limits.conf
sed -i '/^\*\ *hard\ *nofile\ *[[:digit:]]*/d' /etc/security/limits.conf
echo '* soft nofile 65536' >>/etc/security/limits.conf
echo '* hard nofile 65536' >>/etc/security/limits.conf
# Disable SELinux (persistently via config, immediately via setenforce).
if [[ "${ID}" == "centos" || "${ID}" == "ol" ]]; then
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
setenforce 0
fi
}
# Ask for the server's domain, resolve it via ping, compare against this
# machine's public IPv4 (api64.ipify.org), and let the user abort when the
# A record does not point here.
# Globals set: domain, domain_ip, local_ip (local_ip is reused later by
# generate_certificate).
function domain_check() {
read -rp "请输入你的域名信息(eg: www.wulabing.com):" domain
# Extract the IP between the parentheses of ping's first output line.
domain_ip=$(ping "${domain}" -c 1 | sed '1{s/[^(]*(//;s/).*//;q}')
print_ok "正在获取 IP 地址信息,请耐心等待"
local_ip=$(curl -4L api64.ipify.org)
echo -e "域名通过 DNS 解析的 IP 地址:${domain_ip}"
echo -e "本机公网 IP 地址: ${local_ip}"
sleep 2
if [[ ${domain_ip} == "${local_ip}" ]]; then
print_ok "域名通过 DNS 解析的 IP 地址与 本机 IP 地址匹配"
sleep 2
else
print_error "请确保域名添加了正确的 A 记录,否则将无法正常使用 xray"
print_error "域名通过 DNS 解析的 IP 地址与 本机 IP 地址不匹配是否继续安装y/n" && read -r install
case $install in
[yY][eE][sS] | [yY])
print_ok "继续安装"
sleep 2
;;
*)
print_error "安装终止"
exit 2
;;
esac
fi
}
# Ensure TCP port $1 is free: when something is listening on it, show the
# offending processes and SIGKILL them after a 5 second grace period.
# $1 - port number to check.
function port_exist_check() {
if [[ 0 -eq $(lsof -i:"$1" | grep -i -c "listen") ]]; then
print_ok "$1 端口未被占用"
sleep 1
else
print_error "检测到 $1 端口被占用,以下为 $1 端口占用信息"
lsof -i:"$1"
print_error "5s 后将尝试自动 kill 占用进程"
sleep 5
# NOTE(review): kill -9 as first resort — listeners get no chance to shut
# down cleanly; the PID column is taken from lsof output minus its header.
lsof -i:"$1" | awk '{print $2}' | grep -v "PID" | xargs kill -9
print_ok "kill 完成"
sleep 1
fi
}
# Self-update: compare the local shell_version against the one published on
# GitHub and, after user confirmation, overwrite this script with the newer
# copy (then exit so the user re-runs the new version).
function update_sh() {
ol_version=$(curl -L -s https://raw.githubusercontent.com/wulabing/Xray_onekey/${github_branch}/install.sh | grep "shell_version=" | head -1 | awk -F '=|"' '{print $3}')
# sort -rV puts the highest version first; if ours is not on top, it is older.
if [[ "$shell_version" != "$(echo -e "$shell_version\n$ol_version" | sort -rV | head -1)" ]]; then
print_ok "存在新版本,是否更新 [Y/N]?"
read -r update_confirm
case $update_confirm in
[yY][eE][sS] | [yY])
wget -N --no-check-certificate https://raw.githubusercontent.com/wulabing/Xray_onekey/${github_branch}/install.sh
print_ok "更新完成"
print_ok "您可以通过 bash $0 执行本程序"
exit 0
;;
*) ;;
esac
else
print_ok "当前版本为最新版本"
print_ok "您可以通过 bash $0 执行本程序"
fi
}
# Promote the freshly generated temp config to the live config.json.
# Refuses to act (and reports an error) when the temp file is missing or
# empty, so a failed jq edit never clobbers a working config.
function xray_tmp_config_file_check_and_use() {
  local tmp_conf="${xray_conf_dir}/config_tmp.json"
  if [[ ! -s "$tmp_conf" ]]; then
    print_error "xray 配置文件修改异常"
    return
  fi
  mv -f "$tmp_conf" "${xray_conf_dir}/config.json"
}
# Write the (possibly newly generated) client UUID into the TCP inbound
# (inbounds[0]) of the Xray config.
# Globals: UUID (generated here when empty), xray_conf_dir.
# Fix: the UUID used to be spliced unquoted into the jq program text; it is
# now passed safely via `jq --arg`, and the useless `cat |` is gone.
function modify_UUID() {
  [ -z "$UUID" ] && UUID=$(cat /proc/sys/kernel/random/uuid)
  jq --arg uuid "$UUID" \
    'setpath(["inbounds",0,"settings","clients",0,"id"]; $uuid)' \
    "${xray_conf_dir}/config.json" >"${xray_conf_dir}/config_tmp.json"
  xray_tmp_config_file_check_and_use
  judge "Xray TCP UUID 修改"
}
# Write the client UUID into the WebSocket inbound (inbounds[1]) of the
# Xray config. Globals: UUID (must already be set), xray_conf_dir.
# Fix: UUID is passed via `jq --arg` instead of being spliced unquoted into
# the jq program text; the useless `cat |` is gone.
function modify_UUID_ws() {
  jq --arg uuid "$UUID" \
    'setpath(["inbounds",1,"settings","clients",0,"id"]; $uuid)' \
    "${xray_conf_dir}/config.json" >"${xray_conf_dir}/config_tmp.json"
  xray_tmp_config_file_check_and_use
  judge "Xray ws UUID 修改"
}
# Write the random WebSocket path into the TLS fallback entry
# (inbounds[0].settings.fallbacks[2].path). Globals: WS_PATH, xray_conf_dir.
# Fix: WS_PATH is passed via `jq --arg` instead of being spliced unquoted
# into the jq program text; the useless `cat |` is gone.
function modify_fallback_ws() {
  jq --arg path "$WS_PATH" \
    'setpath(["inbounds",0,"settings","fallbacks",2,"path"]; $path)' \
    "${xray_conf_dir}/config.json" >"${xray_conf_dir}/config_tmp.json"
  xray_tmp_config_file_check_and_use
  judge "Xray fallback_ws 修改"
}
# Write the random WebSocket path into the ws inbound's stream settings
# (inbounds[1].streamSettings.wsSettings.path). Globals: WS_PATH,
# xray_conf_dir.
# Fix: WS_PATH is passed via `jq --arg` instead of being spliced unquoted
# into the jq program text; the useless `cat |` is gone.
function modify_ws() {
  jq --arg path "$WS_PATH" \
    'setpath(["inbounds",1,"streamSettings","wsSettings","path"]; $path)' \
    "${xray_conf_dir}/config.json" >"${xray_conf_dir}/config_tmp.json"
  xray_tmp_config_file_check_and_use
  judge "Xray ws 修改"
}
# Download the Nginx site template, substitute the real domain for the "xxx"
# placeholder, and restart Nginx.
# Globals: domain (from domain_check); sets nginx_conf (later edited by acme).
function configure_nginx() {
nginx_conf="/etc/nginx/conf.d/${domain}.conf"
cd /etc/nginx/conf.d/ && rm -f ${domain}.conf && wget -O ${domain}.conf https://raw.githubusercontent.com/wulabing/Xray_onekey/${github_branch}/config/web.conf
sed -i "s/xxx/${domain}/g" ${nginx_conf}
judge "Nginx config modify"
systemctl restart nginx
}
# Interactively choose the Xray listen port (default 443), validate it,
# free it when occupied, and write it into inbounds[0].port of the config.
# Fixes: non-numeric input previously hit `[[ $PORT -le 0 ]]` — an
# arithmetic context — producing a confusing bash error (and expanding the
# input as a variable name); the value is now checked against ^[0-9]+$
# first. It is also handed to jq via --argjson instead of being spliced
# into the program text, and the useless `cat |` is gone.
function modify_port() {
  read -rp "请输入端口号(默认443)" PORT
  [ -z "$PORT" ] && PORT="443"
  if ! [[ "$PORT" =~ ^[0-9]+$ ]] || [[ $PORT -le 0 ]] || [[ $PORT -gt 65535 ]]; then
    print_error "请输入 0-65535 之间的值"
    exit 1
  fi
  port_exist_check $PORT
  jq --argjson port "$PORT" 'setpath(["inbounds",0,"port"]; $port)' \
    "${xray_conf_dir}/config.json" >"${xray_conf_dir}/config_tmp.json"
  xray_tmp_config_file_check_and_use
  judge "Xray 端口 修改"
}
# Fetch the VLESS+TCP+XTLS config template and personalize UUID and port.
function configure_xray() {
cd /usr/local/etc/xray && rm -f config.json && wget -O config.json https://raw.githubusercontent.com/wulabing/Xray_onekey/${github_branch}/config/xray_xtls-rprx-direct.json
modify_UUID
modify_port
}
# Fetch the TLS+WebSocket mixed-mode config template and personalize both
# inbound UUIDs, the port, and the WebSocket path (fallback + wsSettings).
function configure_xray_ws() {
cd /usr/local/etc/xray && rm -f config.json && wget -O config.json https://raw.githubusercontent.com/wulabing/Xray_onekey/${github_branch}/config/xray_tls_ws_mix-rprx-direct.json
modify_UUID
modify_UUID_ws
modify_port
modify_fallback_ws
modify_ws
}
# Install Xray via the official install-release.sh and persist the chosen
# domain for later share-link generation.
function xray_install() {
print_ok "安装 Xray"
curl -L https://github.com/XTLS/Xray-install/raw/main/install-release.sh | bash -s -- install
judge "Xray 安装"
# Saved for building the Xray import links later
echo $domain >$domain_tmp_dir/domain
judge "域名记录"
}
# Install the acme.sh client used for certificate issuance.
function ssl_install() {
# Issuance goes through Nginx webroot mode, so no extra deps are required.
# if [[ "${ID}" == "centos" || "${ID}" == "ol" ]]; then
# ${INS} socat nc
# else
# ${INS} socat netcat
# fi
# judge "安装 SSL 证书生成脚本依赖"
curl -L get.acme.sh | bash
judge "安装 SSL 证书生成脚本"
}
# Issue a Let's Encrypt certificate via acme.sh webroot mode and install it
# to /ssl/xray.{crt,key}, with xray restarted on each renewal.
# Relies on line-number surgery of $nginx_conf (set by configure_nginx):
# line 6 is temporarily commented out and a webroot directive inserted
# after it, then both edits are reverted at the end.
function acme() {
"$HOME"/.acme.sh/acme.sh --set-default-ca --server letsencrypt
# Comment out line 6 of the site config and insert "root $website_dir;"
# after it so acme.sh can answer the HTTP-01 challenge from the webroot.
sed -i "6s/^/#/" "$nginx_conf"
sed -i "6a\\\troot $website_dir;" "$nginx_conf"
systemctl restart nginx
if "$HOME"/.acme.sh/acme.sh --issue -d "${domain}" --webroot "$website_dir" -k ec-256 --force; then
print_ok "SSL 证书生成成功"
sleep 2
if "$HOME"/.acme.sh/acme.sh --installcert -d "${domain}" --fullchainpath /ssl/xray.crt --keypath /ssl/xray.key --reloadcmd "systemctl restart xray" --ecc --force; then
print_ok "SSL 证书配置成功"
sleep 2
fi
else
print_error "SSL 证书生成失败"
# Drop the failed issuance state so a retry starts clean.
rm -rf "$HOME/.acme.sh/${domain}_ecc"
exit 1
fi
# Undo the temporary edit: delete the inserted line 7, uncomment line 6.
sed -i "7d" "$nginx_conf"
sed -i "6s/#//" "$nginx_conf"
}
# Decide how to provision /ssl/xray.{crt,key}: reuse existing files, install
# from an earlier acme.sh issuance, or fall back to a fresh self-signed cert
# followed by a real acme.sh issuance. Finally fix ownership for the xray
# (nobody) user.
function ssl_judge_and_install() {
mkdir -p /ssl >/dev/null 2>&1
if [[ -f "/ssl/xray.key" || -f "/ssl/xray.crt" ]]; then
print_ok "/ssl 目录下证书文件已存在"
print_ok "是否删除 /ssl 目录下的证书文件 [Y/N]?"
read -r ssl_delete
case $ssl_delete in
[yY][eE][sS] | [yY])
rm -rf /ssl/*
print_ok "已删除"
;;
*) ;;
esac
fi
if [[ -f "/ssl/xray.key" || -f "/ssl/xray.crt" ]]; then
echo "证书文件已存在"
elif [[ -f "$HOME/.acme.sh/${domain}_ecc/${domain}.key" && -f "$HOME/.acme.sh/${domain}_ecc/${domain}.cer" ]]; then
echo "证书文件已存在"
"$HOME"/.acme.sh/acme.sh --installcert -d "${domain}" --fullchainpath /ssl/xray.crt --keypath /ssl/xray.key --ecc
judge "证书应用"
else
mkdir /ssl
# Stage the self-signed cert first so xray can start even if issuance fails.
cp -a $cert_dir/self_signed_cert.pem /ssl/xray.crt
cp -a $cert_dir/self_signed_key.pem /ssl/xray.key
ssl_install
acme
fi
# Xray runs as the nobody user by default; adapt certificate permissions.
chown -R nobody.$cert_group /ssl/*
}
# Generate a 10-year self-signed TLS certificate with `xray tls cert`, using
# the machine's public IP (global: local_ip, set by domain_check) as
# CN/O/SAN, install it under $cert_dir and hand ownership to nobody:$cert_group.
# Fixes: the original failure branch was a *quoted string* after `||`, i.e.
# bash tried to execute a command literally named
# `print_error "..." && exit 1` — so an invalid certificate never aborted
# the script. It is now a proper command group. `jq -r` replaces the
# quote-stripping sed, and all expansions are quoted.
function generate_certificate() {
  signedcert=$(xray tls cert -domain="$local_ip" -name="$local_ip" -org="$local_ip" -expire=87600h)
  echo "$signedcert" | jq -r '.certificate[]' | tee "$cert_dir/self_signed_cert.pem"
  echo "$signedcert" | jq -r '.key[]' >"$cert_dir/self_signed_key.pem"
  openssl x509 -in "$cert_dir/self_signed_cert.pem" -noout || {
    print_error "生成自签名证书失败"
    exit 1
  }
  print_ok "生成自签名证书成功"
  chown nobody."$cert_group" "$cert_dir/self_signed_cert.pem"
  chown nobody."$cert_group" "$cert_dir/self_signed_key.pem"
}
# Deploy the camouflage website: wipe /www/xray_web and unpack the bundled
# static site there.
function configure_web() {
rm -rf /www/xray_web
mkdir -p /www/xray_web
wget -O web.tar.gz https://raw.githubusercontent.com/wulabing/Xray_onekey/main/basic/web.tar.gz
tar xzf web.tar.gz -C /www/xray_web
judge "站点伪装"
rm -f web.tar.gz
}
# Fully remove Xray and the camouflage site and, on user request, also
# Nginx and acme.sh (including the /ssl certificates). Always exits 0.
function xray_uninstall() {
curl -L https://github.com/XTLS/Xray-install/raw/main/install-release.sh | bash -s -- remove --purge
rm -rf $website_dir
print_ok "是否卸载nginx [Y/N]?"
read -r uninstall_nginx
case $uninstall_nginx in
[yY][eE][sS] | [yY])
if [[ "${ID}" == "centos" || "${ID}" == "ol" ]]; then
yum remove nginx -y
else
apt purge nginx -y
fi
;;
*) ;;
esac
print_ok "是否卸载acme.sh [Y/N]?"
read -r uninstall_acme
case $uninstall_acme in
[yY][eE][sS] | [yY])
/root/.acme.sh/acme.sh --uninstall
rm -rf /root/.acme.sh
rm -rf /ssl/
;;
*) ;;
esac
print_ok "卸载完成"
exit 0
}
# Restart the web front-end and Xray, verifying each restart via judge().
function restart_all() {
  local unit
  for unit in "nginx:Nginx 启动" "xray:Xray 启动"; do
    systemctl restart "${unit%%:*}"
    judge "${unit#*:}"
  done
}
# Print VLESS TCP TLS/XTLS share links plus QR-code URLs for the current
# installation, reading UUID/port/flow from the live config and the domain
# saved by xray_install.
# Fix: `cat config | jq ... | tr -d '"'` replaced by `jq -r` reading the
# file directly (raw string output, no quote stripping needed).
function vless_xtls-rprx-direct_link() {
  UUID=$(jq -r '.inbounds[0].settings.clients[0].id' ${xray_conf_dir}/config.json)
  PORT=$(jq '.inbounds[0].port' ${xray_conf_dir}/config.json)
  FLOW=$(jq -r '.inbounds[0].settings.clients[0].flow' ${xray_conf_dir}/config.json)
  DOMAIN=$(cat ${domain_tmp_dir}/domain)
  print_ok "URL 链接VLESS + TCP + TLS"
  print_ok "vless://$UUID@$DOMAIN:$PORT?security=tls&flow=$FLOW#TLS_wulabing-$DOMAIN"
  print_ok "URL 链接VLESS + TCP + XTLS"
  print_ok "vless://$UUID@$DOMAIN:$PORT?security=xtls&flow=$FLOW#XTLS_wulabing-$DOMAIN"
  print_ok "-------------------------------------------------"
  print_ok "URL 二维码VLESS + TCP + TLS请在浏览器中访问"
  print_ok "https://api.qrserver.com/v1/create-qr-code/?size=400x400&data=vless://$UUID@$DOMAIN:$PORT?security=tls%26flow=$FLOW%23TLS_wulabing-$DOMAIN"
  print_ok "URL 二维码VLESS + TCP + XTLS请在浏览器中访问"
  print_ok "https://api.qrserver.com/v1/create-qr-code/?size=400x400&data=vless://$UUID@$DOMAIN:$PORT?security=xtls%26flow=$FLOW%23XTLS_wulabing-$DOMAIN"
}
# Print the Xray connection parameters for the VLESS+TCP deployment as a
# human-readable table.
# Fix: `cat config | jq ... | tr -d '"'` replaced by `jq -r` reading the
# file directly.
function vless_xtls-rprx-direct_information() {
  UUID=$(jq -r '.inbounds[0].settings.clients[0].id' ${xray_conf_dir}/config.json)
  PORT=$(jq '.inbounds[0].port' ${xray_conf_dir}/config.json)
  FLOW=$(jq -r '.inbounds[0].settings.clients[0].flow' ${xray_conf_dir}/config.json)
  DOMAIN=$(cat ${domain_tmp_dir}/domain)
  echo -e "${Red} Xray 配置信息 ${Font}"
  echo -e "${Red} 地址address:${Font} $DOMAIN"
  echo -e "${Red} 端口port${Font} $PORT"
  echo -e "${Red} 用户 IDUUID${Font} $UUID"
  echo -e "${Red} 流控flow${Font} $FLOW"
  echo -e "${Red} 加密方式security${Font} none "
  echo -e "${Red} 传输协议network${Font} tcp "
  echo -e "${Red} 伪装类型type${Font} none "
  echo -e "${Red} 底层传输安全:${Font} xtls 或 tls"
}
# Print the Xray connection parameters for the WebSocket mixed-mode
# deployment (path taken from the TLS fallback entry).
# Fix: `cat config | jq ... | tr -d '"'` replaced by `jq -r` reading the
# file directly.
function ws_information() {
  UUID=$(jq -r '.inbounds[0].settings.clients[0].id' ${xray_conf_dir}/config.json)
  PORT=$(jq '.inbounds[0].port' ${xray_conf_dir}/config.json)
  FLOW=$(jq -r '.inbounds[0].settings.clients[0].flow' ${xray_conf_dir}/config.json)
  WS_PATH=$(jq -r '.inbounds[0].settings.fallbacks[2].path' ${xray_conf_dir}/config.json)
  DOMAIN=$(cat ${domain_tmp_dir}/domain)
  echo -e "${Red} Xray 配置信息 ${Font}"
  echo -e "${Red} 地址address:${Font} $DOMAIN"
  echo -e "${Red} 端口port${Font} $PORT"
  echo -e "${Red} 用户 IDUUID${Font} $UUID"
  echo -e "${Red} 加密方式security${Font} none "
  echo -e "${Red} 传输协议network${Font} ws "
  echo -e "${Red} 伪装类型type${Font} none "
  echo -e "${Red} 路径path${Font} $WS_PATH "
  echo -e "${Red} 底层传输安全:${Font} tls "
}
function ws_link() {
  # Print shareable vless:// URLs and QR-code links for all three inbound
  # flavours (TCP+TLS, TCP+XTLS, WS+TLS).
  # Reads globals: xray_conf_dir, domain_tmp_dir; output goes via print_ok.
  # jq -r replaces the old `cat | jq | tr -d '"'` pipeline.
  UUID=$(jq -r '.inbounds[0].settings.clients[0].id' "${xray_conf_dir}/config.json")
  PORT=$(jq -r '.inbounds[0].port' "${xray_conf_dir}/config.json")
  FLOW=$(jq -r '.inbounds[0].settings.clients[0].flow' "${xray_conf_dir}/config.json")
  WS_PATH=$(jq -r '.inbounds[0].settings.fallbacks[2].path' "${xray_conf_dir}/config.json")
  # Parameter expansion replaces the `echo $WS_PATH | tr -d '/'` subshell.
  WS_PATH_WITHOUT_SLASH=${WS_PATH//\//}
  DOMAIN=$(cat "${domain_tmp_dir}/domain")
  print_ok "URL 链接VLESS + TCP + TLS"
  print_ok "vless://$UUID@$DOMAIN:$PORT?security=tls#TLS_wulabing-$DOMAIN"
  print_ok "URL 链接VLESS + TCP + XTLS"
  print_ok "vless://$UUID@$DOMAIN:$PORT?security=xtls&flow=$FLOW#XTLS_wulabing-$DOMAIN"
  print_ok "URL 链接VLESS + WebSocket + TLS"
  print_ok "vless://$UUID@$DOMAIN:$PORT?type=ws&security=tls&path=%2f${WS_PATH_WITHOUT_SLASH}%2f#WS_TLS_wulabing-$DOMAIN"
  print_ok "-------------------------------------------------"
  print_ok "URL 二维码VLESS + TCP + TLS请在浏览器中访问"
  print_ok "https://api.qrserver.com/v1/create-qr-code/?size=400x400&data=vless://$UUID@$DOMAIN:$PORT?security=tls%23TLS_wulabing-$DOMAIN"
  print_ok "URL 二维码VLESS + TCP + XTLS请在浏览器中访问"
  print_ok "https://api.qrserver.com/v1/create-qr-code/?size=400x400&data=vless://$UUID@$DOMAIN:$PORT?security=xtls%26flow=$FLOW%23XTLS_wulabing-$DOMAIN"
  print_ok "URL 二维码VLESS + WebSocket + TLS请在浏览器中访问"
  print_ok "https://api.qrserver.com/v1/create-qr-code/?size=400x400&data=vless://$UUID@$DOMAIN:$PORT?type=ws%26security=tls%26path=%2f${WS_PATH_WITHOUT_SLASH}%2f%23WS_TLS_wulabing-$DOMAIN"
}
function basic_information() {
  # Success summary for the VLESS+TCP+XTLS+Nginx install: banner, then the
  # inbound settings and the shareable links.
  print_ok "VLESS+TCP+XTLS+Nginx 安装成功"
  vless_xtls-rprx-direct_information
  vless_xtls-rprx-direct_link
}
function basic_ws_information() {
  # Success summary for the mixed WebSocket-fallback install: WS settings
  # first, then the plain TCP settings and all links.
  print_ok "VLESS+TCP+TLS+Nginx with WebSocket 混合模式 安装成功"
  ws_information
  print_ok "————————————————————————"
  vless_xtls-rprx-direct_information
  ws_link
}
function show_access_log() {
  # Follow the Xray access log; complain when it has not been created yet.
  # An if/else replaces `[ -f ] && tail || echo`, which would also print the
  # "missing" message whenever tail itself exited non-zero.
  if [ -f "${xray_access_log}" ]; then
    tail -f "${xray_access_log}"
  else
    echo -e "${RedBG}log文件不存在${Font}"
  fi
}
function show_error_log() {
  # Follow the Xray error log; complain when it has not been created yet.
  # An if/else replaces `[ -f ] && tail || echo`, which would also print the
  # "missing" message whenever tail itself exited non-zero.
  if [ -f "${xray_error_log}" ]; then
    tail -f "${xray_error_log}"
  else
    echo -e "${RedBG}log文件不存在${Font}"
  fi
}
function bbr_boost_sh() {
  # Fetch and run ylx2016's TCP tuning script (BBR / LotServer installer).
  # NOTE(review): downloads with --no-check-certificate and executes the
  # remote script as root without verification — confirm the source is
  # trusted before offering this option.
  [ -f "tcp.sh" ] && rm -rf ./tcp.sh
  wget -N --no-check-certificate "https://raw.githubusercontent.com/ylx2016/Linux-NetSpeed/master/tcp.sh" && chmod +x tcp.sh && ./tcp.sh
}
function mtproxy_sh() {
  # Fetch and run the MTProxy installer (menu marks this option deprecated).
  # NOTE(review): executes a remote script without verification.
  wget -N --no-check-certificate "https://github.com/wulabing/mtp/raw/master/mtproxy.sh" && chmod +x mtproxy.sh && bash mtproxy.sh
}
function install_xray() {
  # Full VLESS+TCP+XTLS/TLS install pipeline: environment checks, system
  # deps, domain/port validation, Xray + Nginx setup, TLS certificate, then
  # the configuration summary. Each step is a helper defined elsewhere in
  # this script.
  is_root
  system_check
  dependency_install
  basic_optimization
  domain_check
  port_exist_check 80
  xray_install
  configure_xray
  nginx_install
  configure_nginx
  configure_web
  generate_certificate
  ssl_judge_and_install
  restart_all
  basic_information
}
function install_xray_ws() {
  # Same pipeline as install_xray but writes the WebSocket-fallback Xray
  # config (configure_xray_ws) and prints the mixed-mode summary.
  is_root
  system_check
  dependency_install
  basic_optimization
  domain_check
  port_exist_check 80
  xray_install
  configure_xray_ws
  nginx_install
  configure_nginx
  configure_web
  generate_certificate
  ssl_judge_and_install
  restart_all
  basic_ws_information
}
menu() {
  # Interactive entry menu: print the banner and option list, read one
  # number, and dispatch to the matching action via `case`. The script
  # returns to the shell after the chosen action completes.
  #update_sh
  shell_mode_check
  echo -e "\t Xray 安装管理脚本 ${Red}[${shell_version}]${Font}"
  echo -e "\t---authored by wulabing---"
  echo -e "\thttps://github.com/wulabing\n"
  echo -e "当前已安装版本:${shell_mode}"
  # NOTE(review): the trailing "" below is an empty-string concatenation
  # left over from editing — harmless.
  echo -e "—————————————— 安装向导 ——————————————"""
  echo -e "${Green}0.${Font} 升级 脚本"
  echo -e "${Green}1.${Font} 安装 Xray (VLESS + TCP + XTLS / TLS + Nginx)"
  echo -e "${Green}2.${Font} 安装 Xray (VLESS + TCP + XTLS / TLS + Nginx 及 VLESS + TCP + TLS + Nginx + WebSocket 回落并存模式)"
  echo -e "—————————————— 配置变更 ——————————————"
  echo -e "${Green}11.${Font} 变更 UUID"
  echo -e "${Green}13.${Font} 变更 连接端口"
  echo -e "${Green}14.${Font} 变更 WebSocket PATH"
  echo -e "—————————————— 查看信息 ——————————————"
  echo -e "${Green}21.${Font} 查看 实时访问日志"
  echo -e "${Green}22.${Font} 查看 实时错误日志"
  echo -e "${Green}23.${Font} 查看 Xray 配置链接"
  # echo -e "${Green}23.${Font} 查看 V2Ray 配置信息"
  echo -e "—————————————— 其他选项 ——————————————"
  echo -e "${Green}31.${Font} 安装 4 合 1 BBR、锐速安装脚本"
  echo -e "${Yellow}32.${Font} 安装 MTproxy(不推荐使用,请相关用户关闭或卸载)"
  echo -e "${Green}33.${Font} 卸载 Xray"
  echo -e "${Green}34.${Font} 更新 Xray-core"
  echo -e "${Green}35.${Font} 安装 Xray-core 测试版(Pre)"
  echo -e "${Green}36.${Font} 手动更新SSL证书"
  echo -e "${Green}40.${Font} 退出"
  read -rp "请输入数字:" menu_num
  case $menu_num in
  # Script self-update.
  0)
    update_sh
    ;;
  # Install actions.
  1)
    install_xray
    ;;
  2)
    install_xray_ws
    ;;
  # Change UUID; in ws mode both the TCP and the WS client entries change.
  11)
    read -rp "请输入UUID:" UUID
    if [[ ${shell_mode} == "tcp" ]]; then
      modify_UUID
    elif [[ ${shell_mode} == "ws" ]]; then
      modify_UUID
      modify_UUID_ws
    fi
    restart_all
    ;;
  # Change listening port.
  13)
    modify_port
    restart_all
    ;;
  # Change WebSocket path (only meaningful in ws mode).
  14)
    if [[ ${shell_mode} == "ws" ]]; then
      read -rp "请输入路径(示例:/wulabing/ 要求两侧都包含/):" WS_PATH
      modify_fallback_ws
      modify_ws
      restart_all
    else
      print_error "当前模式不是Websocket模式"
    fi
    ;;
  # Log viewers.
  21)
    tail -f $xray_access_log
    ;;
  22)
    tail -f $xray_error_log
    ;;
  # Show configuration links for the installed mode.
  23)
    if [[ -f $xray_conf_dir/config.json ]]; then
      if [[ ${shell_mode} == "tcp" ]]; then
        basic_information
      elif [[ ${shell_mode} == "ws" ]]; then
        basic_ws_information
      fi
    else
      print_error "xray 配置文件不存在"
    fi
    ;;
  # Extras: BBR script, MTProxy, uninstall, core update/beta, cert renewal.
  31)
    bbr_boost_sh
    ;;
  32)
    mtproxy_sh
    ;;
  33)
    source '/etc/os-release'
    xray_uninstall
    ;;
  34)
    bash -c "$(curl -L https://github.com/XTLS/Xray-install/raw/main/install-release.sh)" - install
    restart_all
    ;;
  35)
    bash -c "$(curl -L https://github.com/XTLS/Xray-install/raw/main/install-release.sh)" - install --beta
    restart_all
    ;;
  36)
    "/root/.acme.sh"/acme.sh --cron --home "/root/.acme.sh"
    restart_all
    ;;
  40)
    exit 0
    ;;
  *)
    print_error "请输入正确的数字"
    ;;
  esac
}
menu "$@"

View File

@@ -1,911 +0,0 @@
#!/usr/bin/env bash
# The files installed by the script conform to the Filesystem Hierarchy Standard:
# https://wiki.linuxfoundation.org/lsb/fhs
# The URL of the script project is:
# https://github.com/XTLS/Xray-install
# The URL of the script is:
# https://raw.githubusercontent.com/XTLS/Xray-install/main/install-release.sh
# If the script executes incorrectly, go to:
# https://github.com/XTLS/Xray-install/issues
# You can set this variable whatever you want in shell session right before running this script by issuing:
# export DAT_PATH='/usr/local/share/xray'
DAT_PATH=${DAT_PATH:-/usr/local/share/xray}
# You can set this variable whatever you want in shell session right before running this script by issuing:
# export JSON_PATH='/usr/local/etc/xray'
JSON_PATH=${JSON_PATH:-/usr/local/etc/xray}
# Set this variable only if you are starting xray with multiple configuration files:
# export JSONS_PATH='/usr/local/etc/xray'
# Set this variable only if you want this script to check all the systemd unit file:
# export check_all_service_files='yes'
# Global variables
# Remember whether an installation already existed before this run; decides
# at the end whether the service is (re)started or freshly enabled.
if [[ -f '/etc/systemd/system/xray.service' ]] && [[ -f '/usr/local/bin/xray' ]]; then
  XRAY_IS_INSTALLED_BEFORE_RUNNING_SCRIPT=1
else
  XRAY_IS_INSTALLED_BEFORE_RUNNING_SCRIPT=0
fi
# Xray current version
CURRENT_VERSION=''
# Xray latest release version
RELEASE_LATEST=''
# Xray latest prerelease/release version
PRE_RELEASE_LATEST=''
# Xray version will be installed
INSTALL_VERSION=''
# Flag: 'install' action requested
INSTALL='0'
# Flag: 'install-geodata' action requested
INSTALL_GEODATA='0'
# Flag: 'remove' action requested
REMOVE='0'
# Flag: 'help' action requested
HELP='0'
# Flag: 'check' action requested
CHECK='0'
# --force
FORCE='0'
# --beta
BETA='0'
# --install-user <user>
INSTALL_USER=''
# --without-geodata
NO_GEODATA='0'
# --without-logfiles
NO_LOGFILES='0'
# --no-update-service
N_UP_SERVICE='0'
# --reinstall
REINSTALL='0'
# --version <version>
SPECIFIED_VERSION=''
# --local <file>
LOCAL_FILE=''
# --proxy <address>
PROXY=''
# --purge
PURGE='0'
curl() {
  # Wrapper over the real curl binary (resolved via `type -P`, so the
  # function does not call itself) adding redirect-following and retry
  # defaults to every invocation in this script.
  $(type -P curl) -L -q --retry 5 --retry-delay 10 --retry-max-time 60 "$@"
}
systemd_cat_config() {
  # Show the effective systemd unit(s): prefer `systemd-analyze cat-config`
  # (which merges drop-ins) when this systemd supports it; otherwise dump
  # the unit file plus its .d/ drop-ins manually and warn about the old
  # systemd version. $1..$n are unit file paths.
  if systemd-analyze --help | grep -qw 'cat-config'; then
    systemd-analyze --no-pager cat-config "$@"
    echo
  else
    echo "${aoi}~~~~~~~~~~~~~~~~"
    cat "$@" "$1".d/*
    echo "${aoi}~~~~~~~~~~~~~~~~"
    echo "${red}warning: ${green}The systemd version on the current operating system is too low."
    echo "${red}warning: ${green}Please consider to upgrade the systemd or the operating system.${reset}"
    echo
  fi
}
check_if_running_as_root() {
  # Abort unless the effective UID is root (0).
  # If you want to run as another user, please modify the comparison target.
  if (( EUID != 0 )); then
    echo "error: You must run this script as root!"
    exit 1
  fi
}
identify_the_operating_system_and_architecture() {
  # Detect the CPU architecture, require a systemd-managed Linux (or a
  # container that at least has systemctl), and pick the package-manager
  # command lines. Exits with an error on anything unsupported.
  # Sets globals: MACHINE, PACKAGE_MANAGEMENT_INSTALL,
  #               PACKAGE_MANAGEMENT_REMOVE, package_provide_tput.
  if [[ "$(uname)" == 'Linux' ]]; then
    # Map `uname -m` to the suffix used by Xray release archive names.
    case "$(uname -m)" in
    'i386' | 'i686')
      MACHINE='32'
      ;;
    'amd64' | 'x86_64')
      MACHINE='64'
      ;;
    'armv5tel')
      MACHINE='arm32-v5'
      ;;
    'armv6l')
      MACHINE='arm32-v6'
      # Without a VFP unit, fall back to the soft-float v5 build.
      grep Features /proc/cpuinfo | grep -qw 'vfp' || MACHINE='arm32-v5'
      ;;
    'armv7' | 'armv7l')
      MACHINE='arm32-v7a'
      grep Features /proc/cpuinfo | grep -qw 'vfp' || MACHINE='arm32-v5'
      ;;
    'armv8' | 'aarch64')
      MACHINE='arm64-v8a'
      ;;
    'mips')
      MACHINE='mips32'
      ;;
    'mipsle')
      MACHINE='mips32le'
      ;;
    'mips64')
      MACHINE='mips64'
      # uname does not expose endianness on mips64; ask lscpu.
      lscpu | grep -q "Little Endian" && MACHINE='mips64le'
      ;;
    'mips64le')
      MACHINE='mips64le'
      ;;
    'ppc64')
      MACHINE='ppc64'
      ;;
    'ppc64le')
      MACHINE='ppc64le'
      ;;
    'riscv64')
      MACHINE='riscv64'
      ;;
    's390x')
      MACHINE='s390x'
      ;;
    *)
      echo "error: The architecture is not supported."
      exit 1
      ;;
    esac
    if [[ ! -f '/etc/os-release' ]]; then
      echo "error: Don't use outdated Linux distributions."
      exit 1
    fi
    # Do not combine this judgment condition with the following judgment condition.
    ## Be aware of Linux distribution like Gentoo, which kernel supports switch between Systemd and OpenRC.
    if [[ -f /.dockerenv ]] || grep -q 'docker\|lxc' /proc/1/cgroup && [[ "$(type -P systemctl)" ]]; then
      true
    elif [[ -d /run/systemd/system ]] || grep -q systemd <(ls -l /sbin/init); then
      true
    else
      echo "error: Only Linux distributions using systemd are supported."
      exit 1
    fi
    # Probe package managers in preference order; also record which package
    # provides tput (needed for colored output) on each distro family.
    if [[ "$(type -P apt)" ]]; then
      PACKAGE_MANAGEMENT_INSTALL='apt -y --no-install-recommends install'
      PACKAGE_MANAGEMENT_REMOVE='apt purge'
      package_provide_tput='ncurses-bin'
    elif [[ "$(type -P dnf)" ]]; then
      PACKAGE_MANAGEMENT_INSTALL='dnf -y install'
      PACKAGE_MANAGEMENT_REMOVE='dnf remove'
      package_provide_tput='ncurses'
    elif [[ "$(type -P yum)" ]]; then
      PACKAGE_MANAGEMENT_INSTALL='yum -y install'
      PACKAGE_MANAGEMENT_REMOVE='yum remove'
      package_provide_tput='ncurses'
    elif [[ "$(type -P zypper)" ]]; then
      PACKAGE_MANAGEMENT_INSTALL='zypper install -y --no-recommends'
      PACKAGE_MANAGEMENT_REMOVE='zypper remove'
      package_provide_tput='ncurses-utils'
    elif [[ "$(type -P pacman)" ]]; then
      PACKAGE_MANAGEMENT_INSTALL='pacman -Syu --noconfirm'
      PACKAGE_MANAGEMENT_REMOVE='pacman -Rsn'
      package_provide_tput='ncurses'
    elif [[ "$(type -P emerge)" ]]; then
      PACKAGE_MANAGEMENT_INSTALL='emerge -v'
      PACKAGE_MANAGEMENT_REMOVE='emerge -Cv'
      package_provide_tput='ncurses'
    else
      echo "error: The script does not support the package manager in this operating system."
      exit 1
    fi
  else
    echo "error: This operating system is not supported."
    exit 1
  fi
}
## Demo function for processing parameters
judgment_parameters() {
  # Parse ACTION and OPTION arguments into the global flag variables and
  # reject contradictory combinations. Exits 1 on any parse error.
  # Fixes: the --install-user error message no longer ends with a stray '}',
  # and the unknown-option message reports the offending argument instead of
  # a literal '-'.
  local local_install='0'
  local temp_version='0'
  while [[ "$#" -gt '0' ]]; do
    case "$1" in
    'install')
      INSTALL='1'
      ;;
    'install-geodata')
      INSTALL_GEODATA='1'
      ;;
    'remove')
      REMOVE='1'
      ;;
    'help')
      HELP='1'
      ;;
    'check')
      CHECK='1'
      ;;
    '--without-geodata')
      NO_GEODATA='1'
      ;;
    '--without-logfiles')
      NO_LOGFILES='1'
      ;;
    '--purge')
      PURGE='1'
      ;;
    '--version')
      if [[ -z "$2" ]]; then
        echo "error: Please specify the correct version."
        exit 1
      fi
      temp_version='1'
      SPECIFIED_VERSION="$2"
      shift
      ;;
    '-f' | '--force')
      FORCE='1'
      ;;
    '--beta')
      BETA='1'
      ;;
    '-l' | '--local')
      local_install='1'
      if [[ -z "$2" ]]; then
        echo "error: Please specify the correct local file."
        exit 1
      fi
      LOCAL_FILE="$2"
      shift
      ;;
    '-p' | '--proxy')
      if [[ -z "$2" ]]; then
        echo "error: Please specify the proxy server address."
        exit 1
      fi
      PROXY="$2"
      shift
      ;;
    '-u' | '--install-user')
      if [[ -z "$2" ]]; then
        echo "error: Please specify the install user."
        exit 1
      fi
      INSTALL_USER="$2"
      shift
      ;;
    '--reinstall')
      REINSTALL='1'
      ;;
    '--no-update-service')
      N_UP_SERVICE='1'
      ;;
    *)
      echo "$0: unknown option -- $1"
      exit 1
      ;;
    esac
    shift
  done
  # Exactly one action is allowed; default to 'install' when none was given.
  if ((INSTALL+INSTALL_GEODATA+HELP+CHECK+REMOVE==0)); then
    INSTALL='1'
  elif ((INSTALL+INSTALL_GEODATA+HELP+CHECK+REMOVE>1)); then
    echo 'You can only choose one action.'
    exit 1
  fi
  # Version-selection options are mutually exclusive for 'install'.
  if [[ "$INSTALL" -eq '1' ]] && ((temp_version+local_install+REINSTALL+BETA>1)); then
    echo "--version,--reinstall,--beta and --local can't be used together."
    exit 1
  fi
}
check_install_user() {
  # Resolve INSTALL_USER (explicit flag > User= of an existing unit file >
  # 'nobody'), validate that the account exists, and cache its numeric
  # uid/gid for later install(1)/chown calls.
  # Sets globals: INSTALL_USER, INSTALL_USER_UID, INSTALL_USER_GID.
  if [[ -z "$INSTALL_USER" ]]; then
    if [[ -f '/usr/local/bin/xray' ]]; then
      # Reuse the User= of the existing installation so file ownership
      # stays consistent across upgrades.
      INSTALL_USER="$(grep '^[ '$'\t]*User[ '$'\t]*=' /etc/systemd/system/xray.service | tail -n 1 | awk -F = '{print $2}' | awk '{print $1}')"
      if [[ -z "$INSTALL_USER" ]]; then
        INSTALL_USER='root'
      fi
    else
      INSTALL_USER='nobody'
    fi
  fi
  # Quoted expansions: a user name must reach id(1) unsplit and unglobbed.
  if ! id "$INSTALL_USER" > /dev/null 2>&1; then
    echo "the user '$INSTALL_USER' is not effective"
    exit 1
  fi
  INSTALL_USER_UID="$(id -u "$INSTALL_USER")"
  INSTALL_USER_GID="$(id -g "$INSTALL_USER")"
}
install_software() {
  # Install package $1 via the detected package manager unless command $2 is
  # already on PATH. Exits 1 when installation fails.
  # `local` keeps the two helper variables from leaking into global scope.
  local package_name="$1"
  local file_to_detect="$2"
  type -P "$file_to_detect" > /dev/null 2>&1 && return
  # PACKAGE_MANAGEMENT_INSTALL is deliberately unquoted: it holds a full
  # command line (manager + flags) that must word-split.
  if ${PACKAGE_MANAGEMENT_INSTALL} "$package_name"; then
    echo "info: $package_name is installed."
  else
    echo "error: Installation of $package_name failed, please check your network."
    exit 1
  fi
}
get_current_version() {
  # Populate CURRENT_VERSION from the installed binary, normalised to carry
  # a leading "v"; empty string when xray is not installed.
  if [[ ! -f '/usr/local/bin/xray' ]]; then
    CURRENT_VERSION=""
    return
  fi
  CURRENT_VERSION="$(/usr/local/bin/xray -version | awk 'NR==1 {print $2}')"
  CURRENT_VERSION="v${CURRENT_VERSION#v}"
}
get_latest_version() {
  # Get Xray latest release version number
  # Queries the GitHub API twice: once for the newest stable release
  # (RELEASE_LATEST) and once over all releases for the newest tag that
  # actually publishes a Xray-linux-$MACHINE.zip asset (PRE_RELEASE_LATEST).
  # Exits 1 on network failure, rate limiting, or an empty result.
  local tmp_file
  tmp_file="$(mktemp)"
  if ! curl -x "${PROXY}" -sS -H "Accept: application/vnd.github.v3+json" -o "$tmp_file" 'https://api.github.com/repos/XTLS/Xray-core/releases/latest'; then
    "rm" "$tmp_file"
    echo 'error: Failed to get release list, please check your network.'
    exit 1
  fi
  # Poor-man's JSON parse: split on commas, pull the tag_name field.
  RELEASE_LATEST="$(sed 'y/,/\n/' "$tmp_file" | grep 'tag_name' | awk -F '"' '{print $4}')"
  if [[ -z "$RELEASE_LATEST" ]]; then
    if grep -q "API rate limit exceeded" "$tmp_file"; then
      echo "error: github API rate limit exceeded"
    else
      echo "error: Failed to get the latest release version."
      echo "Welcome bug report:https://github.com/XTLS/Xray-install/issues"
    fi
    "rm" "$tmp_file"
    exit 1
  fi
  "rm" "$tmp_file"
  RELEASE_LATEST="v${RELEASE_LATEST#v}"
  if ! curl -x "${PROXY}" -sS -H "Accept: application/vnd.github.v3+json" -o "$tmp_file" 'https://api.github.com/repos/XTLS/Xray-core/releases'; then
    "rm" "$tmp_file"
    echo 'error: Failed to get release list, please check your network.'
    exit 1
  fi
  local releases_list
  releases_list=($(sed 'y/,/\n/' "$tmp_file" | grep 'tag_name' | awk -F '"' '{print $4}'))
  if [[ "${#releases_list[@]}" -eq '0' ]]; then
    if grep -q "API rate limit exceeded" "$tmp_file"; then
      echo "error: github API rate limit exceeded"
    else
      echo "error: Failed to get the latest release version."
      echo "Welcome bug report:https://github.com/XTLS/Xray-install/issues"
    fi
    "rm" "$tmp_file"
    exit 1
  fi
  # Walk releases newest-first until one ships a build for this MACHINE.
  local i
  for i in ${!releases_list[@]}
  do
    releases_list[$i]="v${releases_list[$i]#v}"
    grep -q "https://github.com/XTLS/Xray-core/releases/download/${releases_list[$i]}/Xray-linux-$MACHINE.zip" "$tmp_file" && break
  done
  "rm" "$tmp_file"
  PRE_RELEASE_LATEST="${releases_list[$i]}"
}
version_gt() {
  # Compare two semantic versions (optional leading "v").
  # Returns 0 when $1 > $2, 1 when $1 <= $2.
  # Fix: segments are compared positionally with missing segments treated
  # as 0. The old code compared the *last* segment of each string, so e.g.
  # version_gt v1.8 v1.8.1 wrongly succeeded (8 > 1). Pure bash — no awk.
  [[ "$1" == "$2" ]] && return 1
  local -a lhs rhs
  IFS='.' read -r -a lhs <<< "${1#v}"
  IFS='.' read -r -a rhs <<< "${2#v}"
  local i
  for i in 0 1 2; do
    if (( ${lhs[i]:-0} > ${rhs[i]:-0} )); then
      return 0
    elif (( ${lhs[i]:-0} < ${rhs[i]:-0} )); then
      return 1
    fi
  done
  return 1
}
download_xray() {
  # Download the Xray release archive for $INSTALL_VERSION/$MACHINE into
  # $ZIP_FILE. Returns non-zero on failure so the caller can clean up
  # $TMP_DIRECTORY.
  DOWNLOAD_LINK="https://github.com/XTLS/Xray-core/releases/download/$INSTALL_VERSION/Xray-linux-$MACHINE.zip"
  echo "Downloading Xray archive: $DOWNLOAD_LINK"
  if ! curl -x "${PROXY}" -R -H 'Cache-Control: no-cache' -o "$ZIP_FILE" "$DOWNLOAD_LINK"; then
    echo 'error: Download failed! Please check your network or try again.'
    return 1
  fi
  # NOTE(review): the original function carried .dgst checksum-verification
  # code *after* an unconditional `return 0`, i.e. it was unreachable dead
  # code (verification effectively disabled). The dead code has been
  # removed; re-add real verification here if release .dgst files should be
  # checked again.
  return 0
}
decompression() {
  # Unpack archive $1 into $TMP_DIRECTORY; on failure remove the temp dir
  # and abort the script.
  if unzip -q "$1" -d "$TMP_DIRECTORY"; then
    echo "info: Extract the Xray package to $TMP_DIRECTORY and prepare it for installation."
  else
    echo 'error: Xray decompression failed.'
    "rm" -r "$TMP_DIRECTORY"
    echo "removed: $TMP_DIRECTORY"
    exit 1
  fi
}
install_file() {
  # Copy one extracted artifact ($1) into place: the 'xray' binary goes to
  # /usr/local/bin with mode 0755, the geo data files go to $DAT_PATH with
  # mode 0644. Any other name is silently ignored.
  NAME="$1"
  if [[ "$NAME" == 'xray' ]]; then
    install -m 755 "${TMP_DIRECTORY}/$NAME" "/usr/local/bin/$NAME"
  elif [[ "$NAME" == 'geoip.dat' ]] || [[ "$NAME" == 'geosite.dat' ]]; then
    install -m 644 "${TMP_DIRECTORY}/$NAME" "${DAT_PATH}/$NAME"
  fi
}
install_xray() {
  # Copy the extracted files into the filesystem and prepare config/log
  # locations. Sets the GEODATA/CONFIG_NEW/CONFDIR/LOG flags that main()
  # uses for its "installed:" report.
  # Install Xray binary to /usr/local/bin/ and $DAT_PATH
  install_file xray
  # If the file exists, geoip.dat and geosite.dat will not be installed or updated
  if [[ "$NO_GEODATA" -eq '0' ]] && [[ ! -f "${DAT_PATH}/.undat" ]]; then
    install -d "$DAT_PATH"
    install_file geoip.dat
    install_file geosite.dat
    GEODATA='1'
  fi
  # Install Xray configuration file to $JSON_PATH
  # shellcheck disable=SC2153
  if [[ -z "$JSONS_PATH" ]] && [[ ! -d "$JSON_PATH" ]]; then
    install -d "$JSON_PATH"
    echo "{}" > "${JSON_PATH}/config.json"
    CONFIG_NEW='1'
  fi
  # Install Xray configuration file to $JSONS_PATH (multi-file mode)
  if [[ -n "$JSONS_PATH" ]] && [[ ! -d "$JSONS_PATH" ]]; then
    install -d "$JSONS_PATH"
    for BASE in 00_log 01_api 02_dns 03_routing 04_policy 05_inbounds 06_outbounds 07_transport 08_stats 09_reverse; do
      echo '{}' > "${JSONS_PATH}/${BASE}.json"
    done
    CONFDIR='1'
  fi
  # Used to store Xray log files; owned by the service user so xray can
  # write them without running as root.
  if [[ "$NO_LOGFILES" -eq '0' ]]; then
    if [[ ! -d '/var/log/xray/' ]]; then
      install -d -m 700 -o "$INSTALL_USER_UID" -g "$INSTALL_USER_GID" /var/log/xray/
      install -m 600 -o "$INSTALL_USER_UID" -g "$INSTALL_USER_GID" /dev/null /var/log/xray/access.log
      install -m 600 -o "$INSTALL_USER_UID" -g "$INSTALL_USER_GID" /dev/null /var/log/xray/error.log
      LOG='1'
    else
      chown -R "$INSTALL_USER_UID:$INSTALL_USER_GID" /var/log/xray/
    fi
  fi
}
install_startup_service_file() {
  # Write xray.service and the templated xray@.service, plus a drop-in that
  # selects -config vs -confdir depending on whether JSONS_PATH is set.
  # Capability/NoNewPrivileges lines are commented out when the service
  # runs as root (uid 0), since they are only needed for unprivileged users.
  # Sets SYSTEMD='1' on completion.
  mkdir -p '/etc/systemd/system/xray.service.d'
  mkdir -p '/etc/systemd/system/xray@.service.d/'
  local temp_CapabilityBoundingSet="CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_BIND_SERVICE"
  local temp_AmbientCapabilities="AmbientCapabilities=CAP_NET_ADMIN CAP_NET_BIND_SERVICE"
  local temp_NoNewPrivileges="NoNewPrivileges=true"
  if [[ "$INSTALL_USER_UID" -eq '0' ]]; then
    temp_CapabilityBoundingSet="#${temp_CapabilityBoundingSet}"
    temp_AmbientCapabilities="#${temp_AmbientCapabilities}"
    temp_NoNewPrivileges="#${temp_NoNewPrivileges}"
  fi
  cat > /etc/systemd/system/xray.service << EOF
[Unit]
Description=Xray Service
Documentation=https://github.com/xtls
After=network.target nss-lookup.target
[Service]
User=$INSTALL_USER
${temp_CapabilityBoundingSet}
${temp_AmbientCapabilities}
${temp_NoNewPrivileges}
ExecStart=/usr/local/bin/xray run -config /usr/local/etc/xray/config.json
Restart=on-failure
RestartPreventExitStatus=23
LimitNPROC=10000
LimitNOFILE=1000000
[Install]
WantedBy=multi-user.target
EOF
  cat > /etc/systemd/system/xray@.service <<EOF
[Unit]
Description=Xray Service
Documentation=https://github.com/xtls
After=network.target nss-lookup.target
[Service]
User=$INSTALL_USER
${temp_CapabilityBoundingSet}
${temp_AmbientCapabilities}
${temp_NoNewPrivileges}
ExecStart=/usr/local/bin/xray run -config /usr/local/etc/xray/%i.json
Restart=on-failure
RestartPreventExitStatus=23
LimitNPROC=10000
LimitNOFILE=1000000
[Install]
WantedBy=multi-user.target
EOF
  chmod 644 /etc/systemd/system/xray.service /etc/systemd/system/xray@.service
  # Swap the single-conf / multi-conf drop-ins so only one style is active.
  if [[ -n "$JSONS_PATH" ]]; then
    "rm" '/etc/systemd/system/xray.service.d/10-donot_touch_single_conf.conf' \
      '/etc/systemd/system/xray@.service.d/10-donot_touch_single_conf.conf'
    echo "# In case you have a good reason to do so, duplicate this file in the same directory and make your customizes there.
# Or all changes you made will be lost! # Refer: https://www.freedesktop.org/software/systemd/man/systemd.unit.html
[Service]
ExecStart=
ExecStart=/usr/local/bin/xray run -confdir $JSONS_PATH" |
      tee '/etc/systemd/system/xray.service.d/10-donot_touch_multi_conf.conf' > \
        '/etc/systemd/system/xray@.service.d/10-donot_touch_multi_conf.conf'
  else
    "rm" '/etc/systemd/system/xray.service.d/10-donot_touch_multi_conf.conf' \
      '/etc/systemd/system/xray@.service.d/10-donot_touch_multi_conf.conf'
    echo "# In case you have a good reason to do so, duplicate this file in the same directory and make your customizes there.
# Or all changes you made will be lost! # Refer: https://www.freedesktop.org/software/systemd/man/systemd.unit.html
[Service]
ExecStart=
ExecStart=/usr/local/bin/xray run -config ${JSON_PATH}/config.json" > \
      '/etc/systemd/system/xray.service.d/10-donot_touch_single_conf.conf'
    echo "# In case you have a good reason to do so, duplicate this file in the same directory and make your customizes there.
# Or all changes you made will be lost! # Refer: https://www.freedesktop.org/software/systemd/man/systemd.unit.html
[Service]
ExecStart=
ExecStart=/usr/local/bin/xray run -config ${JSON_PATH}/%i.json" > \
      '/etc/systemd/system/xray@.service.d/10-donot_touch_single_conf.conf'
  fi
  echo "info: Systemd service files have been installed successfully!"
  echo "${red}warning: ${green}The following are the actual parameters for the xray service startup."
  echo "${red}warning: ${green}Please make sure the configuration file path is correctly set.${reset}"
  systemd_cat_config /etc/systemd/system/xray.service
  # shellcheck disable=SC2154
  if [[ x"${check_all_service_files:0:1}" = x'y' ]]; then
    echo
    echo
    systemd_cat_config /etc/systemd/system/xray@.service
  fi
  systemctl daemon-reload
  SYSTEMD='1'
}
start_xray() {
  # Start xray (or the templated xray@ instance recorded in XRAY_CUSTOMIZE
  # by stop_xray) and verify it is active one second later; exit 1 if not.
  if [[ -f '/etc/systemd/system/xray.service' ]]; then
    systemctl start "${XRAY_CUSTOMIZE:-xray}"
    sleep 1s
    if systemctl -q is-active "${XRAY_CUSTOMIZE:-xray}"; then
      echo 'info: Start the Xray service.'
    else
      echo 'error: Failed to start Xray service.'
      exit 1
    fi
  fi
}
stop_xray() {
  # Stop the running unit: prefer a templated xray@*.service instance when
  # one is listed, otherwise plain xray.service. Records the instance name
  # in the global XRAY_CUSTOMIZE so start_xray restarts the same unit.
  XRAY_CUSTOMIZE="$(systemctl list-units | grep 'xray@' | awk -F ' ' '{print $1}')"
  if [[ -z "$XRAY_CUSTOMIZE" ]]; then
    local xray_daemon_to_stop='xray.service'
  else
    local xray_daemon_to_stop="$XRAY_CUSTOMIZE"
  fi
  if ! systemctl stop "$xray_daemon_to_stop"; then
    echo 'error: Stopping the Xray service failed.'
    exit 1
  fi
  echo 'info: Stop the Xray service.'
}
install_geodata() {
  # Download geoip.dat and dlc.dat (installed as geosite.dat) with their
  # .sha256sum companions, verify, and install into $DAT_PATH. Always
  # terminates the script (0 on success, 1 on failure).
  # Fix: all expansions are quoted — URLs/filenames must reach the helper
  # and install(1) unsplit and unglobbed.
  download_geodata() {
    # $1 = URL, $2 = destination file name inside $dir_tmp.
    if ! curl -x "${PROXY}" -R -H 'Cache-Control: no-cache' -o "${dir_tmp}/${2}" "${1}"; then
      echo 'error: Download failed! Please check your network or try again.'
      exit 1
    fi
    if ! curl -x "${PROXY}" -R -H 'Cache-Control: no-cache' -o "${dir_tmp}/${2}.sha256sum" "${1}.sha256sum"; then
      echo 'error: Download failed! Please check your network or try again.'
      exit 1
    fi
  }
  local download_link_geoip="https://github.com/v2fly/geoip/releases/latest/download/geoip.dat"
  local download_link_geosite="https://github.com/v2fly/domain-list-community/releases/latest/download/dlc.dat"
  local file_ip='geoip.dat'
  local file_dlc='dlc.dat'
  local file_site='geosite.dat'
  local dir_tmp
  dir_tmp="$(mktemp -d)"
  [[ "$XRAY_IS_INSTALLED_BEFORE_RUNNING_SCRIPT" -eq '0' ]] && echo "warning: Xray was not installed"
  download_geodata "$download_link_geoip" "$file_ip"
  download_geodata "$download_link_geosite" "$file_dlc"
  # sha256sum -c resolves the file names listed inside the .sha256sum files
  # relative to the current directory, hence the cd.
  cd "${dir_tmp}" || exit
  local i
  for i in "${dir_tmp}"/*.sha256sum; do
    if ! sha256sum -c "${i}"; then
      echo 'error: Check failed! Please check your network or try again.'
      exit 1
    fi
  done
  cd - > /dev/null
  install -d "$DAT_PATH"
  install -m 644 "${dir_tmp}/${file_dlc}" "${DAT_PATH}/${file_site}"
  install -m 644 "${dir_tmp}/${file_ip}" "${DAT_PATH}/${file_ip}"
  rm -r "${dir_tmp}"
  exit 0
}
check_update() {
  # 'check' action: report the installed version (when present) and the
  # latest release / pre-release versions, then exit 0.
  if [[ "$XRAY_IS_INSTALLED_BEFORE_RUNNING_SCRIPT" -eq '1' ]]; then
    get_current_version
    echo "info: The current version of Xray is $CURRENT_VERSION ."
  else
    echo 'warning: Xray is not installed.'
  fi
  get_latest_version
  echo "info: The latest release version of Xray is $RELEASE_LATEST ."
  echo "info: The latest pre-release/release version of Xray is $PRE_RELEASE_LATEST ."
  exit 0
}
remove_xray() {
  # 'remove' action: stop, disable, and delete Xray. With --purge the
  # configuration directory and /var/log/xray are removed too. Terminates
  # the script: 0 on success, 1 when nothing is installed or rm fails.
  if systemctl list-unit-files | grep -qw 'xray'; then
    if [[ -n "$(pidof xray)" ]]; then
      stop_xray
    fi
    local delete_files=('/usr/local/bin/xray' '/etc/systemd/system/xray.service' '/etc/systemd/system/xray@.service' '/etc/systemd/system/xray.service.d' '/etc/systemd/system/xray@.service.d')
    [[ -d "$DAT_PATH" ]] && delete_files+=("$DAT_PATH")
    if [[ "$PURGE" -eq '1' ]]; then
      if [[ -z "$JSONS_PATH" ]]; then
        delete_files+=("$JSON_PATH")
      else
        delete_files+=("$JSONS_PATH")
      fi
      [[ -d '/var/log/xray' ]] && delete_files+=('/var/log/xray')
    fi
    systemctl disable xray
    if ! ("rm" -r "${delete_files[@]}"); then
      echo 'error: Failed to remove Xray.'
      exit 1
    else
      for i in ${!delete_files[@]}
      do
        echo "removed: ${delete_files[$i]}"
      done
      systemctl daemon-reload
      echo "You may need to execute a command to remove dependent software: $PACKAGE_MANAGEMENT_REMOVE curl unzip"
      echo 'info: Xray has been removed.'
      if [[ "$PURGE" -eq '0' ]]; then
        echo 'info: If necessary, manually delete the configuration and log files.'
        if [[ -n "$JSONS_PATH" ]]; then
          echo "info: e.g., $JSONS_PATH and /var/log/xray/ ..."
        else
          echo "info: e.g., $JSON_PATH and /var/log/xray/ ..."
        fi
      fi
      exit 0
    fi
  else
    echo 'error: Xray is not installed.'
    exit 1
  fi
}
# Explanation of parameters in the script
show_help() {
  # Print the CLI usage text in one here-doc (only $0 is expanded) and
  # terminate successfully.
  cat << EOF
usage: $0 ACTION [OPTION]...

ACTION:
 install Install/Update Xray
 install-geodata Install/Update geoip.dat and geosite.dat only
 remove Remove Xray
 help Show help
 check Check if Xray can be updated
If no action is specified, then install will be selected

OPTION:
 install:
 --version Install the specified version of Xray, e.g., --version v1.0.0
 -f, --force Force install even though the versions are same
 --beta Install the pre-release version if it is exist
 -l, --local Install Xray from a local file
 -p, --proxy Download through a proxy server, e.g., -p http://127.0.0.1:8118 or -p socks5://127.0.0.1:1080
 -u, --install-user Install Xray in specified user, e.g, -u root
 --reinstall Reinstall current Xray version
 --no-update-service Don't change service files if they are exist
 --without-geodata Don't install/update geoip.dat and geosite.dat
 --without-logfiles Don't install /var/log/xray
 install-geodata:
 -p, --proxy Download through a proxy server
 remove:
 --purge Remove all the Xray files, include logs, configs, etc
 check:
 -p, --proxy Check new version through a proxy server
EOF
  exit 0
}
main() {
  # Orchestrate the whole script: validate environment, parse arguments,
  # dispatch terminal actions (help/check/remove/install-geodata), then run
  # the install/update flow: pick a version, download (or use a local
  # file), stop a running service, install files + unit files, report, and
  # finally start/enable the service.
  check_if_running_as_root
  identify_the_operating_system_and_architecture
  judgment_parameters "$@"
  # tput is needed for the color variables below.
  install_software "$package_provide_tput" 'tput'
  red=$(tput setaf 1)
  green=$(tput setaf 2)
  aoi=$(tput setaf 6)
  reset=$(tput sgr0)
  # Parameter information — each of these helpers exits the script itself.
  [[ "$HELP" -eq '1' ]] && show_help
  [[ "$CHECK" -eq '1' ]] && check_update
  [[ "$REMOVE" -eq '1' ]] && remove_xray
  [[ "$INSTALL_GEODATA" -eq '1' ]] && install_geodata
  # Check if the user is effective
  check_install_user
  # Two very important variables
  TMP_DIRECTORY="$(mktemp -d)"
  ZIP_FILE="${TMP_DIRECTORY}/Xray-linux-$MACHINE.zip"
  # Install Xray from a local file, but still need to make sure the network is available
  if [[ -n "$LOCAL_FILE" ]]; then
    echo 'warn: Install Xray from a local file, but still need to make sure the network is available.'
    echo -n 'warn: Please make sure the file is valid because we cannot confirm it. (Press any key) ...'
    read -r
    install_software 'unzip' 'unzip'
    decompression "$LOCAL_FILE"
  else
    # Pick the version to install: --reinstall > --version > latest/beta.
    get_current_version
    if [[ "$REINSTALL" -eq '1' ]]; then
      if [[ -z "$CURRENT_VERSION" ]]; then
        echo "error: Xray is not installed"
        exit 1
      fi
      INSTALL_VERSION="$CURRENT_VERSION"
      echo "info: Reinstalling Xray $CURRENT_VERSION"
    elif [[ -n "$SPECIFIED_VERSION" ]]; then
      SPECIFIED_VERSION="v${SPECIFIED_VERSION#v}"
      if [[ "$CURRENT_VERSION" == "$SPECIFIED_VERSION" ]] && [[ "$FORCE" -eq '0' ]]; then
        echo "info: The current version is same as the specified version. The version is $CURRENT_VERSION ."
        exit 0
      fi
      INSTALL_VERSION="$SPECIFIED_VERSION"
      echo "info: Installing specified Xray version $INSTALL_VERSION for $(uname -m)"
    else
      install_software 'curl' 'curl'
      get_latest_version
      if [[ "$BETA" -eq '0' ]]; then
        INSTALL_VERSION="$RELEASE_LATEST"
      else
        INSTALL_VERSION="$PRE_RELEASE_LATEST"
      fi
      if ! version_gt "$INSTALL_VERSION" "$CURRENT_VERSION" && [[ "$FORCE" -eq '0' ]]; then
        echo "info: No new version. The current version of Xray is $CURRENT_VERSION ."
        exit 0
      fi
      echo "info: Installing Xray $INSTALL_VERSION for $(uname -m)"
    fi
    install_software 'curl' 'curl'
    install_software 'unzip' 'unzip'
    if ! download_xray; then
      "rm" -r "$TMP_DIRECTORY"
      echo "removed: $TMP_DIRECTORY"
      exit 1
    fi
    decompression "$ZIP_FILE"
  fi
  # Determine if Xray is running; remember so it can be restarted later.
  if systemctl list-unit-files | grep -qw 'xray'; then
    if [[ -n "$(pidof xray)" ]]; then
      stop_xray
      XRAY_RUNNING='1'
    fi
  fi
  install_xray
  ([[ "$N_UP_SERVICE" -eq '1' ]] && [[ -f '/etc/systemd/system/xray.service' ]]) || install_startup_service_file
  echo 'installed: /usr/local/bin/xray'
  # If the file exists, the content output of installing or updating geoip.dat and geosite.dat will not be displayed
  if [[ "$GEODATA" -eq '1' ]]; then
    echo "installed: ${DAT_PATH}/geoip.dat"
    echo "installed: ${DAT_PATH}/geosite.dat"
  fi
  if [[ "$CONFIG_NEW" -eq '1' ]]; then
    echo "installed: ${JSON_PATH}/config.json"
  fi
  if [[ "$CONFDIR" -eq '1' ]]; then
    echo "installed: ${JSON_PATH}/00_log.json"
    echo "installed: ${JSON_PATH}/01_api.json"
    echo "installed: ${JSON_PATH}/02_dns.json"
    echo "installed: ${JSON_PATH}/03_routing.json"
    echo "installed: ${JSON_PATH}/04_policy.json"
    echo "installed: ${JSON_PATH}/05_inbounds.json"
    echo "installed: ${JSON_PATH}/06_outbounds.json"
    echo "installed: ${JSON_PATH}/07_transport.json"
    echo "installed: ${JSON_PATH}/08_stats.json"
    echo "installed: ${JSON_PATH}/09_reverse.json"
  fi
  if [[ "$LOG" -eq '1' ]]; then
    echo 'installed: /var/log/xray/'
    echo 'installed: /var/log/xray/access.log'
    echo 'installed: /var/log/xray/error.log'
  fi
  if [[ "$SYSTEMD" -eq '1' ]]; then
    echo 'installed: /etc/systemd/system/xray.service'
    echo 'installed: /etc/systemd/system/xray@.service'
  fi
  "rm" -r "$TMP_DIRECTORY"
  echo "removed: $TMP_DIRECTORY"
  get_current_version
  echo "info: Xray $CURRENT_VERSION is installed."
  echo "You may need to execute a command to remove dependent software: $PACKAGE_MANAGEMENT_REMOVE curl unzip"
  # Upgrades restart only when xray was running; fresh/forced installs
  # enable and start the unit.
  if [[ "$XRAY_IS_INSTALLED_BEFORE_RUNNING_SCRIPT" -eq '1' ]] && [[ "$FORCE" -eq '0' ]] && [[ "$REINSTALL" -eq '0' ]]; then
    [[ "$XRAY_RUNNING" -eq '1' ]] && start_xray
  else
    systemctl start xray
    systemctl enable xray
    sleep 1s
    if systemctl -q is-active xray; then
      echo "info: Enable and start the Xray service"
    else
      echo "warning: Failed to enable and start the Xray service"
    fi
  fi
}
main "$@"

View File

@@ -0,0 +1,27 @@
- name: CF_TEST_1
type: vless
server: gur.gov.ua
port: 8443
uuid: 86c50e3a-5b87-49dd-bd20-03c7f2735e40
udp: false
tls: true
network: ws
servername: pp.icederce.ip-ddns.com
ws-opts:
path: "/?ed=2560"
headers:
Host: pp.icederce.ip-ddns.com
- name: CF_TEST_2
type: vless
server: www.csgo.com
port: 443
uuid: 86c50e3a-5b87-49dd-bd20-03c7f2735e40
udp: false
tls: true
network: ws
servername: pp.icederce.ip-ddns.com
ws-opts:
path: "/?ed=2560"
headers:
Host: pp.icederce.ip-ddns.com

View File

@@ -0,0 +1,772 @@
import { connect } from 'cloudflare:sockets';
// UUID that incoming WddGo (VLESS-style) clients must present.
let userID = '86c50e3a-5b87-49dd-bd20-03c7f2735e40';
// Optional fallback proxy IP; empty means connect directly (see retry()).
let proxyIP = '';
// let socks5Address = 'zeaslity:cd28a746-283e-47cc-88f7-bb43d7f6b53a@140.238.8.73:28888';
// SECURITY NOTE(review): SOCKS5 credentials are hard-coded in source; anyone
// with read access to this worker can use the upstream proxy. Prefer env vars.
let sock5User = "zeaslity"
let sock5Pass = "cd28a746-283e-47cc-88f7-bb43d7f6b53a"
let sock5Host = "140.238.8.73"
let sock5Port = 28888
// Master switch for routing matching destinations through SOCKS5.
let enableSocks = true;
// Destination patterns forced through SOCKS5; '*' is a wildcard
// (see useSocks5Pattern), other entries must match exactly.
let go2Socks5s = [
'*ttvnw.net',
'*tapecontent.net',
'*cloudatacdn.com',
'*.loadshare.org',
"whoer.net",
"whatismyipaddress.com",
"*.cloudflare.com",
"*.cloudflare.net",
"*.cloudflare.workers.dev",
"dnschecker.org",
"ip.sb",
"ipinfo.io"
];
// Cloudflare plaintext / TLS ports (informational; not read in visible code —
// TODO confirm they are used elsewhere before removing).
const httpPorts = ["8080", "8880", "2052", "2082", "2086", "2095"];
let httpsPorts = ["2053", "2083", "2087", "2096", "8443"];
export default {
/**
 * Worker entry point. Non-WebSocket requests get a trivial greeting;
 * WebSocket upgrade requests are handed to the WddGo proxy handler.
 * @param {Request} request incoming request
 * @param {Object} env worker environment bindings (unused here)
 * @param {Object} ctx execution context (unused here)
 * @returns {Promise<Response>} greeting, 101 upgrade, or error text
 */
async fetch(request, env, ctx) {
try {
// FIX: removed unused local `UA` (User-Agent was read but never used).
const upgradeHeader = request.headers.get('Upgrade');
const url = new URL(request.url);
if (!upgradeHeader || upgradeHeader !== 'websocket') {
// Plain HTTP probe: answer with a harmless page instead of 404.
return new Response('Hello World! to ' + url);
}
// Hand the upgraded connection to the proxy handler.
return await WddGoOverWSHandler(request);
} catch (err) {
let e = err;
// Surface the error text to the client rather than a bare 500.
return new Response(e.toString());
}
},
};
/**
 * Accept a WebSocket upgrade and tunnel WddGo-framed traffic:
 * parses the protocol header from the first chunk, then pumps
 * client bytes to the remote TCP socket (or a DNS handler for UDP/53).
 * @param {Request} request the upgrade request (may carry 0-RTT data
 *   in the `sec-websocket-protocol` header)
 * @returns {Promise<Response>} a 101 Switching Protocols response
 */
async function WddGoOverWSHandler(request) {
// @ts-ignore
const webSocketPair = new WebSocketPair();
const [client, webSocket] = Object.values(webSocketPair);
// Accept the server side of the WebSocket pair.
webSocket.accept();
let address = '';
let portWithRandomLog = '';
// Logger that tags every line with the current remote address/port.
const log = (/** @type {string} */ info, /** @type {string | undefined} */ event) => {
console.log(`[${address}:${portWithRandomLog}] ${info}`, event || '');
};
// Early-data header: may carry 0-RTT payload sent before the upgrade completed.
const earlyDataHeader = request.headers.get('sec-websocket-protocol') || '';
// Wrap the WebSocket into a ReadableStream of incoming client chunks.
const readableWebSocketStream = makeReadableWebSocketStream(webSocket, earlyDataHeader, log);
// Holder for the outbound remote socket once it has been established.
let remoteSocketWapper = {
value: null,
};
// Marks whether this connection is a DNS-over-port-53 query.
let isDns = false;
// Pipe: WebSocket data -> remote server.
readableWebSocketStream.pipeTo(new WritableStream({
async write(chunk, controller) {
if (isDns) {
// DNS query: delegate every chunk to the DNS handler.
return await handleDNSQuery(chunk, webSocket, null, log);
}
if (remoteSocketWapper.value) {
// Remote socket already open: just forward the chunk.
const writer = remoteSocketWapper.value.writable.getWriter()
await writer.write(chunk);
writer.releaseLock();
return;
}
// First chunk: parse the WddGo protocol header.
const {
hasError,
message,
addressType,
portRemote = 443,
addressRemote = '',
rawDataIndex,
WddGoVersion = new Uint8Array([0, 0]),
isUDP,
} = processWddGoHeader(chunk, userID);
// Record address/port so subsequent log lines are attributable.
address = addressRemote;
portWithRandomLog = `${portRemote}--${Math.random()} ${isUDP ? 'udp ' : 'tcp '} `;
if (hasError) {
// Malformed header or bad user: abort the stream.
throw new Error(message);
return;
}
// UDP is only permitted for DNS (port 53); anything else is rejected.
if (isUDP) {
if (portRemote === 53) {
isDns = true;
} else {
throw new Error('UDP 代理仅对 DNS53 端口)启用');
return;
}
}
// Response header echoes the client's protocol version byte.
const WddGoResponseHeader = new Uint8Array([WddGoVersion[0], 0]);
// Payload bytes that follow the protocol header.
const rawClientData = chunk.slice(rawDataIndex);
if (isDns) {
// First DNS chunk: forward payload together with the response header.
return handleDNSQuery(rawClientData, webSocket, WddGoResponseHeader, log);
}
// Establish the outbound TCP connection and start pumping.
log(`处理 TCP 出站连接 ${addressRemote}:${portRemote}`);
handleTCPOutBound(remoteSocketWapper, addressType, addressRemote, portRemote, rawClientData, webSocket, WddGoResponseHeader, log);
},
close() {
log(`readableWebSocketStream 已关闭`);
},
abort(reason) {
log(`readableWebSocketStream 已中止`, JSON.stringify(reason));
},
})).catch((err) => {
log('readableWebSocketStream 管道错误', err);
});
// Return the 101 upgrade response carrying the client half of the pair.
return new Response(null, {
status: 101,
// @ts-ignore
webSocket: client,
});
}
/**
 * Open the outbound TCP connection (directly or via SOCKS5), write the
 * first chunk of client data, and start the remote->WebSocket pump with
 * retry() as the no-inbound-data fallback.
 * Interface unchanged: (remoteSocket, addressType, addressRemote,
 * portRemote, rawClientData, webSocket, WddGoResponseHeader, log).
 */
async function handleTCPOutBound(remoteSocket, addressType, addressRemote, portRemote, rawClientData, webSocket, WddGoResponseHeader, log,) {
	/**
	 * Decide whether this destination must go through the SOCKS5 proxy:
	 * exact entry in go2Socks5s, or wildcard entry where '*' matches any
	 * run of characters (case-insensitive).
	 */
	async function useSocks5Pattern(address) {
		// Fast path: exact list entry.
		// FIX: original tested `includes(address) || includes(address)` —
		// the identical condition twice; a single check suffices.
		if (go2Socks5s.includes(address)) return true;
		return go2Socks5s.some(pattern => {
			let regexPattern = pattern.replace(/\*/g, '.*');
			let regex = new RegExp(`^${regexPattern}$`, 'i');
			return regex.test(address);
		});
	}
	/**
	 * Connect (direct or SOCKS5) and write the first chunk — normally the
	 * TLS ClientHello — before returning the socket.
	 */
	async function connectAndWrite(address, port, socks = false) {
		log(`connected to ${address}:${port}`);
		//if (/^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/.test(address)) address = `${atob('d3d3Lg==')}${address}${atob('LmlwLjA5MDIyNy54eXo=')}`;
		const tcpSocket = socks ? await socks5Connect(addressType, address, port, log)
			: connect({
				hostname: address,
				port: port,
			});
		remoteSocket.value = tcpSocket;
		//log(`connected to ${address}:${port}`);
		const writer = tcpSocket.writable.getWriter();
		await writer.write(rawClientData);
		writer.releaseLock();
		return tcpSocket;
	}
	/**
	 * Retry hook: called when Cloudflare's TCP socket yields no inbound
	 * data (likely a blocked/unreachable destination). Reconnects through
	 * SOCKS5 when enabled, otherwise through a hard-coded fallback proxy.
	 */
	async function retry() {
		if (enableSocks) {
			tcpSocket = await connectAndWrite(addressRemote, portRemote, true);
		} else {
			// Fallback proxy: IPv6 literal plus alternate port.
			proxyIP = '[2603:c022:8008:8923:88a0:5c7d:2bb6:6ed5]'
			portRemote = 27443
			tcpSocket = await connectAndWrite(proxyIP || addressRemote, portRemote);
		}
		// Whether or not the retry works, close the WebSocket when the socket
		// finishes so the client can re-establish the tunnel.
		tcpSocket.closed.catch(error => {
			console.log('retry tcpSocket closed error', error);
		}).finally(() => {
			safeCloseWebSocket(webSocket);
		})
		// Pump remote -> WebSocket (no further retry).
		remoteSocketToWS(tcpSocket, webSocket, WddGoResponseHeader, null, log);
	}
	let useSocks = false;
	if (go2Socks5s.length > 0 && enableSocks) useSocks = await useSocks5Pattern(addressRemote);
	// First connection attempt.
	let tcpSocket = await connectAndWrite(addressRemote, portRemote, useSocks);
	// Pump remote -> WebSocket; retry() fires if no data ever arrives.
	remoteSocketToWS(tcpSocket, webSocket, WddGoResponseHeader, retry, log);
}
/**
 * Wrap a server-side WebSocket into a ReadableStream of message chunks,
 * prepending any 0-RTT early data decoded from `earlyDataHeader`.
 * @param {WebSocket} webSocketServer accepted server-side socket
 * @param {string} earlyDataHeader base64url 0-RTT payload (may be empty)
 * @param {(info: string)=> void} log logger
 * @returns {ReadableStream} stream of incoming client data
 */
function makeReadableWebSocketStream(webSocketServer, earlyDataHeader, log) {
// Set once the stream is cancelled; later events are then ignored.
let readableStreamCancel = false;
const stream = new ReadableStream({
// Stream start: wire WebSocket events into the controller.
start(controller) {
// Incoming messages are queued as stream chunks.
webSocketServer.addEventListener('message', (event) => {
// After cancellation, drop new messages.
if (readableStreamCancel) {
return;
}
const message = event.data;
controller.enqueue(message);
});
// 'close' means the client closed the client -> server direction.
// The server -> client direction stays open until close() is called
// server-side; the WebSocket protocol requires a close message in
// each direction to fully shut the socket.
webSocketServer.addEventListener('close', () => {
// Client signalled close: close the server side too.
safeCloseWebSocket(webSocketServer);
// Skip controller.close() if the stream was already cancelled.
if (readableStreamCancel) {
return;
}
controller.close();
});
// Propagate socket errors into the stream.
webSocketServer.addEventListener('error', (err) => {
log('WebSocket 服务器发生错误');
controller.error(err);
});
// WebSocket 0-RTT: data sent before the connection fully established,
// delivered via the sec-websocket-protocol header.
const { earlyData, error } = base64ToArrayBuffer(earlyDataHeader);
if (error) {
// Bad base64 in the early-data header: fail the stream.
controller.error(error);
} else if (earlyData) {
// Queue the early data ahead of regular messages.
controller.enqueue(earlyData);
}
},
// Called when the consumer pulls; backpressure could be implemented here.
pull(controller) {
// If the WebSocket could pause reads while the stream is full we could
// implement backpressure, per
// https://streams.spec.whatwg.org/#example-rs-push-backpressure
},
// Called when the stream is cancelled (e.g. the piped WritableStream errored).
cancel(reason) {
// Cancellation paths:
// 1. a piping WritableStream error cancels us — close the socket here;
// 2. after cancel, all controller.close/enqueue calls must be skipped;
// 3. controller.error still works even after cancellation (observed).
if (readableStreamCancel) {
return;
}
log(`可读流被取消,原因是 ${reason}`);
readableStreamCancel = true;
// Close the socket safely (idempotent).
safeCloseWebSocket(webSocketServer);
}
});
return stream;
}
/**
 * Parse the WddGo (VLESS-style) protocol header from the first chunk.
 * Layout: [1B version][16B user UUID][1B optLen][optLen bytes options]
 * [1B command][2B port BE][1B addrType][address].
 * @param { ArrayBuffer} WddGoBuffer raw header bytes
 * @param {string} userID expected user UUID (canonical lowercase form)
 * @returns {Object} parse result: hasError/message on failure, otherwise
 *   addressRemote, addressType, portRemote, rawDataIndex (payload offset),
 *   WddGoVersion and isUDP
 */
function processWddGoHeader(WddGoBuffer, userID) {
// Minimum header size: 1 (version) + 16 (uuid) + 1 (optLen) + 1 (cmd)
// + 2 (port) + 1 (addrType) + at least some address bytes => 24.
if (WddGoBuffer.byteLength < 24) {
return {
hasError: true,
message: 'invalid data',
};
}
// Protocol version: first byte.
const version = new Uint8Array(WddGoBuffer.slice(0, 1));
let isValidUser = false;
let isUDP = false;
// Validate the user UUID (bytes 1..16 inclusive).
function isUserIDValid(userID, buffer) {
const userIDArray = new Uint8Array(buffer.slice(1, 17));
const userIDString = stringify(userIDArray);
return userIDString === userID
}
// Run the validation.
isValidUser = isUserIDValid(userID, WddGoBuffer);
// Reject unknown users.
if (!isValidUser) {
return {
hasError: true,
message: `invalid user ${(new Uint8Array(WddGoBuffer.slice(1, 17)))}`,
};
}
// Length of the additional-options field (byte 17).
const optLength = new Uint8Array(WddGoBuffer.slice(17, 18))[0];
// Options themselves are skipped for now.
// Command byte follows the options:
// 0x01: TCP, 0x02: UDP, 0x03: MUX (multiplexing).
const command = new Uint8Array(
WddGoBuffer.slice(18 + optLength, 18 + optLength + 1)
)[0];
// 0x01 TCP
// 0x02 UDP
// 0x03 MUX
if (command === 1) {
// TCP: no special handling needed.
} else if (command === 2) {
// UDP: flagged for the caller (only DNS/53 is actually allowed upstream).
isUDP = true;
} else {
// MUX and anything else are unsupported.
return {
hasError: true,
message: `command ${command} is not support, command 01-tcp,02-udp,03-mux`,
};
}
// Remote port: 2 bytes, big-endian.
const portIndex = 18 + optLength + 1;
const portBuffer = WddGoBuffer.slice(portIndex, portIndex + 2);
// port is big-Endian in raw data etc 80 == 0x005d
const portRemote = new DataView(portBuffer).getUint16(0);
// Address type and address follow the port.
let addressIndex = portIndex + 2;
const addressBuffer = new Uint8Array(
WddGoBuffer.slice(addressIndex, addressIndex + 1)
);
// Address type: 1 = IPv4 (4 bytes), 2 = domain (variable), 3 = IPv6 (16 bytes).
const addressType = addressBuffer[0];
let addressLength = 0;
let addressValueIndex = addressIndex + 1;
let addressValue = '';
switch (addressType) {
case 1:
// IPv4.
addressLength = 4;
// Render the 4 bytes as dotted-decimal.
addressValue = new Uint8Array(
WddGoBuffer.slice(addressValueIndex, addressValueIndex + addressLength)
).join('.');
break;
case 2:
// Domain name.
// First byte is the domain length.
addressLength = new Uint8Array(
WddGoBuffer.slice(addressValueIndex, addressValueIndex + 1)
)[0];
addressValueIndex += 1;
// Decode the domain as UTF-8 text.
addressValue = new TextDecoder().decode(
WddGoBuffer.slice(addressValueIndex, addressValueIndex + addressLength)
);
break;
case 3:
// IPv6.
addressLength = 16;
const dataView = new DataView(
WddGoBuffer.slice(addressValueIndex, addressValueIndex + addressLength)
);
// Each 2 bytes form one (uncompressed) group of the IPv6 address.
const ipv6 = [];
for (let i = 0; i < 8; i++) {
ipv6.push(dataView.getUint16(i * 2).toString(16));
}
addressValue = ipv6.join(':');
// seems no need add [] for ipv6
break;
default:
// Unknown address type.
return {
hasError: true,
message: `invild addressType is ${addressType}`,
};
}
// The address must be non-empty.
if (!addressValue) {
return {
hasError: true,
message: `addressValue is empty, addressType is ${addressType}`,
};
}
// Success: return the parsed fields.
return {
hasError: false,
addressRemote: addressValue, // parsed remote address
addressType, // address type (1/2/3)
portRemote, // remote port
rawDataIndex: addressValueIndex + addressLength, // offset where payload starts
WddGoVersion: version, // protocol version byte
isUDP, // whether this is a UDP request
};
}
/**
 * Pump data from the remote TCP socket back to the WebSocket client.
 * Sends the WddGo response header merged with the first chunk only.
 * If the socket closes without ever producing data and `retry` is
 * provided, calls retry() to attempt a fallback connection.
 * @param {Socket} remoteSocket established outbound socket
 * @param {WebSocket} webSocket client-facing socket
 * @param {Uint8Array} WddGoResponseHeader header for the first frame (or null)
 * @param {Function|null} retry fallback re-connect hook
 * @param {Function} log logger
 */
async function remoteSocketToWS(remoteSocket, webSocket, WddGoResponseHeader, retry, log) {
// Forward remote-server bytes to the WebSocket.
let remoteChunkCount = 0;
let chunks = [];
/** @type {ArrayBuffer | null} */
let WddGoHeader = WddGoResponseHeader;
let hasIncomingData = false; // whether the remote socket produced any data
// Pipe the socket's readable side into a WritableStream that sends frames.
await remoteSocket.readable
.pipeTo(
new WritableStream({
start() {
// No setup needed.
},
/**
 * Forward one chunk to the WebSocket client.
 * @param {Uint8Array} chunk data from the remote server
 * @param {*} controller stream controller
 */
async write(chunk, controller) {
hasIncomingData = true; // mark that data arrived
// remoteChunkCount++; // was used for flow control; no longer needed
// Only send while the WebSocket is open.
if (webSocket.readyState !== WS_READY_STATE_OPEN) {
controller.error(
'webSocket.readyState is not open, maybe close'
);
}
if (WddGoHeader) {
// First chunk: prepend the WddGo response header.
webSocket.send(await new Blob([WddGoHeader, chunk]).arrayBuffer());
WddGoHeader = null; // header is sent exactly once
} else {
// Subsequent chunks go out as-is.
// There used to be rate limiting here for large transfers,
// but Cloudflare appears to have fixed the underlying issue.
// if (remoteChunkCount > 20000) {
// // cf one package is 4096 byte(4kb), 4096 * 20000 = 80M
// await delay(1);
// }
webSocket.send(chunk);
}
},
close() {
// Remote readable closed.
log(`remoteConnection!.readable is close with hasIncomingData is ${hasIncomingData}`);
// Deliberately NOT closing the WebSocket here: doing so can cause
// HTTP ERR_CONTENT_LENGTH_MISMATCH; the client sends close anyway.
// safeCloseWebSocket(webSocket);
},
abort(reason) {
// Remote readable aborted.
console.error(`remoteConnection!.readable abort`, reason);
},
})
)
.catch((error) => {
// Any pipe error: log and close the WebSocket safely.
console.error(
`remoteSocketToWS has exception `,
error.stack || error
);
safeCloseWebSocket(webSocket);
});
// Cloudflare-specific failure mode:
// 1. socket.closed reports an error, and/or
// 2. socket.readable closes without ever delivering data.
if (hasIncomingData === false && retry) {
log(`retry`);
retry(); // attempt the fallback connection
}
}
/**
 * Decode a (URL-safe) Base64 string into an ArrayBuffer.
 *
 * @param {string} base64Str Base64 input; may use the RFC 4648 URL-safe
 *   alphabet ('-' and '_' instead of '+' and '/').
 * @returns {{ earlyData: ArrayBuffer | undefined, error: Error | null }}
 *   decoded bytes, or the decoding error.
 */
function base64ToArrayBuffer(base64Str) {
	// Empty/missing input: nothing to decode, no error.
	if (!base64Str) {
		return { earlyData: undefined, error: null };
	}
	try {
		// atob() only understands the standard alphabet, so map the
		// URL-safe characters back first.
		const normalized = base64Str.replace(/-/g, '+').replace(/_/g, '/');
		// atob() yields a binary string: one char per byte, codes 0-255.
		const binary = atob(normalized);
		// Copy each char code into a byte array.
		const bytes = Uint8Array.from(binary, (c) => c.charCodeAt(0));
		// Hand back the underlying ArrayBuffer.
		return { earlyData: bytes.buffer, error: null };
	} catch (error) {
		// Illegal Base64 characters etc. end up here.
		return { earlyData: undefined, error };
	}
}
/**
 * Lightweight shape check for an RFC 4122 version-4 UUID string.
 * Not a full validation — just the canonical 8-4-4-4-12 hex layout with
 * version nibble 4 and variant nibble 8/9/a/b, case-insensitive.
 * @param {string} uuid candidate string
 * @returns {boolean} true when the string matches the v4 UUID shape
 */
function isValidUUID(uuid) {
	// 8-4-4-4-12 hex groups; version digit fixed to 4, variant to [89ab].
	const pattern = /^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;
	return pattern.test(uuid);
}
// The two WebSocket readyState values this worker cares about.
const WS_READY_STATE_OPEN = 1; // socket is open: can send and receive
const WS_READY_STATE_CLOSING = 2; // socket is in the middle of closing
/**
 * Close a WebSocket without ever throwing: close() is only invoked when
 * the socket is OPEN or CLOSING, and any unexpected error is logged
 * instead of propagated.
 * @param {WebSocket} socket socket to close
 */
function safeCloseWebSocket(socket) {
	try {
		const state = socket.readyState;
		// close() is only legal on open/closing sockets; calling it on a
		// connecting or already-closed socket is skipped entirely.
		const closable = state === WS_READY_STATE_OPEN || state === WS_READY_STATE_CLOSING;
		if (closable) {
			socket.close();
		}
	} catch (error) {
		// Per spec this should not throw, but log defensively if it does.
		console.error('safeCloseWebSocket error', error);
	}
}
// Precompute the two-digit hex representation of every byte value 0-255.
const byteToHex = [];
for (let i = 0; i < 256; ++i) {
// (i + 256).toString(16) guarantees a three-digit result ("1xx"),
// and .slice(1) drops the leading "1", leaving exactly two hex digits.
byteToHex.push((i + 256).toString(16).slice(1));
}
/**
 * Format 16 bytes from `arr` (starting at `offset`) as a lowercase UUID
 * string, with no validation. This is the raw, unchecked path — see
 * stringify() for the variant that verifies the result.
 * @param {Uint8Array} arr array containing the UUID bytes
 * @param {number} offset index of the first UUID byte (default 0)
 * @returns {string} lowercase UUID string
 */
function unsafeStringify(arr, offset = 0) {
	// Look up the two-digit hex form of each of the 16 bytes.
	const hex = [];
	for (let i = 0; i < 16; i++) {
		hex.push(byteToHex[arr[offset + i]]);
	}
	// Join with dashes at the canonical 8-4-4-4-12 boundaries;
	// toLowerCase() guarantees a lowercase result.
	return (
		hex.slice(0, 4).join('') + '-' +
		hex.slice(4, 6).join('') + '-' +
		hex.slice(6, 8).join('') + '-' +
		hex.slice(8, 10).join('') + '-' +
		hex.slice(10, 16).join('')
	).toLowerCase();
}
/**
 * Convert 16 bytes into a UUID string and verify the result is a
 * well-formed v4 UUID. This is the checked counterpart of
 * unsafeStringify().
 * @param {Uint8Array} arr array containing the UUID bytes
 * @param {number} offset index of the first UUID byte (default 0)
 * @returns {string} validated UUID string
 * @throws {TypeError} when the formatted string fails isValidUUID()
 */
function stringify(arr, offset = 0) {
	// Fast, unchecked formatting first.
	const uuid = unsafeStringify(arr, offset);
	// Reject anything that does not look like a v4 UUID.
	if (isValidUUID(uuid)) {
		return uuid;
	}
	// Original message kept verbatim (was: "Stringified UUID is invalid").
	throw TypeError(`生成的 UUID 不符合规范 ${uuid}`);
}
/**
 * Handle a DNS query by forwarding it over TCP to a hard-coded resolver.
 * @param {ArrayBuffer} udpChunk - the client's DNS query bytes
 * @param {WebSocket} webSocket - client-facing socket for the response
 * @param {ArrayBuffer} WddGoResponseHeader - WddGo response header (sent
 *   with the first response chunk only; may be null)
 * @param {(string)=> void} log - logger
 */
async function handleDNSQuery(udpChunk, webSocket, WddGoResponseHeader, log) {
// Whatever DNS server the client asked for is ignored: always use a
// hard-coded resolver, since some servers do not support DNS over TCP.
try {
// Google's resolver for now (may switch to Cloudflare's 1.1.1.1 later).
const dnsServer = '8.8.4.4'; // switch to 1.1.1.1 once CF fixes connecting to its own IPs
const dnsPort = 53; // standard DNS port
let WddGoHeader = WddGoResponseHeader; // header to prepend to the first reply chunk
// Open a TCP connection to the resolver.
const tcpSocket = connect({
hostname: dnsServer,
port: dnsPort,
});
log(`连接到 ${dnsServer}:${dnsPort}`); // log the connection
const writer = tcpSocket.writable.getWriter();
await writer.write(udpChunk); // forward the client's DNS query
writer.releaseLock(); // release so other code may write later
// Relay resolver responses back to the client over the WebSocket.
await tcpSocket.readable.pipeTo(new WritableStream({
async write(chunk) {
if (webSocket.readyState === WS_READY_STATE_OPEN) {
if (WddGoHeader) {
// First chunk: merge the WddGo header with the DNS reply.
webSocket.send(await new Blob([WddGoHeader, chunk]).arrayBuffer());
WddGoHeader = null; // header is sent exactly once
} else {
// Subsequent chunks go out unmodified.
webSocket.send(chunk);
}
}
},
close() {
log(`DNS 服务器(${dnsServer}) TCP 连接已关闭`); // connection closed
},
abort(reason) {
console.error(`DNS 服务器(${dnsServer}) TCP 连接异常中断`, reason); // abnormal abort
},
}));
} catch (error) {
// Log any failure; the WebSocket is left to the caller's lifecycle.
console.error(
`handleDNSQuery 函数发生异常,错误信息: ${error.message}`
);
}
}
/**
 * Establish a TCP connection to `addressRemote:portRemote` through the
 * configured SOCKS5 proxy using username/password auth (RFC 1928/1929).
 * Incoming `addressType` uses the VLESS numbering (1=IPv4, 2=domain,
 * 3=IPv6); SOCKS5 ATYP is 1=IPv4, 3=domain, 4=IPv6.
 * Returns the connected socket, or undefined on any handshake failure.
 * @param {number} addressType VLESS address type of addressRemote
 * @param {string} addressRemote destination host (IP literal or domain)
 * @param {number} portRemote destination port
 * @param {Function} log logger
 * @returns {Promise<Socket|undefined>} connected socket or undefined
 */
async function socks5Connect(addressType, addressRemote, portRemote, log) {
	// Proxy endpoint/credentials come from module-level configuration.
	const username = sock5User
	const password = sock5Pass
	const hostname = sock5Host
	const port = sock5Port
	// Raw TCP connection to the proxy itself.
	const socket = connect({ hostname, port });
	const writer = socket.writable.getWriter();
	const reader = socket.readable.getReader();
	const encoder = new TextEncoder();
	// Greeting: VER=5, NMETHODS=1, METHODS=[0x02] — we only offer
	// username/password auth (a proxy answering 0x00 is rejected below,
	// as in the original implementation).
	await writer.write(new Uint8Array([5, 1, 2]));
	log('SOCKS5 认证方法协商');
	// Method-selection reply: expect VER=5, METHOD=0x02.
	let res = (await reader.read()).value;
	if (res[0] !== 0x05 || res[1] !== 0x02) {
		res[1] === 0xff && log("不支持的认证方式");
		return;
	}
	// RFC 1929 auth request: VER=1, ULEN, UNAME, PLEN, PASSWD.
	const userBytes = encoder.encode(username);
	const passBytes = encoder.encode(password);
	const authHeader = new Uint8Array(3 + userBytes.length + passBytes.length);
	authHeader.set([1, userBytes.length, ...userBytes, passBytes.length], 0);
	// FIX: the password starts AFTER the PLEN byte, i.e. at 3 + ULEN.
	// The previous offset (2 + ULEN) overwrote PLEN with the first
	// password byte, producing a malformed auth packet.
	authHeader.set(passBytes, 3 + userBytes.length);
	await writer.write(authHeader);
	// Auth reply: VER=1, STATUS=0x00 on success.
	res = (await reader.read()).value;
	if (res[0] !== 0x01 || res[1] !== 0x00) {
		log(`认证失败 code: 0x${res[1].toString(16)}`);
		return;
	}
	// CONNECT request header: VER=5, CMD=1 (CONNECT), RSV=0.
	const header = new Uint8Array([5, 1, 0]);
	// ATYP + destination address, encoded per RFC 1928.
	let addrBuffer;
	if (addressType === 2) {
		// Domain: ATYP=0x03, then length-prefixed name.
		// FIX: previous code emitted ATYP=0x02 (the VLESS code, invalid in
		// SOCKS5) and used the JS string length instead of the UTF-8 byte
		// length, which breaks for non-ASCII names.
		const domainBytes = encoder.encode(addressRemote);
		addrBuffer = new Uint8Array(2 + domainBytes.length);
		addrBuffer[0] = 3;
		addrBuffer[1] = domainBytes.length;
		addrBuffer.set(domainBytes, 2);
	} else if (addressType === 3) {
		// IPv6: ATYP=0x04, then 16 bytes in network order. The header parser
		// always produces the uncompressed 8-group colon form, so split(':')
		// yields exactly 8 hex groups of 1-4 digits each.
		// FIX: previous code parsed p.substring(0,4)/p.substring(4) — NaN for
		// groups shorter than 4 digits — and round-tripped through a
		// Uint16Array whose buffer is host-endian (little-endian on V8),
		// emitting the bytes in the wrong order. Write big-endian explicitly.
		addrBuffer = new Uint8Array(1 + 16);
		addrBuffer[0] = 4;
		const groups = addressRemote.split(':');
		for (let i = 0; i < 8; i++) {
			const value = parseInt(groups[i], 16);
			addrBuffer[1 + i * 2] = value >> 8;
			addrBuffer[2 + i * 2] = value & 0xff;
		}
	} else {
		// IPv4: ATYP=0x01, then the 4 octets of the dotted quad.
		addrBuffer = new Uint8Array(1 + 4);
		addrBuffer[0] = 1;
		addrBuffer.set(addressRemote.split('.').map(Number), 1);
	}
	// Assemble and send: header + address + port in network (big-endian) order.
	const finalPacket = new Uint8Array([
		...header,
		...addrBuffer,
		portRemote >> 8, portRemote & 0xff
	]);
	await writer.write(finalPacket);
	log('SOCKS5 连接请求已发送');
	// CONNECT reply: REP (byte 1) must be 0x00 for success.
	res = (await reader.read()).value;
	if (res[1] !== 0x00) {
		log(`连接失败 code: 0x${res[1].toString(16)}`);
		return;
	}
	// Release the locks so the caller can use the socket's streams.
	writer.releaseLock();
	reader.releaseLock();
	return socket;
}

View File

@@ -0,0 +1,996 @@
// @ts-ignore
import { connect } from "cloudflare:sockets";
// UUID(s) incoming VLESS clients must present; overridable via env.uuid.
// SECURITY NOTE(review): hard-coded credential in source; prefer env bindings.
let userID = "86c50e3a-5b87-49dd-bd20-03c7f2735e40";
// Candidate fallback proxy hosts; one is picked at random per isolate.
const proxyIPs = ["ts.hpc.tw","47.254.66.75","146.70.175.98","146.70.175.99","146.70.175.100","146.70.175.101","146.70.175.102","146.70.175.103","146.70.175.104","146.70.175.106","cdn-all.xn--b6gac.eu.org","cdn.xn--b6gac.eu.org"];
// Camouflage hostnames for non-WebSocket traffic; the [''] entry makes
// fetch() return the request.cf JSON instead of reverse-proxying.
const cn_hostnames = [''];
// Default CDN host used when generating client configs; env.cdnip overrides.
let CDNIP = 'cdn-all.xijingping.link'
// http_ip — SNI/host candidates for plaintext ports (env.ip1..ip7 override).
let IP1 = 'www.visa.com'
let IP2 = 'cis.visa.com'
let IP3 = 'africa.visa.com'
let IP4 = 'www.visa.com.sg'
let IP5 = 'www.visaeurope.at'
let IP6 = 'www.visa.com.mt'
let IP7 = 'qa.visamiddleeast.com'
// https_ip — SNI/host candidates for TLS ports (env.ip8..ip13 override).
let IP8 = 'usa.visa.com'
let IP9 = 'malaysia.com'
let IP10 = 'www.visa.co.jp'
let IP11 = 'www.digitalocean.com'
let IP12 = 'japan.com'
let IP13 = 'cdn-b100.xn--b6gac.eu.org'
// http_port — Cloudflare plaintext ports (env.pt1..pt7 override).
let PT1 = '80'
let PT2 = '8080'
let PT3 = '8880'
let PT4 = '2052'
let PT5 = '2082'
let PT6 = '2086'
let PT7 = '2095'
// https_port — Cloudflare TLS ports (env.pt8..pt13 override).
let PT8 = '443'
let PT9 = '8443'
let PT10 = '2053'
let PT11 = '2083'
let PT12 = '2087'
let PT13 = '2096'
// Pick one proxy at random; split off an optional ":port" suffix.
// NOTE(review): this split misparses bracketed IPv6 literals, but none
// appear in proxyIPs; fetch() re-parses with IPv6-aware logic anyway.
let proxyIP = proxyIPs[Math.floor(Math.random() * proxyIPs.length)];
let proxyPort = proxyIP.includes(':') ? proxyIP.split(':')[1] : '443';
// Fail fast at module load if the baked-in UUID is malformed.
if (!isValidUUID(userID)) {
throw new Error("uuid is not valid");
}
export default {
/**
 * Worker entry point: applies env-var overrides to the module-level
 * config, reverse-proxies plain HTTP requests to a camouflage host
 * (or dumps request.cf when cn_hostnames contains ''), and hands
 * WebSocket upgrades to the VLESS handler.
 * @param {import("@cloudflare/workers-types").Request} request
 * @param {uuid: string, proxyip: string, cdnip: string, ip1: string, ip2: string, ip3: string, ip4: string, ip5: string, ip6: string, ip7: string, ip8: string, ip9: string, ip10: string, ip11: string, ip12: string, ip13: string, pt1: string, pt2: string, pt3: string, pt4: string, pt5: string, pt6: string, pt7: string, pt8: string, pt9: string, pt10: string, pt11: string, pt12: string, pt13: string} env
 * @param {import("@cloudflare/workers-types").ExecutionContext} ctx
 * @returns {Promise<Response>}
 */
async fetch(request, env, ctx) {
try {
const { proxyip } = env;
userID = env.uuid || userID;
// Split proxyip (env override) or the randomly chosen proxyIP into
// host + port, handling bracketed IPv6 ("[::1]:443"), host:port, and
// bare-host forms.
if (proxyip) {
if (proxyip.includes(']:')) {
// Bracketed IPv6 with explicit port.
let lastColonIndex = proxyip.lastIndexOf(':');
proxyPort = proxyip.slice(lastColonIndex + 1);
proxyIP = proxyip.slice(0, lastColonIndex);
} else if (!proxyip.includes(']:') && !proxyip.includes(']')) {
// Plain host (optionally host:port); default port 443.
[proxyIP, proxyPort = '443'] = proxyip.split(':');
} else {
// Bracketed IPv6 without port.
proxyPort = '443';
proxyIP = proxyip;
}
} else {
// Same parsing applied to the randomly selected default proxyIP.
if (proxyIP.includes(']:')) {
let lastColonIndex = proxyIP.lastIndexOf(':');
proxyPort = proxyIP.slice(lastColonIndex + 1);
proxyIP = proxyIP.slice(0, lastColonIndex);
} else if (!proxyIP.includes(']:') && !proxyIP.includes(']')) {
[proxyIP, proxyPort = '443'] = proxyIP.split(':');
} else {
proxyPort = '443';
}
}
console.log('ProxyIP:', proxyIP);
console.log('ProxyPort:', proxyPort);
// Apply the remaining env overrides onto the module-level defaults.
CDNIP = env.cdnip || CDNIP;
IP1 = env.ip1 || IP1;
IP2 = env.ip2 || IP2;
IP3 = env.ip3 || IP3;
IP4 = env.ip4 || IP4;
IP5 = env.ip5 || IP5;
IP6 = env.ip6 || IP6;
IP7 = env.ip7 || IP7;
IP8 = env.ip8 || IP8;
IP9 = env.ip9 || IP9;
IP10 = env.ip10 || IP10;
IP11 = env.ip11 || IP11;
IP12 = env.ip12 || IP12;
IP13 = env.ip13 || IP13;
PT1 = env.pt1 || PT1;
PT2 = env.pt2 || PT2;
PT3 = env.pt3 || PT3;
PT4 = env.pt4 || PT4;
PT5 = env.pt5 || PT5;
PT6 = env.pt6 || PT6;
PT7 = env.pt7 || PT7;
PT8 = env.pt8 || PT8;
PT9 = env.pt9 || PT9;
PT10 = env.pt10 || PT10;
PT11 = env.pt11 || PT11;
PT12 = env.pt12 || PT12;
PT13 = env.pt13 || PT13;
const upgradeHeader = request.headers.get("Upgrade");
const url = new URL(request.url);
if (!upgradeHeader || upgradeHeader !== "websocket") {
// return new Response('Not found', { status: 404 });
// For any other path, reverse proxy to 'ramdom website' and return the original response, caching it in the process
if (cn_hostnames.includes('')) {
// Camouflage disabled: echo the Cloudflare request metadata as JSON.
return new Response(JSON.stringify(request.cf, null, 4), {
status: 200,
headers: {
"Content-Type": "application/json;charset=utf-8",
},
});
}
// Pick a random camouflage host and strip identifying headers.
const randomHostname = cn_hostnames[Math.floor(Math.random() * cn_hostnames.length)];
const newHeaders = new Headers(request.headers);
newHeaders.set("cf-connecting-ip", "1.2.3.4");
newHeaders.set("x-forwarded-for", "1.2.3.4");
newHeaders.set("x-real-ip", "1.2.3.4");
newHeaders.set("referer", "https://www.google.com/search?q=edtunnel");
// Use fetch to proxy the request to 15 different domains
const proxyUrl = "https://" + randomHostname + url.pathname + url.search;
let modifiedRequest = new Request(proxyUrl, {
method: request.method,
headers: newHeaders,
body: request.body,
redirect: "manual",
});
const proxyResponse = await fetch(modifiedRequest, { redirect: "manual" });
// Check for 302 or 301 redirect status and return an error response
if ([301, 302].includes(proxyResponse.status)) {
return new Response(`Redirects to ${randomHostname} are not allowed.`, {
status: 403,
statusText: "Forbidden",
});
}
// Return the response from the proxy server
return proxyResponse;
} else {
// WebSocket upgrade: hand off to the VLESS tunnel handler.
return await vlessOverWSHandler(request);
}
} catch (err) {
/** @type {Error} */ let e = err;
return new Response(e.toString());
}
},
};
// NOTE(review): /^[\s\S]*$/ matches ANY string (including the empty one),
// so despite its name this function always returns true and performs no
// real IP validation. Presumably a deliberate accept-everything
// placeholder — confirm against callers before tightening the regex.
function isValidIP(ip) {
var reg = /^[\s\S]*$/;
return reg.test(ip);
}
/**
 * Accept a WebSocket upgrade and tunnel VLESS-framed traffic: parses the
 * VLESS header from the first chunk, then pumps client bytes to the
 * remote TCP socket (or a UDP/DNS writer for port 53).
 * @param {import("@cloudflare/workers-types").Request} request
 * @returns {Promise<Response>} a 101 Switching Protocols response
 */
async function vlessOverWSHandler(request) {
/** @type {import("@cloudflare/workers-types").WebSocket[]} */
// @ts-ignore
const webSocketPair = new WebSocketPair();
const [client, webSocket] = Object.values(webSocketPair);
webSocket.accept();
let address = "";
let portWithRandomLog = "";
// Logger that tags every line with the current remote address/port.
const log = (/** @type {string} */ info, /** @type {string | undefined} */ event) => {
console.log(`[${address}:${portWithRandomLog}] ${info}`, event || "");
};
// 0-RTT early data arrives via the sec-websocket-protocol header.
const earlyDataHeader = request.headers.get("sec-websocket-protocol") || "";
const readableWebSocketStream = makeReadableWebSocketStream(webSocket, earlyDataHeader, log);
/** @type {{ value: import("@cloudflare/workers-types").Socket | null}}*/
// Holder for the outbound remote socket once established.
let remoteSocketWapper = {
value: null,
};
// Writer installed once a UDP/DNS outbound has been set up.
let udpStreamWrite = null;
let isDns = false;
// ws --> remote
readableWebSocketStream
.pipeTo(
new WritableStream({
async write(chunk, controller) {
if (isDns && udpStreamWrite) {
// DNS mode already active: feed chunks into the UDP writer.
return udpStreamWrite(chunk);
}
if (remoteSocketWapper.value) {
// Remote socket already open: forward the chunk directly.
const writer = remoteSocketWapper.value.writable.getWriter();
await writer.write(chunk);
writer.releaseLock();
return;
}
// First chunk: parse the VLESS protocol header.
const {
hasError,
message,
portRemote = 443,
addressRemote = "",
rawDataIndex,
vlessVersion = new Uint8Array([0, 0]),
isUDP,
} = await processVlessHeader(chunk, userID);
address = addressRemote;
portWithRandomLog = `${portRemote}--${Math.random()} ${isUDP ? "udp " : "tcp "} `;
if (hasError) {
// controller.error(message);
throw new Error(message); // cf seems has bug, controller.error will not end stream
// webSocket.close(1000, message);
return;
}
// if UDP but port not DNS port, close it
if (isUDP) {
if (portRemote === 53) {
isDns = true;
} else {
// controller.error('UDP proxy only enable for DNS which is port 53');
throw new Error("UDP proxy only enable for DNS which is port 53"); // cf seems has bug, controller.error will not end stream
return;
}
}
// ["version", "附加信息长度 N"]
const vlessResponseHeader = new Uint8Array([vlessVersion[0], 0]);
// Payload bytes following the protocol header.
const rawClientData = chunk.slice(rawDataIndex);
// TODO: support udp here when cf runtime has udp support
if (isDns) {
// Set up the DNS-over-HTTPS outbound and feed it the first payload.
const { write } = await handleUDPOutBound(webSocket, vlessResponseHeader, log);
udpStreamWrite = write;
udpStreamWrite(rawClientData);
return;
}
// TCP: connect out and start pumping.
handleTCPOutBound(
remoteSocketWapper,
addressRemote,
portRemote,
rawClientData,
webSocket,
vlessResponseHeader,
log
);
},
close() {
log(`readableWebSocketStream is close`);
},
abort(reason) {
log(`readableWebSocketStream is abort`, JSON.stringify(reason));
},
})
)
.catch((err) => {
log("readableWebSocketStream pipeTo error", err);
});
// Return the 101 upgrade response carrying the client half of the pair.
return new Response(null, {
status: 101,
// @ts-ignore
webSocket: client,
});
}
/**
 * Checks if a given UUID is present in the API response.
 * Any failure (no response, network/parse error) is treated as "absent".
 * @param {string} targetUuid The UUID to search for.
 * @returns {Promise<boolean>} true when the API response lists the UUID.
 */
async function checkUuidInApiResponse(targetUuid) {
  try {
    const apiResponse = await getApiResponse();
    // A missing/empty response means the UUID cannot be confirmed.
    if (!apiResponse) {
      return false;
    }
    // Look for an exact UUID match among the returned users.
    return apiResponse.users.some((user) => user.uuid === targetUuid);
  } catch (error) {
    // Log and fail closed rather than propagate.
    console.error("Error:", error);
    return false;
  }
}
/**
 * Handles outbound TCP connections.
 *
 * @param {any} remoteSocket
 * @param {string} addressRemote The remote address to connect to.
 * @param {number} portRemote The remote port to connect to.
 * @param {Uint8Array} rawClientData The raw client data to write.
 * @param {import("@cloudflare/workers-types").WebSocket} webSocket The WebSocket to pass the remote socket to.
 * @param {Uint8Array} vlessResponseHeader The vless response header.
 * @param {function} log The logging function.
 * @returns {Promise<void>} The remote socket.
 */
async function handleTCPOutBound(
remoteSocket,
addressRemote,
portRemote,
rawClientData,
webSocket,
vlessResponseHeader,
log
) {
// Connect to address:port and immediately write the first client chunk.
async function connectAndWrite(address, port) {
// Bare IPv4 literals are rewritten to "www.<ip>.sslip.io" (base64-decoded
// fragments) — sslip.io resolves such names back to the embedded IP,
// presumably to dodge direct-IP connect restrictions. TODO confirm intent.
if (/^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/.test(address)) address = `${atob('d3d3Lg==')}${address}${atob('LnNzbGlwLmlv')}`;
/** @type {import("@cloudflare/workers-types").Socket} */
const tcpSocket = connect({
hostname: address,
port: port,
});
remoteSocket.value = tcpSocket;
log(`connected to ${address}:${port}`);
const writer = tcpSocket.writable.getWriter();
await writer.write(rawClientData); // first write, nomal is tls client hello
writer.releaseLock();
return tcpSocket;
}
// if the cf connect tcp socket have no incoming data, we retry to redirect ip
async function retry() {
// Fall back to the configured proxy host/port.
const tcpSocket = await connectAndWrite(proxyIP || addressRemote, proxyPort || portRemote);
// no matter retry success or not, close websocket
tcpSocket.closed
.catch((error) => {
console.log("retry tcpSocket closed error", error);
})
.finally(() => {
safeCloseWebSocket(webSocket);
});
// Pump remote -> WebSocket (no further retry).
remoteSocketToWS(tcpSocket, webSocket, vlessResponseHeader, null, log);
}
// First attempt: direct connection to the requested destination.
const tcpSocket = await connectAndWrite(addressRemote, portRemote);
// when remoteSocket is ready, pass to websocket
// remote--> ws
remoteSocketToWS(tcpSocket, webSocket, vlessResponseHeader, retry, log);
}
/**
 * Wrap a server-side WebSocket into a ReadableStream of message chunks,
 * prepending any 0-RTT early data decoded from `earlyDataHeader`.
 * @param {import("@cloudflare/workers-types").WebSocket} webSocketServer
 * @param {string} earlyDataHeader for ws 0rtt
 * @param {(info: string)=> void} log for ws 0rtt
 * @returns {ReadableStream} stream of incoming client data
 */
function makeReadableWebSocketStream(webSocketServer, earlyDataHeader, log) {
// Set once the stream is cancelled; later events are then ignored.
let readableStreamCancel = false;
const stream = new ReadableStream({
start(controller) {
// Incoming messages are queued as stream chunks.
webSocketServer.addEventListener("message", (event) => {
if (readableStreamCancel) {
return;
}
const message = event.data;
controller.enqueue(message);
});
// The event means that the client closed the client -> server stream.
// However, the server -> client stream is still open until you call close() on the server side.
// The WebSocket protocol says that a separate close message must be sent in each direction to fully close the socket.
webSocketServer.addEventListener("close", () => {
// client send close, need close server
// if stream is cancel, skip controller.close
safeCloseWebSocket(webSocketServer);
if (readableStreamCancel) {
return;
}
controller.close();
});
// Propagate socket errors into the stream.
webSocketServer.addEventListener("error", (err) => {
log("webSocketServer has error");
controller.error(err);
});
// for ws 0rtt
const { earlyData, error } = base64ToArrayBuffer(earlyDataHeader);
if (error) {
controller.error(error);
} else if (earlyData) {
// Queue the early data ahead of regular messages.
controller.enqueue(earlyData);
}
},
pull(controller) {
// if ws can stop read if stream is full, we can implement backpressure
// https://streams.spec.whatwg.org/#example-rs-push-backpressure
},
cancel(reason) {
// 1. pipe WritableStream has error, this cancel will called, so ws handle server close into here
// 2. if readableStream is cancel, all controller.close/enqueue need skip,
// 3. but from testing controller.error still work even if readableStream is cancel
if (readableStreamCancel) {
return;
}
log(`ReadableStream was canceled, due to ${reason}`);
readableStreamCancel = true;
safeCloseWebSocket(webSocketServer);
},
});
return stream;
}
// https://xtls.github.io/development/protocols/vless.html
// https://github.com/zizifn/excalidraw-backup/blob/main/v2ray-protocol.excalidraw
/**
 * Parse and validate a VLESS request header.
 * Layout: [1B version][16B uuid][1B optLen][optLen bytes][1B command]
 *         [2B port BE][1B addrType][address][payload...]
 * @param {ArrayBuffer} vlessBuffer raw first chunk received from the client
 * @param {string} userID a single UUID, or several separated by commas
 * @returns {Promise<object>} parse result; `hasError` true with `message` on failure
 */
async function processVlessHeader(vlessBuffer, userID) {
  // 1 + 16 + 1 + 1 + 2 + 1 + 1 + 1 = 24 bytes is the minimum possible header.
  if (vlessBuffer.byteLength < 24) {
    return {
      hasError: true,
      message: "invalid data",
    };
  }
  const version = new Uint8Array(vlessBuffer.slice(0, 1));
  let isUDP = false;
  const slicedBuffer = new Uint8Array(vlessBuffer.slice(1, 17));
  const slicedBufferString = stringify(slicedBuffer);
  const uuids = userID.includes(",") ? userID.split(",") : [userID];
  // Query the remote user API once and reuse the result. The original code
  // issued a second, redundant network round-trip just for the log line below.
  const checkUuidInApi = await checkUuidInApiResponse(slicedBufferString);
  const isValidUser = uuids.some((userUuid) => checkUuidInApi || slicedBufferString === userUuid.trim());
  console.log(`checkUuidInApi: ${checkUuidInApi}, userID: ${slicedBufferString}`);
  if (!isValidUser) {
    return {
      hasError: true,
      message: "invalid user",
    };
  }
  const optLength = new Uint8Array(vlessBuffer.slice(17, 18))[0];
  // skip opt for now
  const command = new Uint8Array(vlessBuffer.slice(18 + optLength, 18 + optLength + 1))[0];
  // 0x01 TCP, 0x02 UDP, 0x03 MUX (not supported)
  if (command === 1) {
    // TCP: nothing extra to record
  } else if (command === 2) {
    isUDP = true;
  } else {
    return {
      hasError: true,
      message: `command ${command} is not support, command 01-tcp,02-udp,03-mux`,
    };
  }
  const portIndex = 18 + optLength + 1;
  const portBuffer = vlessBuffer.slice(portIndex, portIndex + 2);
  // port is big-endian in the raw data, e.g. 80 == 0x0050
  const portRemote = new DataView(portBuffer).getUint16(0);
  const addressIndex = portIndex + 2;
  const addressBuffer = new Uint8Array(vlessBuffer.slice(addressIndex, addressIndex + 1));
  // 1 --> ipv4, addressLength = 4
  // 2 --> domain name, addressLength = the byte following the type
  // 3 --> ipv6, addressLength = 16
  const addressType = addressBuffer[0];
  let addressLength = 0;
  let addressValueIndex = addressIndex + 1;
  let addressValue = "";
  switch (addressType) {
    case 1:
      addressLength = 4;
      addressValue = new Uint8Array(vlessBuffer.slice(addressValueIndex, addressValueIndex + addressLength)).join(".");
      break;
    case 2:
      addressLength = new Uint8Array(vlessBuffer.slice(addressValueIndex, addressValueIndex + 1))[0];
      addressValueIndex += 1;
      addressValue = new TextDecoder().decode(vlessBuffer.slice(addressValueIndex, addressValueIndex + addressLength));
      break;
    case 3: {
      // Braces scope the lexical declarations to this case.
      addressLength = 16;
      const dataView = new DataView(vlessBuffer.slice(addressValueIndex, addressValueIndex + addressLength));
      // e.g. 2001:0db8:85a3:0000:0000:8a2e:0370:7334 — no [] brackets needed
      const ipv6 = [];
      for (let i = 0; i < 8; i++) {
        ipv6.push(dataView.getUint16(i * 2).toString(16));
      }
      addressValue = ipv6.join(":");
      break;
    }
    default:
      return {
        hasError: true,
        message: `invalid addressType is ${addressType}`,
      };
  }
  if (!addressValue) {
    return {
      hasError: true,
      message: `addressValue is empty, addressType is ${addressType}`,
    };
  }
  return {
    hasError: false,
    addressRemote: addressValue,
    addressType,
    portRemote,
    rawDataIndex: addressValueIndex + addressLength,
    vlessVersion: version,
    isUDP,
  };
}
/**
 * Pump data from the remote TCP socket back to the WebSocket client
 * (remote --> ws). The VLESS response header is prepended to the first chunk
 * only. If the socket closes without ever producing data, `retry` is invoked
 * once so the caller can attempt a fallback destination.
 * @param {import("@cloudflare/workers-types").Socket} remoteSocket
 * @param {import("@cloudflare/workers-types").WebSocket} webSocket
 * @param {ArrayBuffer} vlessResponseHeader sent once, before the first payload
 * @param {(() => Promise<void>) | null} retry fallback dialer, or null
 * @param {*} log diagnostic logger
 */
async function remoteSocketToWS(remoteSocket, webSocket, vlessResponseHeader, retry, log) {
  /** @type {ArrayBuffer | null} */
  let vlessHeader = vlessResponseHeader;
  let hasIncomingData = false; // whether remoteSocket ever produced data
  await remoteSocket.readable
    .pipeTo(
      new WritableStream({
        start() {},
        /**
         * @param {Uint8Array} chunk
         * @param {*} controller
         */
        async write(chunk, controller) {
          hasIncomingData = true;
          if (webSocket.readyState !== WS_READY_STATE_OPEN) {
            controller.error("webSocket.readyState is not open, maybe close");
            // Original fell through and called send() on a non-open socket.
            return;
          }
          if (vlessHeader) {
            // First chunk: prepend the VLESS response header.
            webSocket.send(await new Blob([vlessHeader, chunk]).arrayBuffer());
            vlessHeader = null;
          } else {
            webSocket.send(chunk);
          }
        },
        close() {
          log(`remoteConnection!.readable is close with hasIncomingData is ${hasIncomingData}`);
          // No need for the server to close the websocket first; doing so can
          // cause HTTP ERR_CONTENT_LENGTH_MISMATCH — the client sends close anyway.
        },
        abort(reason) {
          console.error(`remoteConnection!.readable abort`, reason);
        },
      })
    )
    .catch((error) => {
      console.error(`remoteSocketToWS has exception `, error.stack || error);
      safeCloseWebSocket(webSocket);
    });
  // When the cf connect() socket errors out:
  // 1. Socket.closed rejects
  // 2. Socket.readable closes without any data coming
  // — give the caller one chance to retry via a fallback address.
  if (hasIncomingData === false && retry) {
    log(`retry`);
    retry();
  }
}
/**
 * Decode a base64 (or RFC 4648 base64url) string into an ArrayBuffer.
 * @param {string} base64Str possibly-empty, possibly url-safe base64 text
 * @returns {{earlyData?: ArrayBuffer, error: Error | null} | {error: Error}}
 *   decoded bytes on success; `{error: null}` for empty input; `{error}` on
 *   malformed base64
 */
function base64ToArrayBuffer(base64Str) {
  if (!base64Str) {
    return { error: null };
  }
  try {
    // Go clients use the modified base64url alphabet (RFC 4648), which atob
    // does not accept — normalize to the standard alphabet first.
    const normalized = base64Str.replace(/-/g, "+").replace(/_/g, "/");
    const binary = atob(normalized);
    const bytes = new Uint8Array(binary.length);
    for (let i = 0; i < binary.length; i++) {
      bytes[i] = binary.charCodeAt(i);
    }
    return { earlyData: bytes.buffer, error: null };
  } catch (error) {
    return { error };
  }
}
/**
 * Shape-check a string against the version-4 UUID pattern.
 * Not a full RFC 4122 validation — variant/version nibbles only.
 * @param {string} uuid candidate string
 * @returns {boolean} true when the string looks like a v4 UUID (case-insensitive)
 */
function isValidUUID(uuid) {
  return /^[0-9a-f]{8}-[0-9a-f]{4}-[4][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i.test(uuid);
}
const WS_READY_STATE_OPEN = 1;
const WS_READY_STATE_CLOSING = 2;
/**
 * Close a WebSocket without letting close() exceptions propagate.
 * Only sockets in the OPEN or CLOSING state are closed; other states are a no-op.
 * @param {import("@cloudflare/workers-types").WebSocket} socket
 */
function safeCloseWebSocket(socket) {
  try {
    const state = socket.readyState;
    if (state === WS_READY_STATE_OPEN || state === WS_READY_STATE_CLOSING) {
      socket.close();
    }
  } catch (error) {
    console.error("safeCloseWebSocket error", error);
  }
}
// Precomputed two-character hex strings for every byte value (00..ff).
const byteToHex = [];
for (let i = 0; i < 256; ++i) {
  byteToHex.push((i + 256).toString(16).slice(1));
}
/**
 * Format 16 bytes starting at `offset` as a UUID string.
 * "Unsafe" because the result is not validated.
 * @param {Uint8Array | number[]} arr byte source
 * @param {number} offset index of the first of the 16 bytes
 * @returns {string} lowercase uuid-shaped string
 */
function unsafeStringify(arr, offset = 0) {
  const hex = (i) => byteToHex[arr[offset + i]];
  const groups = [
    hex(0) + hex(1) + hex(2) + hex(3),
    hex(4) + hex(5),
    hex(6) + hex(7),
    hex(8) + hex(9),
    hex(10) + hex(11) + hex(12) + hex(13) + hex(14) + hex(15),
  ];
  return groups.join("-").toLowerCase();
}
/**
 * Format 16 bytes as a UUID and assert the result passes isValidUUID.
 * @param {Uint8Array | number[]} arr byte source
 * @param {number} offset index of the first of the 16 bytes
 * @returns {string} validated lowercase uuid
 * @throws {TypeError} when the formatted string is not a valid v4 UUID
 */
function stringify(arr, offset = 0) {
  const uuid = unsafeStringify(arr, offset);
  if (!isValidUUID(uuid)) {
    throw TypeError("Stringified UUID is invalid");
  }
  return uuid;
}
/**
 * Handle outbound UDP traffic. Only DNS is supported: the client sends
 * 2-byte-length-prefixed DNS queries; each one is POSTed to the DoH endpoint
 * `dohURL` (module global) and the answer is relayed back over the WebSocket
 * with the same length-prefix framing.
 * @param {import("@cloudflare/workers-types").WebSocket} webSocket
 * @param {ArrayBuffer} vlessResponseHeader sent once, before the first reply
 * @param {(string)=> void} log diagnostic logger
 * @returns {Promise<{write: (chunk: Uint8Array) => void}>} writer used to feed
 *   subsequent client data into the DNS pipeline
 */
async function handleUDPOutBound(webSocket, vlessResponseHeader, log) {
  // Whether the VLESS response header has already been sent to the client.
  let isVlessHeaderSent = false;
  const transformStream = new TransformStream({
    start(controller) {},
    transform(chunk, controller) {
      // Each UDP message is prefixed by a 2-byte big-endian payload length.
      // TODO: this likely has a bug — a UDP packet can be split across two
      // websocket messages, which this per-chunk loop does not handle.
      for (let index = 0; index < chunk.byteLength; ) {
        const lengthBuffer = chunk.slice(index, index + 2);
        const udpPakcetLength = new DataView(lengthBuffer).getUint16(0);
        const udpData = new Uint8Array(chunk.slice(index + 2, index + 2 + udpPakcetLength));
        index = index + 2 + udpPakcetLength;
        controller.enqueue(udpData);
      }
    },
    flush(controller) {},
  });
  // Only DNS-over-HTTPS forwarding is implemented for now.
  transformStream.readable
    .pipeTo(
      new WritableStream({
        async write(chunk) {
          // Forward the raw DNS query to the DoH resolver.
          const resp = await fetch(
            dohURL, // dns server url
            {
              method: "POST",
              headers: {
                "content-type": "application/dns-message",
              },
              body: chunk,
            }
          );
          const dnsQueryResult = await resp.arrayBuffer();
          const udpSize = dnsQueryResult.byteLength;
          // Re-add the 2-byte big-endian length prefix expected by the client.
          const udpSizeBuffer = new Uint8Array([(udpSize >> 8) & 0xff, udpSize & 0xff]);
          if (webSocket.readyState === WS_READY_STATE_OPEN) {
            log(`doh success and dns message length is ${udpSize}`);
            if (isVlessHeaderSent) {
              webSocket.send(await new Blob([udpSizeBuffer, dnsQueryResult]).arrayBuffer());
            } else {
              // First reply: prepend the VLESS response header.
              webSocket.send(await new Blob([vlessResponseHeader, udpSizeBuffer, dnsQueryResult]).arrayBuffer());
              isVlessHeaderSent = true;
            }
          }
        },
      })
    )
    .catch((error) => {
      log("dns udp has error" + error);
    });
  const writer = transformStream.writable.getWriter();
  return {
    /**
     * Feed a chunk of client data into the DNS pipeline.
     * @param {Uint8Array} chunk
     */
    write(chunk) {
      writer.write(chunk);
    },
  };
}
/**
 * Build the base64-encoded vless:// share-link subscription for the 13 preset
 * endpoints (module globals IP1..IP13 / PT1..PT13). V1-V7 are plain-ws
 * entries (security=none); V8-V13 use TLS with `hostName` as the SNI.
 * @param {string} userID uuid embedded in every link
 * @param {string} hostName worker host used for the ws Host header / SNI
 * @returns {string} base64 text suitable as a subscription payload
 */
function gettyConfig(userID, hostName) {
  const vlessshare = btoa(`vless://${userID}@${IP1}:${PT1}?encryption=none&security=none&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V1_${IP1}_${PT1}\nvless://${userID}@${IP2}:${PT2}?encryption=none&security=none&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V2_${IP2}_${PT2}\nvless://${userID}@${IP3}:${PT3}?encryption=none&security=none&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V3_${IP3}_${PT3}\nvless://${userID}@${IP4}:${PT4}?encryption=none&security=none&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V4_${IP4}_${PT4}\nvless://${userID}@${IP5}:${PT5}?encryption=none&security=none&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V5_${IP5}_${PT5}\nvless://${userID}@${IP6}:${PT6}?encryption=none&security=none&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V6_${IP6}_${PT6}\nvless://${userID}@${IP7}:${PT7}?encryption=none&security=none&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V7_${IP7}_${PT7}\nvless://${userID}@${IP8}:${PT8}?encryption=none&security=tls&sni=${hostName}&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V8_${IP8}_${PT8}\nvless://${userID}@${IP9}:${PT9}?encryption=none&security=tls&sni=${hostName}&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V9_${IP9}_${PT9}\nvless://${userID}@${IP10}:${PT10}?encryption=none&security=tls&sni=${hostName}&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V10_${IP10}_${PT10}\nvless://${userID}@${IP11}:${PT11}?encryption=none&security=tls&sni=${hostName}&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V11_${IP11}_${PT11}\nvless://${userID}@${IP12}:${PT12}?encryption=none&security=tls&sni=${hostName}&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V12_${IP12}_${PT12}\nvless://${userID}@${IP13}:${PT13}?encryption=none&security=tls&sni=${hostName}&fp=randomized&type=ws&host=${hostName}&path=%2F%3Fed%3D2560#CF_V13_${IP13}_${PT13}`);
  return `${vlessshare}`
}
function getclConfig(userID, hostName) {
return `
proxies:
- name: CF_V1_${IP1}_${PT1}
type: vless
server: ${IP1}
port: ${PT1}
uuid: ${userID}
udp: false
tls: false
network: ws
ws-opts:
path: "/worker34"
headers:
Host: ${hostName}
- name: CF_V2_${IP2}_${PT2}
type: vless
server: ${IP2}
port: ${PT2}
uuid: ${userID}
udp: false
tls: false
network: ws
ws-opts:
path: "/worker3"
headers:
Host: ${hostName}
- name: CF_V3_${IP3}_${PT3}
type: vless
server: ${IP3}
port: ${PT3}
uuid: ${userID}
udp: false
tls: false
network: ws
ws-opts:
path: "/wosa"
headers:
Host: ${hostName}
- name: CF_V4_${IP4}_${PT4}
type: vless
server: ${IP4}
port: ${PT4}
uuid: ${userID}
udp: false
tls: false
network: ws
ws-opts:
path: "/finance"
headers:
Host: ${hostName}
- name: CF_V5_${IP5}_${PT5}
type: vless
server: ${IP5}
port: ${PT5}
uuid: ${userID}
udp: false
tls: false
network: ws
ws-opts:
path: "/finance"
headers:
Host: ${hostName}
- name: CF_V6_${IP6}_${PT6}
type: vless
server: ${IP6}
port: ${PT6}
uuid: ${userID}
udp: false
tls: false
network: ws
ws-opts:
path: "/city"
headers:
Host: ${hostName}
- name: CF_V7_${IP7}_${PT7}
type: vless
server: ${IP7}
port: ${PT7}
uuid: ${userID}
udp: false
tls: false
network: ws
servername: ${hostName}
ws-opts:
path: "/city"
headers:
Host: ${hostName}
- name: CF_V8_${IP8}_${PT8}
type: vless
server: ${IP8}
port: ${PT8}
uuid: ${userID}
udp: false
tls: true
network: ws
servername: ${hostName}
ws-opts:
path: "/cccc"
headers:
Host: ${hostName}
- name: CF_V9_${IP9}_${PT9}
type: vless
server: ${IP9}
port: ${PT9}
uuid: ${userID}
udp: false
tls: true
network: ws
servername: ${hostName}
ws-opts:
path: "/thank_you"
headers:
Host: ${hostName}
- name: CF_V10_${IP10}_${PT10}
type: vless
server: ${IP10}
port: ${PT10}
uuid: ${userID}
udp: false
tls: true
network: ws
servername: ${hostName}
ws-opts:
path: "/weather"
headers:
Host: ${hostName}
- name: CF_V11_${IP11}_${PT11}
type: vless
server: ${IP11}
port: ${PT11}
uuid: ${userID}
udp: false
tls: true
network: ws
servername: ${hostName}
ws-opts:
path: "/weather"
headers:
Host: ${hostName}
- name: CF_V12_${IP12}_${PT12}
type: vless
server: ${IP12}
port: ${PT12}
uuid: ${userID}
udp: false
tls: true
network: ws
servername: ${hostName}
ws-opts:
path: "/weather"
headers:
Host: ${hostName}
- name: CF_V13_${IP13}_${PT13}
type: vless
server: ${IP13}
port: ${PT13}
uuid: ${userID}
udp: false
tls: true
network: ws
servername: ${hostName}
ws-opts:
path: "/weather"
headers:
Host: ${hostName}
proxy-groups:
- name: 负载均衡
type: load-balance
url: http://www.gstatic.com/generate_204
interval: 300
proxies:
- CF_V1_${IP1}_${PT1}
- CF_V2_${IP2}_${PT2}
- CF_V3_${IP3}_${PT3}
- CF_V4_${IP4}_${PT4}
- CF_V5_${IP5}_${PT5}
- CF_V6_${IP6}_${PT6}
- CF_V7_${IP7}_${PT7}
- CF_V8_${IP8}_${PT8}
- CF_V9_${IP9}_${PT9}
- CF_V10_${IP10}_${PT10}
- CF_V11_${IP11}_${PT11}
- CF_V12_${IP12}_${PT12}
- CF_V13_${IP13}_${PT13}
- name: 自动选择
type: url-test
url: http://www.gstatic.com/generate_204
interval: 300
tolerance: 50
proxies:
- CF_V1_${IP1}_${PT1}
- CF_V2_${IP2}_${PT2}
- CF_V3_${IP3}_${PT3}
- CF_V4_${IP4}_${PT4}
- CF_V5_${IP5}_${PT5}
- CF_V6_${IP6}_${PT6}
- CF_V7_${IP7}_${PT7}
- CF_V8_${IP8}_${PT8}
- CF_V9_${IP9}_${PT9}
- CF_V10_${IP10}_${PT10}
- CF_V11_${IP11}_${PT11}
- CF_V12_${IP12}_${PT12}
- CF_V13_${IP13}_${PT13}
- name: 🌍选择代理
type: select
proxies:
- 负载均衡
- 自动选择
- DIRECT
- CF_V1_${IP1}_${PT1}
- CF_V2_${IP2}_${PT2}
- CF_V3_${IP3}_${PT3}
- CF_V4_${IP4}_${PT4}
- CF_V5_${IP5}_${PT5}
- CF_V6_${IP6}_${PT6}
- CF_V7_${IP7}_${PT7}
- CF_V8_${IP8}_${PT8}
- CF_V9_${IP9}_${PT9}
- CF_V10_${IP10}_${PT10}
- CF_V11_${IP11}_${PT11}
- CF_V12_${IP12}_${PT12}
- CF_V13_${IP13}_${PT13}
rules:
- GEOIP,LAN,DIRECT
- GEOIP,CN,DIRECT
- MATCH,🌍选择代理`
}

View File

@@ -0,0 +1,581 @@
/**
 * Entry point for a VLESS-over-WebSocket request: upgrades the connection,
 * parses the VLESS header from the first chunk, then relays traffic to the
 * requested TCP destination — or, for UDP on port 53, to the DoH resolver.
 * Relies on the module-level `userID` global for authentication.
 * @param {import("@cloudflare/workers-types").Request} request
 * @returns {Promise<Response>} 101 Switching Protocols response carrying the client socket
 */
async function vlessOverWSHandler(request) {
  /** @type {import("@cloudflare/workers-types").WebSocket[]} */
  // @ts-ignore
  const webSocketPair = new WebSocketPair();
  const [client, webSocket] = Object.values(webSocketPair);
  webSocket.accept();
  // Filled in after the VLESS header is parsed; used only for log prefixes.
  let address = "";
  let portWithRandomLog = "";
  const log = (/** @type {string} */ info, /** @type {string | undefined} */ event) => {
    console.log(`[${address}:${portWithRandomLog}] ${info}`, event || "");
  };
  // 0-RTT early data is smuggled through the sec-websocket-protocol header.
  const earlyDataHeader = request.headers.get("sec-websocket-protocol") || "";
  const readableWebSocketStream = makeReadableWebSocketStream(webSocket, earlyDataHeader, log);
  /** @type {{ value: import("@cloudflare/workers-types").Socket | null}}*/
  let remoteSocketWapper = {
    value: null,
  };
  let udpStreamWrite = null; // set once a DNS pipeline has been established
  let isDns = false;
  // ws --> remote
  readableWebSocketStream
    .pipeTo(
      new WritableStream({
        async write(chunk, controller) {
          if (isDns && udpStreamWrite) {
            // DNS session already established: forward chunks straight through.
            return udpStreamWrite(chunk);
          }
          if (remoteSocketWapper.value) {
            // TCP connection already established: stream payload through.
            const writer = remoteSocketWapper.value.writable.getWriter();
            await writer.write(chunk);
            writer.releaseLock();
            return;
          }
          // First chunk: parse and validate the VLESS header.
          const {
            hasError,
            message,
            portRemote = 443,
            addressRemote = "",
            rawDataIndex,
            vlessVersion = new Uint8Array([0, 0]),
            isUDP,
          } = await processVlessHeader(chunk, userID);
          address = addressRemote;
          portWithRandomLog = `${portRemote}--${Math.random()} ${isUDP ? "udp " : "tcp "} `;
          if (hasError) {
            // controller.error(message);
            throw new Error(message); // cf seems has bug, controller.error will not end stream
            // webSocket.close(1000, message);
            return;
          }
          // UDP is only allowed toward the DNS port; anything else is refused.
          if (isUDP) {
            if (portRemote === 53) {
              isDns = true;
            } else {
              // controller.error('UDP proxy only enable for DNS which is port 53');
              throw new Error("UDP proxy only enable for DNS which is port 53"); // cf seems has bug, controller.error will not end stream
              return;
            }
          }
          // VLESS response header: [version, length N of additional info (0)]
          const vlessResponseHeader = new Uint8Array([vlessVersion[0], 0]);
          const rawClientData = chunk.slice(rawDataIndex);
          // TODO: support udp here when cf runtime has udp support
          if (isDns) {
            const { write } = await handleUDPOutBound(webSocket, vlessResponseHeader, log);
            udpStreamWrite = write;
            udpStreamWrite(rawClientData);
            return;
          }
          handleTCPOutBound(
            remoteSocketWapper,
            addressRemote,
            portRemote,
            rawClientData,
            webSocket,
            vlessResponseHeader,
            log
          );
        },
        close() {
          log(`readableWebSocketStream is close`);
        },
        abort(reason) {
          log(`readableWebSocketStream is abort`, JSON.stringify(reason));
        },
      })
    )
    .catch((err) => {
      log("readableWebSocketStream pipeTo error", err);
    });
  return new Response(null, {
    status: 101,
    // @ts-ignore
    webSocket: client,
  });
}
/**
 * Checks whether a UUID is present in the remote user API response.
 * Any API failure (or an empty response) is treated as "not present".
 * @param {string} targetUuid The UUID to search for.
 * @returns {Promise<boolean>} true iff the API lists a user with this uuid.
 */
async function checkUuidInApiResponse(targetUuid) {
  try {
    const apiResponse = await getApiResponse();
    if (!apiResponse) {
      return false;
    }
    return apiResponse.users.some((user) => user.uuid === targetUuid);
  } catch (error) {
    console.error("Error:", error);
    return false;
  }
}
/**
 * Handles outbound TCP connections: dial the requested destination, write the
 * client's first payload, then pump the remote response back to the WebSocket.
 *
 * @param {any} remoteSocket wrapper object; `.value` receives the live socket
 * @param {string} addressRemote The remote address to connect to.
 * @param {number} portRemote The remote port to connect to.
 * @param {Uint8Array} rawClientData The raw client data to write.
 * @param {import("@cloudflare/workers-types").WebSocket} webSocket The WebSocket to pass the remote socket to.
 * @param {Uint8Array} vlessResponseHeader The vless response header.
 * @param {function} log The logging function.
 * @returns {Promise<void>}
 */
async function handleTCPOutBound(
  remoteSocket,
  addressRemote,
  portRemote,
  rawClientData,
  webSocket,
  vlessResponseHeader,
  log
) {
  // Dotted-quad IPv4 literal. NOTE: the original pattern used bare "." between
  // octets, which matches ANY character (e.g. "1a2a3a4") and could wrap
  // non-IP hostnames below; the separators are now escaped.
  const ipv4Pattern = /^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/;
  async function connectAndWrite(address, port) {
    // Literal IPv4 destinations are wrapped as "www.<ip>.sslip.io" (the pieces
    // are base64-obfuscated) so connect() always receives a hostname.
    if (ipv4Pattern.test(address)) address = `${atob('d3d3Lg==')}${address}${atob('LnNzbGlwLmlv')}`;
    /** @type {import("@cloudflare/workers-types").Socket} */
    const tcpSocket = connect({
      hostname: address,
      port: port,
    });
    remoteSocket.value = tcpSocket;
    log(`connected to ${address}:${port}`);
    const writer = tcpSocket.writable.getWriter();
    await writer.write(rawClientData); // first write, normally the TLS ClientHello
    writer.releaseLock();
    return tcpSocket;
  }
  // If the cf connect() socket produced no incoming data, retry through the
  // configured fallback (proxyIP/proxyPort module globals).
  async function retry() {
    const tcpSocket = await connectAndWrite(proxyIP || addressRemote, proxyPort || portRemote);
    // Whether or not the retry succeeds, close the websocket when it finishes.
    tcpSocket.closed
      .catch((error) => {
        console.log("retry tcpSocket closed error", error);
      })
      .finally(() => {
        safeCloseWebSocket(webSocket);
      });
    remoteSocketToWS(tcpSocket, webSocket, vlessResponseHeader, null, log);
  }
  const tcpSocket = await connectAndWrite(addressRemote, portRemote);
  // when remoteSocket is ready, pass it to the websocket: remote --> ws
  remoteSocketToWS(tcpSocket, webSocket, vlessResponseHeader, retry, log);
}
/**
*
* @param {import("@cloudflare/workers-types").WebSocket} webSocketServer
* @param {string} earlyDataHeader for ws 0rtt
* @param {(info: string)=> void} log for ws 0rtt
*/
function makeReadableWebSocketStream(webSocketServer, earlyDataHeader, log) {
let readableStreamCancel = false;
const stream = new ReadableStream({
start(controller) {
webSocketServer.addEventListener("message", (event) => {
if (readableStreamCancel) {
return;
}
const message = event.data;
controller.enqueue(message);
});
// The event means that the client closed the client -> server stream.
// However, the server -> client stream is still open until you call close() on the server side.
// The WebSocket protocol says that a separate close message must be sent in each direction to fully close the socket.
webSocketServer.addEventListener("close", () => {
// client send close, need close server
// if stream is cancel, skip controller.close
safeCloseWebSocket(webSocketServer);
if (readableStreamCancel) {
return;
}
controller.close();
});
webSocketServer.addEventListener("error", (err) => {
log("webSocketServer has error");
controller.error(err);
});
// for ws 0rtt
const { earlyData, error } = base64ToArrayBuffer(earlyDataHeader);
if (error) {
controller.error(error);
} else if (earlyData) {
controller.enqueue(earlyData);
}
},
pull(controller) {
// if ws can stop read if stream is full, we can implement backpressure
// https://streams.spec.whatwg.org/#example-rs-push-backpressure
},
cancel(reason) {
// 1. pipe WritableStream has error, this cancel will called, so ws handle server close into here
// 2. if readableStream is cancel, all controller.close/enqueue need skip,
// 3. but from testing controller.error still work even if readableStream is cancel
if (readableStreamCancel) {
return;
}
log(`ReadableStream was canceled, due to ${reason}`);
readableStreamCancel = true;
safeCloseWebSocket(webSocketServer);
},
});
return stream;
}
// https://xtls.github.io/development/protocols/vless.html
// https://github.com/zizifn/excalidraw-backup/blob/main/v2ray-protocol.excalidraw
/**
*
* @param { ArrayBuffer} vlessBuffer
* @param {string} userID
* @returns
*/
async function processVlessHeader(vlessBuffer, userID) {
if (vlessBuffer.byteLength < 24) {
return {
hasError: true,
message: "invalid data",
};
}
const version = new Uint8Array(vlessBuffer.slice(0, 1));
let isValidUser = false;
let isUDP = false;
const slicedBuffer = new Uint8Array(vlessBuffer.slice(1, 17));
const slicedBufferString = stringify(slicedBuffer);
const uuids = userID.includes(",") ? userID.split(",") : [userID];
const checkUuidInApi = await checkUuidInApiResponse(slicedBufferString);
isValidUser = uuids.some((userUuid) => checkUuidInApi || slicedBufferString === userUuid.trim());
console.log(`checkUuidInApi: ${await checkUuidInApiResponse(slicedBufferString)}, userID: ${slicedBufferString}`);
if (!isValidUser) {
return {
hasError: true,
message: "invalid user",
};
}
const optLength = new Uint8Array(vlessBuffer.slice(17, 18))[0];
//skip opt for now
const command = new Uint8Array(vlessBuffer.slice(18 + optLength, 18 + optLength + 1))[0];
// 0x01 TCP
// 0x02 UDP
// 0x03 MUX
if (command === 1) {
} else if (command === 2) {
isUDP = true;
} else {
return {
hasError: true,
message: `command ${command} is not support, command 01-tcp,02-udp,03-mux`,
};
}
const portIndex = 18 + optLength + 1;
const portBuffer = vlessBuffer.slice(portIndex, portIndex + 2);
// port is big-Endian in raw data etc 80 == 0x005d
const portRemote = new DataView(portBuffer).getUint16(0);
let addressIndex = portIndex + 2;
const addressBuffer = new Uint8Array(vlessBuffer.slice(addressIndex, addressIndex + 1));
// 1--> ipv4 addressLength =4
// 2--> domain name addressLength=addressBuffer[1]
// 3--> ipv6 addressLength =16
const addressType = addressBuffer[0];
let addressLength = 0;
let addressValueIndex = addressIndex + 1;
let addressValue = "";
switch (addressType) {
case 1:
addressLength = 4;
addressValue = new Uint8Array(vlessBuffer.slice(addressValueIndex, addressValueIndex + addressLength)).join(".");
break;
case 2:
addressLength = new Uint8Array(vlessBuffer.slice(addressValueIndex, addressValueIndex + 1))[0];
addressValueIndex += 1;
addressValue = new TextDecoder().decode(vlessBuffer.slice(addressValueIndex, addressValueIndex + addressLength));
break;
case 3:
addressLength = 16;
const dataView = new DataView(vlessBuffer.slice(addressValueIndex, addressValueIndex + addressLength));
// 2001:0db8:85a3:0000:0000:8a2e:0370:7334
const ipv6 = [];
for (let i = 0; i < 8; i++) {
ipv6.push(dataView.getUint16(i * 2).toString(16));
}
addressValue = ipv6.join(":");
// seems no need add [] for ipv6
break;
default:
return {
hasError: true,
message: `invild addressType is ${addressType}`,
};
}
if (!addressValue) {
return {
hasError: true,
message: `addressValue is empty, addressType is ${addressType}`,
};
}
return {
hasError: false,
addressRemote: addressValue,
addressType,
portRemote,
rawDataIndex: addressValueIndex + addressLength,
vlessVersion: version,
isUDP,
};
}
/**
*
* @param {import("@cloudflare/workers-types").Socket} remoteSocket
* @param {import("@cloudflare/workers-types").WebSocket} webSocket
* @param {ArrayBuffer} vlessResponseHeader
* @param {(() => Promise<void>) | null} retry
* @param {*} log
*/
async function remoteSocketToWS(remoteSocket, webSocket, vlessResponseHeader, retry, log) {
// remote--> ws
let remoteChunkCount = 0;
let chunks = [];
/** @type {ArrayBuffer | null} */
let vlessHeader = vlessResponseHeader;
let hasIncomingData = false; // check if remoteSocket has incoming data
await remoteSocket.readable
.pipeTo(
new WritableStream({
start() {},
/**
*
* @param {Uint8Array} chunk
* @param {*} controller
*/
async write(chunk, controller) {
hasIncomingData = true;
// remoteChunkCount++;
if (webSocket.readyState !== WS_READY_STATE_OPEN) {
controller.error("webSocket.readyState is not open, maybe close");
}
if (vlessHeader) {
webSocket.send(await new Blob([vlessHeader, chunk]).arrayBuffer());
vlessHeader = null;
} else {
// seems no need rate limit this, CF seems fix this??..
// if (remoteChunkCount > 20000) {
// // cf one package is 4096 byte(4kb), 4096 * 20000 = 80M
// await delay(1);
// }
webSocket.send(chunk);
}
},
close() {
log(`remoteConnection!.readable is close with hasIncomingData is ${hasIncomingData}`);
// safeCloseWebSocket(webSocket); // no need server close websocket frist for some case will casue HTTP ERR_CONTENT_LENGTH_MISMATCH issue, client will send close event anyway.
},
abort(reason) {
console.error(`remoteConnection!.readable abort`, reason);
},
})
)
.catch((error) => {
console.error(`remoteSocketToWS has exception `, error.stack || error);
safeCloseWebSocket(webSocket);
});
// seems is cf connect socket have error,
// 1. Socket.closed will have error
// 2. Socket.readable will be close without any data coming
if (hasIncomingData === false && retry) {
log(`retry`);
retry();
}
}
/**
*
* @param {string} base64Str
* @returns
*/
function base64ToArrayBuffer(base64Str) {
if (!base64Str) {
return { error: null };
}
try {
// go use modified Base64 for URL rfc4648 which js atob not support
base64Str = base64Str.replace(/-/g, "+").replace(/_/g, "/");
const decode = atob(base64Str);
const arryBuffer = Uint8Array.from(decode, (c) => c.charCodeAt(0));
return { earlyData: arryBuffer.buffer, error: null };
} catch (error) {
return { error };
}
}
/**
* This is not real UUID validation
* @param {string} uuid
*/
function isValidUUID(uuid) {
const uuidRegex = /^[0-9a-f]{8}-[0-9a-f]{4}-[4][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;
return uuidRegex.test(uuid);
}
const WS_READY_STATE_OPEN = 1;
const WS_READY_STATE_CLOSING = 2;
/**
* Normally, WebSocket will not has exceptions when close.
* @param {import("@cloudflare/workers-types").WebSocket} socket
*/
function safeCloseWebSocket(socket) {
try {
if (socket.readyState === WS_READY_STATE_OPEN || socket.readyState === WS_READY_STATE_CLOSING) {
socket.close();
}
} catch (error) {
console.error("safeCloseWebSocket error", error);
}
}
const byteToHex = [];
for (let i = 0; i < 256; ++i) {
byteToHex.push((i + 256).toString(16).slice(1));
}
function unsafeStringify(arr, offset = 0) {
return (
byteToHex[arr[offset + 0]] +
byteToHex[arr[offset + 1]] +
byteToHex[arr[offset + 2]] +
byteToHex[arr[offset + 3]] +
"-" +
byteToHex[arr[offset + 4]] +
byteToHex[arr[offset + 5]] +
"-" +
byteToHex[arr[offset + 6]] +
byteToHex[arr[offset + 7]] +
"-" +
byteToHex[arr[offset + 8]] +
byteToHex[arr[offset + 9]] +
"-" +
byteToHex[arr[offset + 10]] +
byteToHex[arr[offset + 11]] +
byteToHex[arr[offset + 12]] +
byteToHex[arr[offset + 13]] +
byteToHex[arr[offset + 14]] +
byteToHex[arr[offset + 15]]
).toLowerCase();
}
function stringify(arr, offset = 0) {
const uuid = unsafeStringify(arr, offset);
if (!isValidUUID(uuid)) {
throw TypeError("Stringified UUID is invalid");
}
return uuid;
}
/**
*
* @param {import("@cloudflare/workers-types").WebSocket} webSocket
* @param {ArrayBuffer} vlessResponseHeader
* @param {(string)=> void} log
*/
async function handleUDPOutBound(webSocket, vlessResponseHeader, log) {
let isVlessHeaderSent = false;
const transformStream = new TransformStream({
start(controller) {},
transform(chunk, controller) {
// udp message 2 byte is the the length of udp data
// TODO: this should have bug, beacsue maybe udp chunk can be in two websocket message
for (let index = 0; index < chunk.byteLength; ) {
const lengthBuffer = chunk.slice(index, index + 2);
const udpPakcetLength = new DataView(lengthBuffer).getUint16(0);
const udpData = new Uint8Array(chunk.slice(index + 2, index + 2 + udpPakcetLength));
index = index + 2 + udpPakcetLength;
controller.enqueue(udpData);
}
},
flush(controller) {},
});
// only handle dns udp for now
transformStream.readable
.pipeTo(
new WritableStream({
async write(chunk) {
const resp = await fetch(
dohURL, // dns server url
{
method: "POST",
headers: {
"content-type": "application/dns-message",
},
body: chunk,
}
);
const dnsQueryResult = await resp.arrayBuffer();
const udpSize = dnsQueryResult.byteLength;
// console.log([...new Uint8Array(dnsQueryResult)].map((x) => x.toString(16)));
const udpSizeBuffer = new Uint8Array([(udpSize >> 8) & 0xff, udpSize & 0xff]);
if (webSocket.readyState === WS_READY_STATE_OPEN) {
log(`doh success and dns message length is ${udpSize}`);
if (isVlessHeaderSent) {
webSocket.send(await new Blob([udpSizeBuffer, dnsQueryResult]).arrayBuffer());
} else {
webSocket.send(await new Blob([vlessResponseHeader, udpSizeBuffer, dnsQueryResult]).arrayBuffer());
isVlessHeaderSent = true;
}
}
},
})
)
.catch((error) => {
log("dns udp has error" + error);
});
const writer = transformStream.writable.getWriter();
return {
/**
*
* @param {Uint8Array} chunk
*/
write(chunk) {
writer.write(chunk);
},
};
}

View File

@@ -0,0 +1,49 @@
核心运行逻辑、关键方法及函数的调用流程
核心运行逻辑
上述代码实现了一个 VLESS over WebSocket 代理服务器,主要功能是将客户端通过 WebSocket 发送的 VLESS 协议数据解析并转发到目标服务器,同时将目标服务器的响应数据回传给客户端。以下是其核心运行逻辑的步骤:
WebSocket 连接建立:
客户端发起 WebSocket 连接请求时vlessOverWSHandler 函数被触发。
创建一个 WebSocketPair包含客户端和服务器端的 WebSocket 对象,服务器端调用 accept() 接受连接。
从请求头中提取 sec-websocket-protocol 字段,用于处理可能的 Early Data0-RTT 数据)。
数据流转换:
使用 makeReadableWebSocketStream 函数将服务器端 WebSocket 的数据转换为 ReadableStream监听 WebSocket 的 message 事件,将接收到的数据推入流中。
当 WebSocket 关闭或出错时,流也会相应关闭或报错。
VLESS 协议解析与处理:
在 ReadableStream 的 write 方法中,处理客户端发送的数据块 (chunk)。
调用 processVlessHeader 解析 VLESS 协议头部提取目标地址、端口、协议类型TCP 或 UDP等信息。
根据解析结果:
如果是 UDP 请求且端口为 53DNS调用 handleUDPOutBound 处理 DNS 查询。
如果是 TCP 请求,调用 handleTCPOutBound 建立到目标服务器的连接并转发数据。
TCP 数据转发:
handleTCPOutBound 使用 connect 函数建立到目标服务器的 TCP 连接。
将客户端数据写入 TCP 连接,同时通过 remoteSocketToWS 将目标服务器的响应数据回传给 WebSocket 客户端。
UDPDNS数据转发
对于 DNS 请求handleUDPOutBound 将查询发送到指定的 DNS-over-HTTPS (DoH) 服务器,并将响应通过 WebSocket 回传给客户端。
关键方法及函数
以下是代码中关键的函数及其作用:
vlessOverWSHandler入口函数负责建立 WebSocket 连接并初始化数据流。
makeReadableWebSocketStream将 WebSocket 数据转换为 ReadableStream支持 Early Data 处理。
processVlessHeader解析 VLESS 协议头部,验证用户 UUID 并提取目标地址和端口。
handleTCPOutBound处理 TCP 请求,建立到目标服务器的连接并转发数据。
remoteSocketToWS将目标服务器的响应数据回传给 WebSocket 客户端。
handleUDPOutBound处理 UDPDNS请求向 DoH 服务器发送查询并返回结果。
checkUuidInApiResponse通过 API 验证用户 UUID 的有效性。
调用流程
客户端发起 WebSocket 请求,触发 vlessOverWSHandler。
创建并接受 WebSocket 连接,获取 Early Data如果存在
调用 makeReadableWebSocketStream 创建 ReadableStream监听 WebSocket 数据。
在 ReadableStream 的 write 方法中:
调用 processVlessHeader 解析数据。
如果是 DNS 请求UDP 端口 53调用 handleUDPOutBound。
如果是 TCP 请求,调用 handleTCPOutBound。
handleTCPOutBound
使用 connectAndWrite 建立 TCP 连接并写入客户端数据。
调用 remoteSocketToWS 将响应数据回传。
remoteSocketToWS
监听 TCP 连接的响应数据。
通过 WebSocket 发送给客户端。
handleUDPOutBoundDNS 请求):
发送 DNS 查询到 DoH 服务器。
将响应通过 WebSocket 返回。

View File

@@ -0,0 +1,14 @@
https://github.com/cmliu/edgetunnel
使用的cf账号 icederce@gmail.com
使用的clouddns的账号 icederce@gmail.com
实际部署完成地址
https://pp.icederce.ip-ddns.com/9116e3c0-8f35-45a8-8124-8904593555c4

View File

@@ -0,0 +1,25 @@
https://blog.090227.xyz/p/iptableNewProxyIP/
# 在 Seoul-amd64-01 上运行以下命令进行端口转发
2606:4700:4400::6812:25e4
# 日志
sudo ip6tables -t nat -I PREROUTING -p tcp --dport 27443 -j LOG --log-prefix "IP6-DNAT-PREROUTING: " --log-level 6
sudo ip6tables -t nat -A PREROUTING -p tcp --dport 27443 -j DNAT --to-destination "[2606:4700:4400::6812:25e4]:443"
sudo ip6tables -t nat -A POSTROUTING -p tcp -d 2606:4700:4400::6812:25e4 --dport 443 -j MASQUERADE
# 清除转发
sudo ip6tables -t nat -F
# 实际转发IP
2603:c022:8008:8923:88a0:5c7d:2bb6:6ed5
27443
# 查看日志
sudo tail -f /var/log/syslog | grep "IP6-DNAT-PREROUTING"

View File

@@ -0,0 +1,29 @@
#!/bin/bash
# Sync proxy subscription files to one or more object-storage destinations
# via the MinIO client (mc). Each entry is a previously registered mc alias.
# NOTE(review): the second assignment overrides the first, so only seoul-1
# is active; tc-sh appears to be deliberately disabled here — confirm.
ooss_dest_list=(tc-sh seoul-1)
ooss_dest_list=(seoul-1)
# Register the mc alias for the active object-storage destination.
# NOTE(review): access key and secret are hardcoded — consider moving them
# into environment variables or an mc config file outside version control.
set_oss_alias() {
  # Earlier endpoints, kept for reference:
  # /usr/local/bin/mc alias set local http://10.250.0.100:9000 cmii B#923fC7mk
  # /usr/local/bin/mc alias set tc-sh http://42.192.52.227:9000 cmii B#923fC7mk
  /usr/local/bin/mc alias set seoul-1 https://cnk8d6fazu16.compat.objectstorage.ap-seoul-1.oraclecloud.com aed62d24d85e2da809ce02bf272420ba4ed74820 rQdEcn69K049+JkA1IGoQmC1k8zma8zfWvZvVS0h144=
}
#######################################
# Copy the current subscription files to every configured destination alias.
# Globals:   ooss_dest_list (read) - mc aliases to sync to
# Outputs:   progress messages to stdout
# Returns:   status of the last mc command
#######################################
do_sync() {
  local dest
  local -r src_dir="/root/wddproject/shell-scripts/1-代理Xray"
  # Quote the array expansion and paths so aliases or file names containing
  # spaces/globs cannot word-split (ShellCheck SC2068 / SC2086).
  for dest in "${ooss_dest_list[@]}"; do
    echo "[do_sync] - start to sync to $dest"
    /usr/local/bin/mc cp "${src_dir}/98-subscribe-clash.yaml" "${dest}/seoul/"
    /usr/local/bin/mc cp "${src_dir}/99-subscribe-octopus-latest.txt" "${dest}/seoul/"
    echo "[do_sync] - end to sync to $dest"
    echo ""
  done
}
# Entry point: register the mc alias, then push the subscription files.
set_oss_alias
do_sync
echo "[sync-proxy-config] - done !"

View File

@@ -0,0 +1,25 @@
{
"inbounds": [
{
"protocol": "socks",
"port": 28888,
"listen": "0.0.0.0",
"settings": {
"auth": "password",
"accounts": [
{
"user": "zeaslity",
"pass": "cd28a746-283e-47cc-88f7-bb43d7f6b53a"
}
],
"udp": true,
"userLevel": 0
}
}
],
"outbounds": [
{
"protocol": "freedom"
}
]
}

View File

@@ -9,22 +9,12 @@
"accounts": [ "accounts": [
{ {
"user": "zeaslity", "user": "zeaslity",
"pass": "lovemm.23" "pass": "cd28a746-283e-47cc-88f7-bb43d7f6b53a"
} }
], ],
"udp": true, "udp": true,
"userLevel": 0 "userLevel": 0
} }
},
{
"protocol": "socks",
"port": 58889,
"listen": "0.0.0.0",
"settings": {
"auth": "noauth",
"udp": true,
"userLevel": 0
}
} }
], ],
"dns": { "dns": {

View File

@@ -2,11 +2,35 @@
export DOMAIN_NAME=xx.tc.hk.go.107421.xyz export DOMAIN_NAME=xx.tc.hk.go.107421.xyz
export DOMAIN_NAME=book.107421.xyz export DOMAIN_NAME=book.107421.xyz
export DOMAIN_NAME=octopus.107421.xyz export DOMAIN_NAME=octopus.107421.xyz
export DOMAIN_NAME=xx.t2.ll.c0.107421.xyz
export DOMAIN_NAME=zc.p4.cc.xx.107421.xyz export DOMAIN_NAME=zc.p4.cc.xx.107421.xyz
export CF_Token="oXJRP5XI8Zhipa_PtYtB_jy6qWL0I9BosrJEYE8p" export DOMAIN_NAME=bingo.107421.xyz
# seoul-arm-01
export DOMAIN_NAME=dify.107421.xyz
# seoul-arm-01
export DOMAIN_NAME=pan.107421.xyz
# Oracle-KOR-Seoul
export DOMAIN_NAME=xx.s4.cc.hh.107421.xyz
# Oracle-JPN-Tokyo-R-OSel
export DOMAIN_NAME=xx.t2.ll.c0.107421.xyz
# Oracle-JPN-Osaka-R-OSel
export DOMAIN_NAME=xx.o1.vl.s4.107421.xyz
# Oracle-USA-Phoneix-R-OSel
export DOMAIN_NAME=xx.p2.vl.s4.107421.xyz
# BitsFlow-USA-LosAngles-CN2GIA
export DOMAIN_NAME=xx.l4.ca.bg.107421.xyz
export CF_Token="y-OqT1Gan37vBUC1YaedmkKbsH6Kf84RH6Ve2b5x"
export CF_Account_ID="dfaadeb83406ef5ad35da02617af9191" export CF_Account_ID="dfaadeb83406ef5ad35da02617af9191"
export CF_Zone_ID="511894a4f1357feb905e974e16241ebb" export CF_Zone_ID="511894a4f1357feb905e974e16241ebb"
@@ -16,3 +40,5 @@ acme.sh --install-cert -d ${DOMAIN_NAME} --ecc \
--key-file /etc/nginx/conf.d/ssl_key/${DOMAIN_NAME}.key.pem \ --key-file /etc/nginx/conf.d/ssl_key/${DOMAIN_NAME}.key.pem \
--fullchain-file /etc/nginx/conf.d/ssl_key/${DOMAIN_NAME}.cert.pem \ --fullchain-file /etc/nginx/conf.d/ssl_key/${DOMAIN_NAME}.cert.pem \
--reloadcmd "systemctl restart nginx --force" --reloadcmd "systemctl restart nginx --force"
acme.sh --renew -d ${DOMAIN_NAME} --ecc

View File

@@ -0,0 +1,31 @@
# TLS reverse proxy for push.107421.xyz.
# Terminates HTTPS on port 5004 and forwards all traffic — including
# WebSocket upgrades — to the upstream at 129.146.65.80:8800.
server {
listen 5004 ssl http2;
server_name push.107421.xyz;
# TLS session settings: cache enabled, tickets disabled, TLS 1.2/1.3 only
# (matches the Mozilla "intermediate" recommendation).
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
ssl_certificate /etc/nginx/conf.d/ssl_key/push.107421.xyz.cert.pem;
ssl_certificate_key /etc/nginx/conf.d/ssl_key/push.107421.xyz.key.pem;
location / {
# HTTP/1.1 plus Upgrade/Connection headers are required for WebSocket
# proxying to work through nginx.
proxy_http_version 1.1;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Generous timeouts for long-lived push/streaming connections.
proxy_connect_timeout 3m;
proxy_send_timeout 3m;
proxy_read_timeout 3m;
client_max_body_size 0; # Stream request body to backend
proxy_pass http://129.146.65.80:8800;
}
}

Some files were not shown because too many files have changed in this diff Show More