From ee93d8dc8c2003950e2a8c3ff1873a53089ef7c0 Mon Sep 17 00:00:00 2001
From: zeaslity
Date: Fri, 7 Mar 2025 17:14:52 +0800
Subject: [PATCH] =?UTF-8?q?=E5=A4=A7=E9=87=8F=E6=9B=B4=E6=96=B0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 0-部署应用/Oracle-Cloud/将内存挂载为硬盘.sh | 17 +
 .../AI-Dify/dify.107421-nginx-seoul-arm-01.conf | 122 ++
 .../AI-Dify/docker-compose-modified.yaml | 542 +++++++++
 .../AI-Dify/docker-compose-官方.yaml | 1010 +++++++++++++++++
 .../AI-Dify/docker-entrypoint-mount.sh | 42 +
 .../Phoneix-arm64-01/AI-Dify/squid.conf.template | 54 +
 .../Phoneix-arm64-01/AI-Dify/设置的环境.env | 736 ++++++++++++
 .../AI-Dify/docker-compose-modified.yaml | 59 +
 0-部署应用/Phoneix-arm64-02/基本信息.txt | 4 +
 .../2-17-deploy.env | 0
 .../app-flowy-docker-compose.yaml | 0
 .../appflowy-client.yaml | 0
 .../deploy.env | 0
 .../Tokyo-arm64-01/telegram-ai-bot/README_CN.md | 514 +++++++++
 .../telegram-ai-bot/docker-compose.yaml | 22 +
 .../Tokyo-arm64-01/telegram-ai-bot/参考项目.txt | 2 +
 1-代理Xray/0-香港节点/0-分层代理回落.json | 34 +-
 1-代理Xray/98-subscribe-clash.yaml | 10 +-
 2-NGINX相关/107421.xyz/21-申请证书.sh | 4 +
 19 files changed, 3131 insertions(+), 41 deletions(-)
 create mode 100644 0-部署应用/Oracle-Cloud/将内存挂载为硬盘.sh
 create mode 100644 0-部署应用/Phoneix-arm64-01/AI-Dify/dify.107421-nginx-seoul-arm-01.conf
 create mode 100644 0-部署应用/Phoneix-arm64-01/AI-Dify/docker-compose-modified.yaml
 create mode 100644 0-部署应用/Phoneix-arm64-01/AI-Dify/docker-compose-官方.yaml
 create mode 100644 0-部署应用/Phoneix-arm64-01/AI-Dify/docker-entrypoint-mount.sh
 create mode 100644 0-部署应用/Phoneix-arm64-01/AI-Dify/squid.conf.template
 create mode 100644 0-部署应用/Phoneix-arm64-01/AI-Dify/设置的环境.env
 create mode 100644 0-部署应用/Phoneix-arm64-02/AI-Dify/docker-compose-modified.yaml
 create mode 100644 0-部署应用/Phoneix-arm64-02/基本信息.txt
 rename 0-部署应用/Tokyo-arm64-01/{AppFlowy-Notion替代品 => AppFlowy-Notion替代品-废弃}/2-17-deploy.env (100%)
 rename 0-部署应用/Tokyo-arm64-01/{AppFlowy-Notion替代品 => AppFlowy-Notion替代品-废弃}/app-flowy-docker-compose.yaml (100%)
 rename 0-部署应用/Tokyo-arm64-01/{AppFlowy-Notion替代品 => AppFlowy-Notion替代品-废弃}/appflowy-client.yaml (100%)
 rename 0-部署应用/Tokyo-arm64-01/{AppFlowy-Notion替代品 => AppFlowy-Notion替代品-废弃}/deploy.env (100%)
 create mode 100644 0-部署应用/Tokyo-arm64-01/telegram-ai-bot/README_CN.md
 create mode 100644 0-部署应用/Tokyo-arm64-01/telegram-ai-bot/docker-compose.yaml
 create mode 100644 0-部署应用/Tokyo-arm64-01/telegram-ai-bot/参考项目.txt

diff --git a/0-部署应用/Oracle-Cloud/将内存挂载为硬盘.sh b/0-部署应用/Oracle-Cloud/将内存挂载为硬盘.sh
new file mode 100644
index 0000000..4f835b5
--- /dev/null
+++ b/0-部署应用/Oracle-Cloud/将内存挂载为硬盘.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# rd_size is in KiB: 1GB 1048576, 5GB 5242880, 8GB 8388608
+sudo modprobe brd rd_nr=1 rd_size=8388608 max_part=1
+
+sudo mkfs.ext4 /dev/ram0
+
+sudo mkdir /mnt/ramdisk
+
+sudo mount /dev/ram0 /mnt/ramdisk
+
+
+# Test
+touch /mnt/ramdisk/test.txt
+
+# Cleanup
+sudo umount /mnt/ramdisk
diff --git a/0-部署应用/Phoneix-arm64-01/AI-Dify/dify.107421-nginx-seoul-arm-01.conf b/0-部署应用/Phoneix-arm64-01/AI-Dify/dify.107421-nginx-seoul-arm-01.conf
new file mode 100644
index 0000000..da2b9d6
--- /dev/null
+++ b/0-部署应用/Phoneix-arm64-01/AI-Dify/dify.107421-nginx-seoul-arm-01.conf
@@ -0,0 +1,122 @@
+server {
+    server_name dify.107421.xyz;
+    listen 80;
+    return 301 https://dify.107421.xyz$request_uri;
+}
+
+server {
+    listen 443 ssl;
+    server_name dify.107421.xyz;
+
+    ssl_certificate /etc/nginx/conf.d/ssl_key/dify.107421.xyz.cert.pem;
+    ssl_certificate_key /etc/nginx/conf.d/ssl_key/dify.107421.xyz.key.pem;
+    ssl_session_timeout 1d;
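+    # Note: the session-cache/ticket, protocol and cipher settings that follow
+    # appear to match the Mozilla "intermediate" TLS profile; the certificate and
+    # key paths above are assumed to already exist on the proxy host (or be
+    # mounted into its nginx container) before this vhost is enabled.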
ssl_session_cache shared:MozSSL:10m; + ssl_session_tickets off; + + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384; + ssl_prefer_server_ciphers off; + + location /console/api { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_buffering off; + proxy_send_timeout 10000s; + proxy_read_timeout 10000s; + client_body_timeout 6000s; + proxy_pass http://129.146.65.80:5001; + } + + location /api { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_buffering off; + proxy_send_timeout 10000s; + proxy_read_timeout 10000s; + client_body_timeout 6000s; + proxy_pass http://129.146.65.80:5001; + } + + location /v1 { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_buffering off; + proxy_send_timeout 10000s; + proxy_read_timeout 10000s; + client_body_timeout 6000s; + proxy_pass http://129.146.65.80:5001; + + } + + location /files { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_buffering off; + proxy_send_timeout 10000s; + proxy_read_timeout 10000s; + client_body_timeout 6000s; + proxy_pass http://129.146.65.80:5001; + } + + location /explore { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_buffering off; + proxy_send_timeout 10000s; + proxy_read_timeout 10000s; + client_body_timeout 6000s; + proxy_pass http://129.146.65.80:3000; + } + + location /e { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_buffering off; + proxy_send_timeout 10000s; + proxy_read_timeout 10000s; + client_body_timeout 6000s; + proxy_set_header Dify-Hook-Url ://; + proxy_pass http://129.146.65.80:5002; + } + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_buffering off; + proxy_send_timeout 10000s; + proxy_read_timeout 10000s; + client_body_timeout 6000s; + proxy_pass http://129.146.65.80:3000; + } + + +} diff --git a/0-部署应用/Phoneix-arm64-01/AI-Dify/docker-compose-modified.yaml b/0-部署应用/Phoneix-arm64-01/AI-Dify/docker-compose-modified.yaml new file mode 100644 
index 0000000..53234bf --- /dev/null +++ b/0-部署应用/Phoneix-arm64-01/AI-Dify/docker-compose-modified.yaml @@ -0,0 +1,542 @@ +# ================================================================== +# WARNING: This file is auto-generated by generate_docker_compose +# Do not modify this file directly. Instead, update the .env.example +# or docker-compose-template.yaml and regenerate this file. +# ================================================================== + +x-shared-env: &shared-api-worker-env + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-} + SERVICE_API_URL: ${SERVICE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + APP_WEB_URL: ${APP_WEB_URL:-} + FILES_URL: ${FILES_URL:-} + LOG_LEVEL: ${LOG_LEVEL:-INFO} + LOG_FILE: ${LOG_FILE:-/app/logs/server.log} + LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20} + LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5} + LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S} + LOG_TZ: ${LOG_TZ:-UTC} + DEBUG: ${DEBUG:-false} + FLASK_DEBUG: ${FLASK_DEBUG:-false} + SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U} + INIT_PASSWORD: ${INIT_PASSWORD:-} + DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION} + CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai} + OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1} + MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true} + FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300} + ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60} + REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30} + APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0} + APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200} + DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0} + DIFY_PORT: ${DIFY_PORT:-5001} + SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1} + SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent} + SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10} + CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-} + GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360} + CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-} + CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false} + CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-} + CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-} + API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10} + API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60} + DB_USERNAME: ${DB_USERNAME:-postgres} + DB_PASSWORD: ${DB_PASSWORD:-difyai123456} + DB_HOST: ${DB_HOST:-db} + DB_PORT: ${DB_PORT:-5432} + DB_DATABASE: ${DB_DATABASE:-dify} + SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30} + SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600} + SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false} + POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100} + POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB} + POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB} + POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB} + POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB} + REDIS_HOST: ${REDIS_HOST:-redis} + REDIS_PORT: ${REDIS_PORT:-6379} + REDIS_USERNAME: ${REDIS_USERNAME:-} + REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456} + REDIS_USE_SSL: ${REDIS_USE_SSL:-false} + REDIS_DB: ${REDIS_DB:-0} + REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false} + REDIS_SENTINELS: ${REDIS_SENTINELS:-} + REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-} + REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-} + REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-} + REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1} + 
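+  # NOTE: the DB/Redis passwords above (difyai123456) and the SECRET_KEY are the
+  # upstream Dify defaults taken from .env.example; override them through the
+  # .env file for any deployment that is reachable from outside.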
REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false} + REDIS_CLUSTERS: ${REDIS_CLUSTERS:-} + REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-} + CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1} + BROKER_USE_SSL: ${BROKER_USE_SSL:-false} + CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false} + CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-} + CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1} + WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*} + CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*} + STORAGE_TYPE: ${STORAGE_TYPE:-opendal} + OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} + OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} + S3_ENDPOINT: ${S3_ENDPOINT:-} + S3_REGION: ${S3_REGION:-us-east-1} + S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai} + S3_ACCESS_KEY: ${S3_ACCESS_KEY:-} + S3_SECRET_KEY: ${S3_SECRET_KEY:-} + S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false} + AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai} + AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai} + AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container} + AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://.blob.core.windows.net} + GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name} + GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-} + ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name} + ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key} + ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key} + ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com} + ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1} + ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4} + ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path} + TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name} + TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key} + TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id} + TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region} + TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme} + OCI_ENDPOINT: ${OCI_ENDPOINT:-https://objectstorage.us-ashburn-1.oraclecloud.com} + OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name} + OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key} + OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key} + OCI_REGION: ${OCI_REGION:-us-ashburn-1} + HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name} + HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key} + HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key} + HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url} + VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name} + VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key} + VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key} + VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url} + VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region} + BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name} + BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key} + BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key} + BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url} + SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name} + SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key} + SUPABASE_URL: ${SUPABASE_URL:-your-server-url} + VECTOR_STORE: 
${VECTOR_STORE:-weaviate} + WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080} + WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333} + QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} + QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20} + QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false} + QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334} + MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530} + MILVUS_TOKEN: ${MILVUS_TOKEN:-} + MILVUS_USER: ${MILVUS_USER:-root} + MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus} + MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False} + MYSCALE_HOST: ${MYSCALE_HOST:-myscale} + MYSCALE_PORT: ${MYSCALE_PORT:-8123} + MYSCALE_USER: ${MYSCALE_USER:-default} + MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-} + MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify} + MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-} + COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server} + COUCHBASE_USER: ${COUCHBASE_USER:-Administrator} + COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password} + COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings} + COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default} + PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector} + PGVECTOR_PORT: ${PGVECTOR_PORT:-5432} + PGVECTOR_USER: ${PGVECTOR_USER:-postgres} + PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456} + PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify} + PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1} + PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5} + PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs} + PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432} + PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres} + PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456} + PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify} + ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak} + ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk} + ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou} + ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456} + ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount} + ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword} + ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify} + ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword} + ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com} + ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432} + ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1} + ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5} + TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb} + TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000} + TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-} + TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-} + TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify} + TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1} + TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify} + TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20} + TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false} + TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334} + TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify} + TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify} + TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1} + TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1} + TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1} + TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify} + TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100} + CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1} + CHROMA_PORT: ${CHROMA_PORT:-8000} + CHROMA_TENANT: 
${CHROMA_TENANT:-default_tenant} + CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database} + CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider} + CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-} + ORACLE_HOST: ${ORACLE_HOST:-oracle} + ORACLE_PORT: ${ORACLE_PORT:-1521} + ORACLE_USER: ${ORACLE_USER:-dify} + ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify} + ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1} + RELYT_HOST: ${RELYT_HOST:-db} + RELYT_PORT: ${RELYT_PORT:-5432} + RELYT_USER: ${RELYT_USER:-postgres} + RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456} + RELYT_DATABASE: ${RELYT_DATABASE:-postgres} + OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch} + OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200} + OPENSEARCH_USER: ${OPENSEARCH_USER:-admin} + OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin} + OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true} + TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1} + TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify} + TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30} + TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify} + TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify} + TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1} + TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2} + ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0} + ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200} + ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic} + ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} + KIBANA_PORT: ${KIBANA_PORT:-5601} + BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287} + BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000} + BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root} + BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify} + BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify} + BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1} + BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3} + VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak} + VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk} + VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai} + VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com} + VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http} + VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30} + VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30} + LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070} + LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm} + LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm} + OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase} + OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881} + OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test} + OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test} + OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} + OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io} + UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify} + UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15} + UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5} + ETL_TYPE: ${ETL_TYPE:-dify} + UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-} + UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-} + SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true} + PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512} + CODE_GENERATION_MAX_TOKENS: 
${CODE_GENERATION_MAX_TOKENS:-1024} + MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64} + UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10} + UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100} + UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50} + SENTRY_DSN: ${SENTRY_DSN:-} + API_SENTRY_DSN: ${API_SENTRY_DSN:-} + API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-} + NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public} + NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-} + NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-} + NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-} + MAIL_TYPE: ${MAIL_TYPE:-resend} + MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-} + RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com} + RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key} + SMTP_SERVER: ${SMTP_SERVER:-} + SMTP_PORT: ${SMTP_PORT:-465} + SMTP_USERNAME: ${SMTP_USERNAME:-} + SMTP_PASSWORD: ${SMTP_PASSWORD:-} + SMTP_USE_TLS: ${SMTP_USE_TLS:-true} + SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} + INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72} + RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5} + CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194} + CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox} + CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807} + CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808} + CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5} + CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20} + CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000} + CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30} + CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30} + CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000} + CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10} + CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60} + CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10} + TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000} + WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500} + WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200} + WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5} + MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800} + WORKFLOW_PARALLEL_DEPTH_LIMIT: ${WORKFLOW_PARALLEL_DEPTH_LIMIT:-3} + WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10} + HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760} + HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576} + SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128} + SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + PGUSER: ${PGUSER:-${DB_USERNAME}} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}} + POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release} + SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + 
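+  # NOTE: the ssrf_proxy service is commented out further down in this modified
+  # compose file, so the http://ssrf_proxy:3128 URLs used here will not resolve
+  # unless that service is re-enabled or these values are pointed at a proxy
+  # that actually exists.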
SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} + WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true} + WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} + CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} + CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} + ORACLE_PWD: ${ORACLE_PWD:-Dify123456} + ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} + ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} + ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} + ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} + ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} + MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} + ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} + MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} + MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true} + PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres} + PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} + OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} + OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m} + OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} + OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1} + OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1} + OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536} + OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536} + NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} + NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} + NGINX_PORT: ${NGINX_PORT:-80} + NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} + NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt} + NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} + NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3} + NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} + NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M} + NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} + NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} + NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} + NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} + CERTBOT_EMAIL: 
${CERTBOT_EMAIL:-your_email@example.com} + CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com} + CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-} + SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128} + SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} + SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} + SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} + SSRF_DEFAULT_TIME_OUT: ${SSRF_DEFAULT_TIME_OUT:-5} + SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5} + SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5} + SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5} + EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80} + EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443} + POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-} + POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-} + POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-} + POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-} + POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-} + POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-} + CSP_WHITELIST: ${CSP_WHITELIST:-} + CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false} + MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} + DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} + EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002} + PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002} + PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + PLUGIN_DAEMON_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + PLUGIN_PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false} + PLUGIN_DEBUGGING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0} + PLUGIN_DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003} + EXPOSE_PLUGIN_DEBUGGING_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost} + EXPOSE_PLUGIN_DEBUGGING_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003} + PLUGIN_DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + PLUGIN_DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001} + ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}} + MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} + +services: + # API service + api: + image: langgenius/dify-api:0.15.3 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'api' starts the API server. + MODE: api + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + volumes: + # Mount the storage directory to the container, for storing user files. + - /mnt/ramdisk/dify-api/storage:/app/api/storage + network_mode: "host" + + # worker service + # The Celery worker for processing the queue. + worker: + image: langgenius/dify-api:0.15.3 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'worker' starts the Celery worker for processing the queue. 
+ MODE: worker + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + volumes: + # Mount the storage directory to the container, for storing user files. + - /mnt/ramdisk/dify-api/storage:/app/api/storage + network_mode: "host" + + # Frontend web application. + web: + image: langgenius/dify-web:0.15.3 + restart: always + ports: + - '3000:3000' + environment: + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + SENTRY_DSN: ${WEB_SENTRY_DSN:-} + NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + CSP_WHITELIST: ${CSP_WHITELIST:-} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} + + # The DifySandbox + sandbox: + image: langgenius/dify-sandbox:0.2.10 + restart: always + environment: + # The DifySandbox configurations + # Make sure you are changing this key for your deployment with a strong key. + # You can generate a strong key using `openssl rand -base64 42`. + API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + GIN_MODE: ${SANDBOX_GIN_MODE:-release} + WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + volumes: + - /mnt/ramdisk/sandbox/dependencies:/dependencies + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ] + networks: + - default + + # plugin daemon + plugin_daemon: + image: langgenius/dify-plugin-daemon:0.0.2-local + restart: always + environment: + # Use the shared environment variables. 
+ <<: *shared-api-worker-env + DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} + SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002} + SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false} + DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001} + DIFY_INNER_API_KEY: ${INNER_API_KEY_FOR_PLUGIN:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_REMOTE_INSTALL_HOST:-0.0.0.0} + PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_REMOTE_INSTALL_PORT:-5003} + PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd} + FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} + ports: + - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}" + volumes: + - /mnt/ramdisk/plugin_daemon:/app/storage + + + # ssrf_proxy server + # for more information, please refer to + # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed + # ssrf_proxy: + # image: ubuntu/squid:latest + # restart: always + # volumes: + # - /mnt/ramdisk/ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template + # - /mnt/ramdisk/ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh + # entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + # environment: + # # pls clearly modify the squid env vars to fit your network environment. + # HTTP_PORT: ${SSRF_HTTP_PORT:-3128} + # COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} + # REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} + # SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} + # SANDBOX_PORT: ${SANDBOX_PORT:-8194} + # networks: + # - ssrf_proxy_network + # - default + +# networks: + # create a network between sandbox, api and ssrf_proxy, and can not access outside. + # ssrf_proxy_network: + # driver: bridge diff --git a/0-部署应用/Phoneix-arm64-01/AI-Dify/docker-compose-官方.yaml b/0-部署应用/Phoneix-arm64-01/AI-Dify/docker-compose-官方.yaml new file mode 100644 index 0000000..e0905d9 --- /dev/null +++ b/0-部署应用/Phoneix-arm64-01/AI-Dify/docker-compose-官方.yaml @@ -0,0 +1,1010 @@ +# ================================================================== +# WARNING: This file is auto-generated by generate_docker_compose +# Do not modify this file directly. Instead, update the .env.example +# or docker-compose-template.yaml and regenerate this file. 
+# ================================================================== + +x-shared-env: &shared-api-worker-env + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-} + SERVICE_API_URL: ${SERVICE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + APP_WEB_URL: ${APP_WEB_URL:-} + FILES_URL: ${FILES_URL:-} + LOG_LEVEL: ${LOG_LEVEL:-INFO} + LOG_FILE: ${LOG_FILE:-/app/logs/server.log} + LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20} + LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5} + LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S} + LOG_TZ: ${LOG_TZ:-UTC} + DEBUG: ${DEBUG:-false} + FLASK_DEBUG: ${FLASK_DEBUG:-false} + SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U} + INIT_PASSWORD: ${INIT_PASSWORD:-} + DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION} + CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai} + OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1} + MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true} + FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300} + ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60} + REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30} + APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0} + APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200} + DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0} + DIFY_PORT: ${DIFY_PORT:-5001} + SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1} + SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent} + SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10} + CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-} + GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360} + CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-} + CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false} + CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-} + CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-} + API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10} + API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60} + DB_USERNAME: ${DB_USERNAME:-postgres} + DB_PASSWORD: ${DB_PASSWORD:-difyai123456} + DB_HOST: ${DB_HOST:-db} + DB_PORT: ${DB_PORT:-5432} + DB_DATABASE: ${DB_DATABASE:-dify} + SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30} + SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600} + SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false} + POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100} + POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB} + POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB} + POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB} + POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB} + REDIS_HOST: ${REDIS_HOST:-redis} + REDIS_PORT: ${REDIS_PORT:-6379} + REDIS_USERNAME: ${REDIS_USERNAME:-} + REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456} + REDIS_USE_SSL: ${REDIS_USE_SSL:-false} + REDIS_DB: ${REDIS_DB:-0} + REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false} + REDIS_SENTINELS: ${REDIS_SENTINELS:-} + REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-} + REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-} + REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-} + REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1} + REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false} + REDIS_CLUSTERS: ${REDIS_CLUSTERS:-} + REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-} + CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1} + BROKER_USE_SSL: ${BROKER_USE_SSL:-false} + CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false} + CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-} + 
CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1} + WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*} + CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*} + STORAGE_TYPE: ${STORAGE_TYPE:-opendal} + OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} + OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} + S3_ENDPOINT: ${S3_ENDPOINT:-} + S3_REGION: ${S3_REGION:-us-east-1} + S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai} + S3_ACCESS_KEY: ${S3_ACCESS_KEY:-} + S3_SECRET_KEY: ${S3_SECRET_KEY:-} + S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false} + AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai} + AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai} + AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container} + AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://.blob.core.windows.net} + GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name} + GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-} + ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name} + ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key} + ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key} + ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com} + ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1} + ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4} + ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path} + TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name} + TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key} + TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id} + TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region} + TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme} + OCI_ENDPOINT: ${OCI_ENDPOINT:-https://objectstorage.us-ashburn-1.oraclecloud.com} + OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name} + OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key} + OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key} + OCI_REGION: ${OCI_REGION:-us-ashburn-1} + HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name} + HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key} + HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key} + HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url} + VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name} + VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key} + VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key} + VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url} + VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region} + BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name} + BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key} + BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key} + BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url} + SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name} + SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key} + SUPABASE_URL: ${SUPABASE_URL:-your-server-url} + VECTOR_STORE: ${VECTOR_STORE:-weaviate} + WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080} + WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333} + QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} + QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20} + QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false} + 
QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334} + MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530} + MILVUS_TOKEN: ${MILVUS_TOKEN:-} + MILVUS_USER: ${MILVUS_USER:-root} + MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus} + MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False} + MYSCALE_HOST: ${MYSCALE_HOST:-myscale} + MYSCALE_PORT: ${MYSCALE_PORT:-8123} + MYSCALE_USER: ${MYSCALE_USER:-default} + MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-} + MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify} + MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-} + COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server} + COUCHBASE_USER: ${COUCHBASE_USER:-Administrator} + COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password} + COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings} + COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default} + PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector} + PGVECTOR_PORT: ${PGVECTOR_PORT:-5432} + PGVECTOR_USER: ${PGVECTOR_USER:-postgres} + PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456} + PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify} + PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1} + PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5} + PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs} + PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432} + PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres} + PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456} + PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify} + ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak} + ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk} + ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou} + ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456} + ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount} + ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword} + ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify} + ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword} + ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com} + ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432} + ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1} + ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5} + TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb} + TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000} + TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-} + TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-} + TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify} + TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1} + TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify} + TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20} + TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false} + TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334} + TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify} + TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify} + TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1} + TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1} + TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1} + TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify} + TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100} + CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1} + CHROMA_PORT: ${CHROMA_PORT:-8000} + CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant} + CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database} + CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider} + CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-} + ORACLE_HOST: ${ORACLE_HOST:-oracle} + ORACLE_PORT: ${ORACLE_PORT:-1521} + ORACLE_USER: ${ORACLE_USER:-dify} + ORACLE_PASSWORD: 
${ORACLE_PASSWORD:-dify} + ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1} + RELYT_HOST: ${RELYT_HOST:-db} + RELYT_PORT: ${RELYT_PORT:-5432} + RELYT_USER: ${RELYT_USER:-postgres} + RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456} + RELYT_DATABASE: ${RELYT_DATABASE:-postgres} + OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch} + OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200} + OPENSEARCH_USER: ${OPENSEARCH_USER:-admin} + OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin} + OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true} + TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1} + TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify} + TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30} + TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify} + TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify} + TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1} + TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2} + ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0} + ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200} + ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic} + ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} + KIBANA_PORT: ${KIBANA_PORT:-5601} + BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287} + BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000} + BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root} + BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify} + BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify} + BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1} + BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3} + VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak} + VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk} + VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai} + VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com} + VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http} + VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30} + VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30} + LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070} + LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm} + LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm} + OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase} + OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881} + OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test} + OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test} + OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} + OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io} + UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify} + UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15} + UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5} + ETL_TYPE: ${ETL_TYPE:-dify} + UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-} + UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-} + SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true} + PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512} + CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024} + MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64} + UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10} + UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100} + UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50} + SENTRY_DSN: ${SENTRY_DSN:-} + API_SENTRY_DSN: ${API_SENTRY_DSN:-} + 
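+  # NOTE: with the Sentry DSN variables left empty (the default), nothing is
+  # reported to Sentry, so the sample-rate settings below have no effect.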
API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-} + NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public} + NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-} + NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-} + NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-} + MAIL_TYPE: ${MAIL_TYPE:-resend} + MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-} + RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com} + RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key} + SMTP_SERVER: ${SMTP_SERVER:-} + SMTP_PORT: ${SMTP_PORT:-465} + SMTP_USERNAME: ${SMTP_USERNAME:-} + SMTP_PASSWORD: ${SMTP_PASSWORD:-} + SMTP_USE_TLS: ${SMTP_USE_TLS:-true} + SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} + INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72} + RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5} + CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194} + CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox} + CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807} + CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808} + CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5} + CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20} + CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000} + CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30} + CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30} + CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000} + CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10} + CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60} + CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10} + TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000} + WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500} + WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200} + WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5} + MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800} + WORKFLOW_PARALLEL_DEPTH_LIMIT: ${WORKFLOW_PARALLEL_DEPTH_LIMIT:-3} + WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10} + HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760} + HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576} + SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128} + SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + PGUSER: ${PGUSER:-${DB_USERNAME}} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}} + POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release} + SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} + WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true} + 
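+  # NOTE: for the default weaviate vector store, WEAVIATE_API_KEY (set earlier in
+  # this block) has to be one of the keys listed in
+  # WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS below; the stock values already
+  # match, so change both together if the key is rotated.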
WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} + CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} + CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} + ORACLE_PWD: ${ORACLE_PWD:-Dify123456} + ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} + ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} + ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} + ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} + ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} + MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} + ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} + MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} + MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true} + PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres} + PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} + OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} + OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m} + OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} + OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1} + OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1} + OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536} + OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536} + NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} + NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} + NGINX_PORT: ${NGINX_PORT:-80} + NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} + NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt} + NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} + NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3} + NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} + NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M} + NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} + NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} + NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} + NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} + CERTBOT_EMAIL: ${CERTBOT_EMAIL:-your_email@example.com} + CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com} + CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-} + SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128} + SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} + SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} + SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} + SSRF_DEFAULT_TIME_OUT: 
${SSRF_DEFAULT_TIME_OUT:-5} + SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5} + SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5} + SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5} + EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80} + EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443} + POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-} + POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-} + POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-} + POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-} + POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-} + POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-} + CSP_WHITELIST: ${CSP_WHITELIST:-} + CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false} + MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} + DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} + EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002} + PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002} + PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + PLUGIN_DAEMON_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + PLUGIN_PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false} + PLUGIN_DEBUGGING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0} + PLUGIN_DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003} + EXPOSE_PLUGIN_DEBUGGING_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost} + EXPOSE_PLUGIN_DEBUGGING_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003} + PLUGIN_DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + PLUGIN_DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001} + ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}} + MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} + +services: + # API service + api: + image: langgenius/dify-api:0.15.3 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'api' starts the API server. + MODE: api + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ./volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # worker service + # The Celery worker for processing the queue. + worker: + image: langgenius/dify-api:0.15.3 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'worker' starts the Celery worker for processing the queue. 
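+      # (The worker uses the same image and shared env as the api service above; only MODE differs.
+      #  If the Celery queue backs up, more workers can be started, e.g. `docker compose up -d --scale worker=2` — optional, not assumed by this file.)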
+ MODE: worker + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ./volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # Frontend web application. + web: + image: langgenius/dify-web:0.15.3 + restart: always + environment: + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + SENTRY_DSN: ${WEB_SENTRY_DSN:-} + NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + CSP_WHITELIST: ${CSP_WHITELIST:-} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} + + # The postgres database. + db: + image: postgres:15-alpine + restart: always + environment: + PGUSER: ${PGUSER:-postgres} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456} + POSTGRES_DB: ${POSTGRES_DB:-dify} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + command: > + postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}' + -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}' + -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}' + -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}' + -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}' + volumes: + - ./volumes/db/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + ports: + - '${EXPOSE_DB_PORT:-5432}:5432' + + # The redis cache. + redis: + image: redis:6-alpine + restart: always + environment: + REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456} + volumes: + # Mount the redis data directory to the container. + - ./volumes/redis/data:/data + # Set the redis password when startup redis server. + command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456} + healthcheck: + test: [ 'CMD', 'redis-cli', 'ping' ] + + # The DifySandbox + sandbox: + image: langgenius/dify-sandbox:0.2.10 + restart: always + environment: + # The DifySandbox configurations + # Make sure you are changing this key for your deployment with a strong key. + # You can generate a strong key using `openssl rand -base64 42`. + API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + GIN_MODE: ${SANDBOX_GIN_MODE:-release} + WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + volumes: + - ./volumes/sandbox/dependencies:/dependencies + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ] + networks: + - ssrf_proxy_network + + # plugin daemon + plugin_daemon: + image: langgenius/dify-plugin-daemon:0.0.2-local + restart: always + environment: + # Use the shared environment variables. 
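+      # (`<<: *shared-api-worker-env` merges the shared environment anchor defined earlier in this file;
+      #  the keys listed below override the merged values for this service only.)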
+ <<: *shared-api-worker-env + DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} + SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002} + SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false} + DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001} + DIFY_INNER_API_KEY: ${INNER_API_KEY_FOR_PLUGIN:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_REMOTE_INSTALL_HOST:-0.0.0.0} + PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_REMOTE_INSTALL_PORT:-5003} + PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd} + FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} + ports: + - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}" + volumes: + - ./volumes/plugin_daemon:/app/storage + + + # ssrf_proxy server + # for more information, please refer to + # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed + ssrf_proxy: + image: ubuntu/squid:latest + restart: always + volumes: + - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template + - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh + entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + environment: + # pls clearly modify the squid env vars to fit your network environment. + HTTP_PORT: ${SSRF_HTTP_PORT:-3128} + COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} + REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} + SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + networks: + - ssrf_proxy_network + - default + + # Certbot service + # use `docker-compose --profile certbot up` to start the certbot service. + certbot: + image: certbot/certbot + profiles: + - certbot + volumes: + - ./volumes/certbot/conf:/etc/letsencrypt + - ./volumes/certbot/www:/var/www/html + - ./volumes/certbot/logs:/var/log/letsencrypt + - ./volumes/certbot/conf/live:/etc/letsencrypt/live + - ./certbot/update-cert.template.txt:/update-cert.template.txt + - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh + environment: + - CERTBOT_EMAIL=${CERTBOT_EMAIL} + - CERTBOT_DOMAIN=${CERTBOT_DOMAIN} + - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-} + entrypoint: [ '/docker-entrypoint.sh' ] + command: [ 'tail', '-f', '/dev/null' ] + + # The nginx reverse proxy. + # used for reverse proxying the API service and Web service. 
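+  # (With NGINX_HTTPS_ENABLED=false, the default here, this container serves plain HTTP on NGINX_PORT;
+  #  TLS can instead be terminated by an external reverse proxy pointed at EXPOSE_NGINX_PORT on this host.)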
+ nginx: + image: nginx:latest + restart: always + volumes: + - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template + - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template + - ./nginx/https.conf.template:/etc/nginx/https.conf.template + - ./nginx/conf.d:/etc/nginx/conf.d + - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh + - ./nginx/ssl:/etc/ssl # cert dir (legacy) + - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container) + - ./volumes/certbot/conf:/etc/letsencrypt + - ./volumes/certbot/www:/var/www/html + entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + environment: + NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} + NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} + NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} + NGINX_PORT: ${NGINX_PORT:-80} + # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory + # and modify the env vars below in .env if HTTPS_ENABLED is true. + NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt} + NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} + NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3} + NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} + NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M} + NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} + NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} + NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} + NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} + CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-} + depends_on: + - api + - web + ports: + - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}' + - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}' + + # The Weaviate vector store. + weaviate: + image: semitechnologies/weaviate:1.19.0 + profiles: + - '' + - weaviate + restart: always + volumes: + # Mount the Weaviate data directory to the con tainer. + - ./volumes/weaviate:/var/lib/weaviate + environment: + # The Weaviate configurations + # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information. + PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false} + DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + + # Qdrant vector store. + # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.) + qdrant: + image: langgenius/qdrant:v1.7.3 + profiles: + - qdrant + restart: always + volumes: + - ./volumes/qdrant:/qdrant/storage + environment: + QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} + + # The Couchbase vector store. 
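+  # (Started only when the `couchbase` profile is enabled, e.g. `docker compose --profile couchbase up -d`;
+  #  the same pattern applies to the other optional vector-store services below.)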
+ couchbase-server: + build: ./couchbase-server + profiles: + - couchbase + restart: always + environment: + - CLUSTER_NAME=dify_search + - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator} + - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password} + - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings} + - COUCHBASE_BUCKET_RAMSIZE=512 + - COUCHBASE_RAM_SIZE=2048 + - COUCHBASE_EVENTING_RAM_SIZE=512 + - COUCHBASE_INDEX_RAM_SIZE=512 + - COUCHBASE_FTS_RAM_SIZE=1024 + hostname: couchbase-server + container_name: couchbase-server + working_dir: /opt/couchbase + stdin_open: true + tty: true + entrypoint: [ "" ] + command: sh -c "/opt/couchbase/init/init-cbserver.sh" + volumes: + - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data + healthcheck: + # ensure bucket was created before proceeding + test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ] + interval: 10s + retries: 10 + start_period: 30s + timeout: 10s + + # The pgvector vector database. + pgvector: + image: pgvector/pgvector:pg16 + profiles: + - pgvector + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. + POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. + POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + volumes: + - ./volumes/pgvector/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # pgvecto-rs vector store + pgvecto-rs: + image: tensorchord/pgvecto-rs:pg16-v0.3.0 + profiles: + - pgvecto-rs + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. + POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. 
+ POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + volumes: + - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # Chroma vector database + chroma: + image: ghcr.io/chroma-core/chroma:0.5.20 + profiles: + - chroma + restart: always + volumes: + - ./volumes/chroma:/chroma/chroma + environment: + CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} + CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} + IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} + + # OceanBase vector database + oceanbase: + image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215 + profiles: + - oceanbase + restart: always + volumes: + - ./volumes/oceanbase/data:/root/ob + - ./volumes/oceanbase/conf:/root/.obd/cluster + - ./volumes/oceanbase/init.d:/root/boot/init.d + environment: + OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} + OB_SERVER_IP: '127.0.0.1' + + # Oracle vector database + oracle: + image: container-registry.oracle.com/database/free:latest + profiles: + - oracle + restart: always + volumes: + - source: oradata + type: volume + target: /opt/oracle/oradata + - ./startupscripts:/opt/oracle/scripts/startup + environment: + ORACLE_PWD: ${ORACLE_PWD:-Dify123456} + ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} + + # Milvus vector database services + etcd: + container_name: milvus-etcd + image: quay.io/coreos/etcd:v3.5.5 + profiles: + - milvus + environment: + ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} + ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} + ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} + ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} + volumes: + - ./volumes/milvus/etcd:/etcd + command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd + healthcheck: + test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + minio: + container_name: milvus-minio + image: minio/minio:RELEASE.2023-03-20T20-16-18Z + profiles: + - milvus + environment: + MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} + volumes: + - ./volumes/milvus/minio:/minio_data + command: minio server /minio_data --console-address ":9001" + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + milvus-standalone: + container_name: milvus-standalone + image: milvusdb/milvus:v2.5.0-beta + profiles: + - milvus + command: [ 'milvus', 'run', 'standalone' ] + environment: + ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} + MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} + common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true} + volumes: + - ./volumes/milvus/milvus:/var/lib/milvus + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ] + interval: 30s + start_period: 90s + timeout: 20s + retries: 3 + depends_on: + - etcd + - minio + ports: + - 19530:19530 + - 9091:9091 + networks: + - milvus + + # Opensearch 
vector database + opensearch: + container_name: opensearch + image: opensearchproject/opensearch:latest + profiles: + - opensearch + environment: + discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} + bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} + OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} + ulimits: + memlock: + soft: ${OPENSEARCH_MEMLOCK_SOFT:--1} + hard: ${OPENSEARCH_MEMLOCK_HARD:--1} + nofile: + soft: ${OPENSEARCH_NOFILE_SOFT:-65536} + hard: ${OPENSEARCH_NOFILE_HARD:-65536} + volumes: + - ./volumes/opensearch/data:/usr/share/opensearch/data + networks: + - opensearch-net + + opensearch-dashboards: + container_name: opensearch-dashboards + image: opensearchproject/opensearch-dashboards:latest + profiles: + - opensearch + environment: + OPENSEARCH_HOSTS: '["https://opensearch:9200"]' + volumes: + - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml + networks: + - opensearch-net + depends_on: + - opensearch + + # MyScale vector database + myscale: + container_name: myscale + image: myscale/myscaledb:1.6.4 + profiles: + - myscale + restart: always + tty: true + volumes: + - ./volumes/myscale/data:/var/lib/clickhouse + - ./volumes/myscale/log:/var/log/clickhouse-server + - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml + ports: + - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123} + + # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html + # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3 + container_name: elasticsearch + profiles: + - elasticsearch + - elasticsearch-ja + restart: always + volumes: + - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh + - dify_es01_data:/usr/share/elasticsearch/data + environment: + ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} + VECTOR_STORE: ${VECTOR_STORE:-} + cluster.name: dify-es-cluster + node.name: dify-es0 + discovery.type: single-node + xpack.license.self_generated.type: basic + xpack.security.enabled: 'true' + xpack.security.enrollment.enabled: 'false' + xpack.security.http.ssl.enabled: 'false' + ports: + - ${ELASTICSEARCH_PORT:-9200}:9200 + deploy: + resources: + limits: + memory: 2g + entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ] + healthcheck: + test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ] + interval: 30s + timeout: 10s + retries: 50 + + # https://www.elastic.co/guide/en/kibana/current/docker.html + # https://www.elastic.co/guide/en/kibana/current/settings.html + kibana: + image: docker.elastic.co/kibana/kibana:8.14.3 + container_name: kibana + profiles: + - elasticsearch + depends_on: + - elasticsearch + restart: always + environment: + XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa + NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana + XPACK_SECURITY_ENABLED: 'true' + XPACK_SECURITY_ENROLLMENT_ENABLED: 'false' + XPACK_SECURITY_HTTP_SSL_ENABLED: 'false' + XPACK_FLEET_ISAIRGAPPED: 'true' + I18N_LOCALE: zh-CN + SERVER_PORT: '5601' + ELASTICSEARCH_HOSTS: http://elasticsearch:9200 + ports: + - ${KIBANA_PORT:-5601}:5601 + healthcheck: + test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || 
exit 1' ] + interval: 30s + timeout: 10s + retries: 3 + + # unstructured . + # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.) + unstructured: + image: downloads.unstructured.io/unstructured-io/unstructured-api:latest + profiles: + - unstructured + restart: always + volumes: + - ./volumes/unstructured:/app/data + +networks: + # create a network between sandbox, api and ssrf_proxy, and can not access outside. + ssrf_proxy_network: + driver: bridge + internal: true + milvus: + driver: bridge + opensearch-net: + driver: bridge + internal: true + +volumes: + oradata: + dify_es01_data: diff --git a/0-部署应用/Phoneix-arm64-01/AI-Dify/docker-entrypoint-mount.sh b/0-部署应用/Phoneix-arm64-01/AI-Dify/docker-entrypoint-mount.sh new file mode 100644 index 0000000..3e837be --- /dev/null +++ b/0-部署应用/Phoneix-arm64-01/AI-Dify/docker-entrypoint-mount.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Modified based on Squid OCI image entrypoint + +# This entrypoint aims to forward the squid logs to stdout to assist users of +# common container related tooling (e.g., kubernetes, docker-compose, etc) to +# access the service logs. + +# Moreover, it invokes the squid binary, leaving all the desired parameters to +# be provided by the "command" passed to the spawned container. If no command +# is provided by the user, the default behavior (as per the CMD statement in +# the Dockerfile) will be to use Ubuntu's default configuration [1] and run +# squid with the "-NYC" options to mimic the behavior of the Ubuntu provided +# systemd unit. + +# [1] The default configuration is changed in the Dockerfile to allow local +# network connections. See the Dockerfile for further information. + +echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process" +if [ ! 
-f /etc/ssl/private/ssl-cert-snakeoil.key ]; then + /usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1 +fi + +tail -F /var/log/squid/access.log 2>/dev/null & +tail -F /var/log/squid/error.log 2>/dev/null & +tail -F /var/log/squid/store.log 2>/dev/null & +tail -F /var/log/squid/cache.log 2>/dev/null & + +# Replace environment variables in the template and output to the squid.conf +echo "[ENTRYPOINT] replacing environment variables in the template" +awk '{ + while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) { + var = substr($0, RSTART+2, RLENGTH-3) + val = ENVIRON[var] + $0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH) + } + print +}' /etc/squid/squid.conf.template > /etc/squid/squid.conf + +/usr/sbin/squid -Nz +echo "[ENTRYPOINT] starting squid" +/usr/sbin/squid -f /etc/squid/squid.conf -NYC 1 \ No newline at end of file diff --git a/0-部署应用/Phoneix-arm64-01/AI-Dify/squid.conf.template b/0-部署应用/Phoneix-arm64-01/AI-Dify/squid.conf.template new file mode 100644 index 0000000..aa68299 --- /dev/null +++ b/0-部署应用/Phoneix-arm64-01/AI-Dify/squid.conf.template @@ -0,0 +1,54 @@ +acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN) +acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN) +acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN) +acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines +acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN) +acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN) +acl localnet src fc00::/7 # RFC 4193 local private network range +acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines +acl SSL_ports port 443 +# acl SSL_ports port 1025-65535 # Enable the configuration to resolve this issue: https://github.com/langgenius/dify/issues/12792 +acl Safe_ports port 80 # http +acl Safe_ports port 21 # ftp +acl Safe_ports port 443 # https +acl Safe_ports port 70 # gopher +acl Safe_ports port 210 # wais +acl Safe_ports port 1025-65535 # unregistered ports +acl Safe_ports port 280 # http-mgmt +acl Safe_ports port 488 # gss-http +acl Safe_ports port 591 # filemaker +acl Safe_ports port 777 # multiling http +acl CONNECT method CONNECT +http_access deny !Safe_ports +http_access deny CONNECT !SSL_ports +http_access allow localhost manager +http_access deny manager +http_access allow localhost +include /etc/squid/conf.d/*.conf +http_access deny all + +################################## Proxy Server ################################ +http_port ${HTTP_PORT} +coredump_dir ${COREDUMP_DIR} +refresh_pattern ^ftp: 1440 20% 10080 +refresh_pattern ^gopher: 1440 0% 1440 +refresh_pattern -i (/cgi-bin/|\?) 0 0% 0 +refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims +refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims +refresh_pattern \/InRelease$ 0 0% 0 refresh-ims +refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims +refresh_pattern . 
0 20% 4320 + + +# cache_dir ufs /var/spool/squid 100 16 256 +# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks +# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default + +################################## Reverse Proxy To Sandbox ################################ +http_port ${REVERSE_PROXY_PORT} accel vhost +cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver +acl src_all src all +http_access allow src_all + + +docker exec -it dify_api_1 telnet 10.0.0.247 6379 \ No newline at end of file diff --git a/0-部署应用/Phoneix-arm64-01/AI-Dify/设置的环境.env b/0-部署应用/Phoneix-arm64-01/AI-Dify/设置的环境.env new file mode 100644 index 0000000..54a4643 --- /dev/null +++ b/0-部署应用/Phoneix-arm64-01/AI-Dify/设置的环境.env @@ -0,0 +1,736 @@ +# ------------------------------ +# Environment Variables for API service & worker +# ------------------------------ + +# ------------------------------ +# Common Variables +# ------------------------------ + +# The backend URL of the console API, +# used to concatenate the authorization callback. +# If empty, it is the same domain. +# Example: https://api.console.dify.ai +CONSOLE_API_URL= + +# The front-end URL of the console web, +# used to concatenate some front-end addresses and for CORS configuration use. +# If empty, it is the same domain. +# Example: https://console.dify.ai +CONSOLE_WEB_URL= + +# Service API Url, +# used to display Service API Base Url to the front-end. +# If empty, it is the same domain. +# Example: https://api.dify.ai +SERVICE_API_URL= + +# WebApp API backend Url, +# used to declare the back-end URL for the front-end API. +# If empty, it is the same domain. +# Example: https://api.app.dify.ai +APP_API_URL= + +# WebApp Url, +# used to display WebAPP API Base Url to the front-end. +# If empty, it is the same domain. +# Example: https://app.dify.ai +APP_WEB_URL= + +# File preview or download Url prefix. +# used to display File preview or download Url to the front-end or as Multi-model inputs; +# Url is signed and has expiration time. +FILES_URL= + +# ------------------------------ +# Server Configuration +# ------------------------------ + +# The log level for the application. +# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` +LOG_LEVEL=INFO +# Log file path +LOG_FILE=/root/app-install/dify/logs/server.log +# Log file max size, the unit is MB +LOG_FILE_MAX_SIZE=50 +# Log file max backup count +LOG_FILE_BACKUP_COUNT=5 +# Log dateformat +LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S +# Log Timezone +LOG_TZ=Asia/Shanghai + +# Debug mode, default is false. +# It is recommended to turn on this configuration for local development +# to prevent some problems caused by monkey patch. +DEBUG=false + +# Flask debug mode, it can output trace information at the interface when turned on, +# which is convenient for debugging. +FLASK_DEBUG=false + +# A secretkey that is used for securely signing the session cookie +# and encrypting sensitive information on the database. +# You can generate a strong key using `openssl rand -base64 42`. +SECRET_KEY=bBj28uxctAwybtLFUr1Zlc3OKlTG5SsUiz+W9v71s0+YytuD8+Um8Qdy + +# Password for admin user initialization. +# If left unset, admin user will not be prompted for a password +# when creating the initial admin account. +# The length of the password cannot exceed 30 charactors. +INIT_PASSWORD=loveff.cxc.23 + +# Deployment environment. +# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`. +# Testing environment. 
There will be a distinct color label on the front-end page, +# indicating that this environment is a testing environment. +DEPLOY_ENV=PRODUCTION + +# Whether to enable the version check policy. +# If set to empty, https://updates.dify.ai will be called for version check. +CHECK_UPDATE_URL=https://updates.dify.ai + +# Used to change the OpenAI base address, default is https://api.openai.com/v1. +# When OpenAI cannot be accessed in China, replace it with a domestic mirror address, +# or when a local model provides OpenAI compatible API, it can be replaced. +OPENAI_API_BASE=https://api.openai.com/v1 + +# When enabled, migrations will be executed prior to application startup +# and the application will start after the migrations have completed. +MIGRATION_ENABLED=true + +# File Access Time specifies a time interval in seconds for the file to be accessed. +# The default value is 300 seconds. +FILES_ACCESS_TIMEOUT=300 + +# Access token expiration time in minutes +ACCESS_TOKEN_EXPIRE_MINUTES=60 + +# Refresh token expiration time in days +REFRESH_TOKEN_EXPIRE_DAYS=30 + +# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer. +APP_MAX_ACTIVE_REQUESTS=0 +APP_MAX_EXECUTION_TIME=1200 + +# ------------------------------ +# Container Startup Related Configuration +# Only effective when starting with docker image or docker-compose. +# ------------------------------ + +# API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed. +DIFY_BIND_ADDRESS=0.0.0.0 + +# API service binding port number, default 5001. +DIFY_PORT=5001 + +# The number of API server workers, i.e., the number of workers. +# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent +# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers +SERVER_WORKER_AMOUNT=2 + +# Defaults to gevent. If using windows, it can be switched to sync or solo. +SERVER_WORKER_CLASS=gevent + +# Default number of worker connections, the default is 10. +SERVER_WORKER_CONNECTIONS=10 + +# Similar to SERVER_WORKER_CLASS. +# If using windows, it can be switched to sync or solo. +CELERY_WORKER_CLASS= + +# Request handling timeout. The default is 200, +# it is recommended to set it to 360 to support a longer sse connection time. +GUNICORN_TIMEOUT=360 + +# The number of Celery workers. The default is 1, and can be set as needed. +CELERY_WORKER_AMOUNT= + +# Flag indicating whether to enable autoscaling of Celery workers. +# +# Autoscaling is useful when tasks are CPU intensive and can be dynamically +# allocated and deallocated based on the workload. +# +# When autoscaling is enabled, the maximum and minimum number of workers can +# be specified. The autoscaling algorithm will dynamically adjust the number +# of workers within the specified range. +# +# Default is false (i.e., autoscaling is disabled). +# +# Example: +# CELERY_AUTO_SCALE=true +CELERY_AUTO_SCALE=true + +# The maximum number of Celery workers that can be autoscaled. +# This is optional and only used when autoscaling is enabled. +# Default is not set. +CELERY_MAX_WORKERS=3 + +# The minimum number of Celery workers that can be autoscaled. +# This is optional and only used when autoscaling is enabled. +# Default is not set. +CELERY_MIN_WORKERS=1 + +# API Tool configuration +API_TOOL_DEFAULT_CONNECT_TIMEOUT=10 +API_TOOL_DEFAULT_READ_TIMEOUT=60 + + +# ------------------------------ +# Database Configuration +# The database uses PostgreSQL. Please use the public schema. 
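+# (In this deployment DB_HOST/REDIS_HOST further below are set to 10.0.0.247, an external host on the
+#  private network, rather than the bundled `db`/`redis` service names.)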
+# It is consistent with the configuration in the 'db' service below. +# ------------------------------ + +DB_USERNAME=postgres +DB_PASSWORD=V2rayStrP@ss +DB_HOST=10.0.0.247 +DB_PORT=5432 +DB_DATABASE=dify +# The size of the database connection pool. +# The default is 30 connections, which can be appropriately increased. +SQLALCHEMY_POOL_SIZE=30 +# Database connection pool recycling time, the default is 3600 seconds. +SQLALCHEMY_POOL_RECYCLE=3600 +# Whether to print SQL, default is false. +SQLALCHEMY_ECHO=false + +# Maximum number of connections to the database +# Default is 100 +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS +POSTGRES_MAX_CONNECTIONS=100 + +# Sets the amount of shared memory used for postgres's shared buffers. +# Default is 128MB +# Recommended value: 25% of available memory +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS +POSTGRES_SHARED_BUFFERS=4096MB + +# Sets the amount of memory used by each database worker for working space. +# Default is 4MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM +POSTGRES_WORK_MEM=64MB + +# Sets the amount of memory reserved for maintenance activities. +# Default is 64MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM +POSTGRES_MAINTENANCE_WORK_MEM=128MB + +# Sets the planner's assumption about the effective cache size. +# Default is 4096MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE +POSTGRES_EFFECTIVE_CACHE_SIZE=8192MB + +# ------------------------------ +# Redis Configuration +# This Redis configuration is used for caching and for pub/sub during conversation. +# ------------------------------ + +REDIS_HOST=10.0.0.247 +REDIS_PORT=6379 +REDIS_USERNAME= +REDIS_PASSWORD=V2rayStrP@ss +REDIS_USE_SSL=false +REDIS_DB=0 + +# Whether to use Redis Sentinel mode. +# If set to true, the application will automatically discover and connect to the master node through Sentinel. +REDIS_USE_SENTINEL=false + +# List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port. +# Format: `:,:,:` +REDIS_SENTINELS= +REDIS_SENTINEL_SERVICE_NAME= +REDIS_SENTINEL_USERNAME= +REDIS_SENTINEL_PASSWORD= +REDIS_SENTINEL_SOCKET_TIMEOUT=0.1 + +# List of Redis Cluster nodes. If Cluster mode is enabled, provide at least one Cluster IP and port. +# Format: `:,:,:` +REDIS_USE_CLUSTERS=false +REDIS_CLUSTERS= +REDIS_CLUSTERS_PASSWORD= + +# ------------------------------ +# Celery Configuration +# ------------------------------ + +# Use redis as the broker, and redis db 1 for celery broker. +# Format as follows: `redis://:@:/` +# Example: redis://:V2rayStrP@ss@redis:6379/1 +# If use Redis Sentinel, format as follows: `sentinel://:@:/` +# Example: sentinel://localhost:26379/1;sentinel://localhost:26380/1;sentinel://localhost:26381/1 +CELERY_BROKER_URL=redis://:V2rayStrP@ss@10.0.0.247:6379/1 +BROKER_USE_SSL=false + +# If you are using Redis Sentinel for high availability, configure the following settings. +CELERY_USE_SENTINEL=false +CELERY_SENTINEL_MASTER_NAME= +CELERY_SENTINEL_SOCKET_TIMEOUT=0.1 + +# ------------------------------ +# CORS Configuration +# Used to set the front-end cross-domain access policy. +# ------------------------------ + +# Specifies the allowed origins for cross-origin requests to the Web API, +# e.g. 
https://dify.app or * for all origins. +WEB_API_CORS_ALLOW_ORIGINS=* + +# Specifies the allowed origins for cross-origin requests to the console API, +# e.g. https://cloud.dify.ai or * for all origins. +CONSOLE_CORS_ALLOW_ORIGINS=* + +# ------------------------------ +# File Storage Configuration +# ------------------------------ + +# The type of storage to use for storing user files. +STORAGE_TYPE=opendal + +# Apache OpenDAL Configuration +# The configuration for OpenDAL consists of the following format: OPENDAL__. +# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services. +# Dify will scan configurations starting with OPENDAL_ and automatically apply them. +# The scheme name for the OpenDAL storage. +OPENDAL_SCHEME=fs +# Configurations for OpenDAL Local File System. +OPENDAL_FS_ROOT=storage + +# S3 Configuration +# +S3_ENDPOINT=https://axqr6x6t48wm.compat.objectstorage.us-phoenix-1.oraclecloud.com +S3_REGION=us-phoenix-1 +S3_BUCKET_NAME=phoenix-10 +S3_ACCESS_KEY=e87a121f1548b244c7bd649a1f0ca35195d46cf2 +S3_SECRET_KEY=uT+NIgJiKPjSaPT8EVUw3xbLSCv/CFMFuebVauznafk= +# Whether to use AWS managed IAM roles for authenticating with the S3 service. +# If set to false, the access key and secret key must be provided. +S3_USE_AWS_MANAGED_IAM=false + +# Azure Blob Configuration +# +AZURE_BLOB_ACCOUNT_NAME=difyai +AZURE_BLOB_ACCOUNT_KEY=difyai +AZURE_BLOB_CONTAINER_NAME=difyai-container +AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net + +# Google Storage Configuration +# +GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name +GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64= + +# The Alibaba Cloud OSS configurations, +# +ALIYUN_OSS_BUCKET_NAME=your-bucket-name +ALIYUN_OSS_ACCESS_KEY=your-access-key +ALIYUN_OSS_SECRET_KEY=your-secret-key +ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com +ALIYUN_OSS_REGION=ap-southeast-1 +ALIYUN_OSS_AUTH_VERSION=v4 +# Don't start with '/'. OSS doesn't support leading slash in object names. +ALIYUN_OSS_PATH=your-path + +# Tencent COS Configuration +# +TENCENT_COS_BUCKET_NAME=your-bucket-name +TENCENT_COS_SECRET_KEY=your-secret-key +TENCENT_COS_SECRET_ID=your-secret-id +TENCENT_COS_REGION=your-region +TENCENT_COS_SCHEME=your-scheme + +# Oracle Storage Configuration +# +OCI_ENDPOINT=https://axqr6x6t48wm.compat.objectstorage.us-phoenix-1.oraclecloud.com +OCI_BUCKET_NAME=phoenix-10 +OCI_ACCESS_KEY=e87a121f1548b244c7bd649a1f0ca35195d46cf2 +OCI_SECRET_KEY=uT+NIgJiKPjSaPT8EVUw3xbLSCv/CFMFuebVauznafk= +OCI_REGION=us-phoenix-1 + + +# Huawei OBS Configuration +# +HUAWEI_OBS_BUCKET_NAME=your-bucket-name +HUAWEI_OBS_SECRET_KEY=your-secret-key +HUAWEI_OBS_ACCESS_KEY=your-access-key +HUAWEI_OBS_SERVER=your-server-url + +# Volcengine TOS Configuration +# +VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name +VOLCENGINE_TOS_SECRET_KEY=your-secret-key +VOLCENGINE_TOS_ACCESS_KEY=your-access-key +VOLCENGINE_TOS_ENDPOINT=your-server-url +VOLCENGINE_TOS_REGION=your-region + +# Baidu OBS Storage Configuration +# +BAIDU_OBS_BUCKET_NAME=your-bucket-name +BAIDU_OBS_SECRET_KEY=your-secret-key +BAIDU_OBS_ACCESS_KEY=your-access-key +BAIDU_OBS_ENDPOINT=your-server-url + +# Supabase Storage Configuration +# +SUPABASE_BUCKET_NAME=your-bucket-name +SUPABASE_API_KEY=your-access-key +SUPABASE_URL=your-server-url + +# ------------------------------ +# Vector Database Configuration +# ------------------------------ + +# The type of vector store to use. 
+# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`. +VECTOR_STORE=weaviate + +# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`. +WEAVIATE_ENDPOINT=http://10.0.0.247:8080 +WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih + +# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`. +QDRANT_URL=http://qdrant:6333 +QDRANT_API_KEY=V2rayStrP@ss +QDRANT_CLIENT_TIMEOUT=20 +QDRANT_GRPC_ENABLED=false +QDRANT_GRPC_PORT=6334 + +# Milvus configuration Only available when VECTOR_STORE is `milvus`. +# The milvus uri. +MILVUS_URI=http://127.0.0.1:19530 +MILVUS_TOKEN= +MILVUS_USER=root +MILVUS_PASSWORD=Milvus +MILVUS_ENABLE_HYBRID_SEARCH=False + +# MyScale configuration, only available when VECTOR_STORE is `myscale` +# For multi-language support, please set MYSCALE_FTS_PARAMS with referring to: +# https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters +MYSCALE_HOST=myscale +MYSCALE_PORT=8123 +MYSCALE_USER=default +MYSCALE_PASSWORD= +MYSCALE_DATABASE=dify +MYSCALE_FTS_PARAMS= + + +# Chroma configuration, only available when VECTOR_STORE is `chroma` +CHROMA_HOST=127.0.0.1 +CHROMA_PORT=8000 +CHROMA_TENANT=default_tenant +CHROMA_DATABASE=default_database +CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider +CHROMA_AUTH_CREDENTIALS= + +# Oracle configuration, only available when VECTOR_STORE is `oracle` +ORACLE_HOST=oracle +ORACLE_PORT=1521 +ORACLE_USER=dify +ORACLE_PASSWORD=dify +ORACLE_DATABASE=FREEPDB1 + + +# ------------------------------ +# Knowledge Configuration +# ------------------------------ + +# Upload file size limit, default 15M. +UPLOAD_FILE_SIZE_LIMIT=150 + +# The maximum number of files that can be uploaded at a time, default 5. +UPLOAD_FILE_BATCH_LIMIT=10 + +# ETL type, support: `dify`, `Unstructured` +# `dify` Dify's proprietary file extraction scheme +# `Unstructured` Unstructured.io file extraction scheme +ETL_TYPE=dify + +# Unstructured API path and API key, needs to be configured when ETL_TYPE is Unstructured +# Or using Unstructured for document extractor node for pptx. +# For example: http://unstructured:8000/general/v0/general +UNSTRUCTURED_API_URL= +UNSTRUCTURED_API_KEY= +SCARF_NO_ANALYTICS=true + +# ------------------------------ +# Model Configuration +# ------------------------------ + +# The maximum number of tokens allowed for prompt generation. +# This setting controls the upper limit of tokens that can be used by the LLM +# when generating a prompt in the prompt generation tool. +# Default: 512 tokens. +PROMPT_GENERATION_MAX_TOKENS=4096 + +# The maximum number of tokens allowed for code generation. +# This setting controls the upper limit of tokens that can be used by the LLM +# when generating code in the code generation tool. +# Default: 1024 tokens. +CODE_GENERATION_MAX_TOKENS=20480 + +# ------------------------------ +# Multi-modal Configuration +# ------------------------------ + +# The format of the image/video/audio/document sent when the multi-modal model is input, +# the default is base64, optional url. +# The delay of the call in url mode will be lower than that in base64 mode. +# It is generally recommended to use the more compatible base64 mode. 
+# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document. +MULTIMODAL_SEND_FORMAT=base64 +# Upload image file size limit, default 10M. +UPLOAD_IMAGE_FILE_SIZE_LIMIT=100 +# Upload video file size limit, default 100M. +UPLOAD_VIDEO_FILE_SIZE_LIMIT=10000 +# Upload audio file size limit, default 50M. +UPLOAD_AUDIO_FILE_SIZE_LIMIT=500 + +# ------------------------------ +# Sentry Configuration +# Used for application monitoring and error log tracking. +# ------------------------------ +SENTRY_DSN= + +# API Service Sentry DSN address, default is empty, when empty, +# all monitoring information is not reported to Sentry. +# If not set, Sentry error reporting will be disabled. +API_SENTRY_DSN= +# API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%. +API_SENTRY_TRACES_SAMPLE_RATE=1.0 +# API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%. +API_SENTRY_PROFILES_SAMPLE_RATE=1.0 + +# Web Service Sentry DSN address, default is empty, when empty, +# all monitoring information is not reported to Sentry. +# If not set, Sentry error reporting will be disabled. +WEB_SENTRY_DSN= + +# ------------------------------ +# Notion Integration Configuration +# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations +# ------------------------------ + +# Configure as "public" or "internal". +# Since Notion's OAuth redirect URL only supports HTTPS, +# if deploying locally, please use Notion's internal integration. +NOTION_INTEGRATION_TYPE=internal +# Notion OAuth client secret (used for public integration type) +NOTION_CLIENT_SECRET= +# Notion OAuth client id (used for public integration type) +NOTION_CLIENT_ID= +# Notion internal integration secret. +# If the value of NOTION_INTEGRATION_TYPE is "internal", +# you need to configure this variable. +NOTION_INTERNAL_SECRET=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 + +# ------------------------------ +# Mail related configuration +# ------------------------------ + +# Mail type, support: resend, smtp +MAIL_TYPE=resend + +# Default send from email address, if not specified +MAIL_DEFAULT_SEND_FROM= + +# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`. +RESEND_API_URL=https://api.resend.com +RESEND_API_KEY=your-resend-api-key + + +# SMTP server configuration, used when MAIL_TYPE is `smtp` +SMTP_SERVER= +SMTP_PORT=465 +SMTP_USERNAME= +SMTP_PASSWORD= +SMTP_USE_TLS=true +SMTP_OPPORTUNISTIC_TLS=false + +# ------------------------------ +# Others Configuration +# ------------------------------ + +# Maximum length of segmentation tokens for indexing +INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 + +# Member invitation link valid time (hours), +# Default: 72. +INVITE_EXPIRY_HOURS=72 + +# Reset password token valid time (minutes), +RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 + +# The sandbox service endpoint. 
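+# (http://sandbox:8194 is only resolvable from containers on the compose network; the sandbox service's own
+#  healthcheck calls `curl -f http://localhost:8194/health`, and the same path can be used from the api
+#  container to verify connectivity.)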
+CODE_EXECUTION_ENDPOINT=http://sandbox:8194 +CODE_EXECUTION_API_KEY=dify-sandbox +CODE_MAX_NUMBER=9223372036854775807 +CODE_MIN_NUMBER=-9223372036854775808 +CODE_MAX_DEPTH=5 +CODE_MAX_PRECISION=20 +CODE_MAX_STRING_LENGTH=80000 +CODE_MAX_STRING_ARRAY_LENGTH=30 +CODE_MAX_OBJECT_ARRAY_LENGTH=30 +CODE_MAX_NUMBER_ARRAY_LENGTH=1000 +CODE_EXECUTION_CONNECT_TIMEOUT=10 +CODE_EXECUTION_READ_TIMEOUT=60 +CODE_EXECUTION_WRITE_TIMEOUT=10 +TEMPLATE_TRANSFORM_MAX_LENGTH=80000 + +# Workflow runtime configuration +WORKFLOW_MAX_EXECUTION_STEPS=500 +WORKFLOW_MAX_EXECUTION_TIME=1200 +WORKFLOW_CALL_MAX_DEPTH=5 +MAX_VARIABLE_SIZE=204800 +WORKFLOW_PARALLEL_DEPTH_LIMIT=3 +WORKFLOW_FILE_UPLOAD_LIMIT=10 + +# HTTP request node in workflow configuration +HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 +HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 + +# SSRF Proxy server HTTP URL +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +# SSRF Proxy server HTTPS URL +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 + +# ------------------------------ +# Environment Variables for web Service +# ------------------------------ + +# The timeout for the text generation in millisecond +TEXT_GENERATION_TIMEOUT_MS=60000 + +# ------------------------------ +# Environment Variables for db Service +# ------------------------------ + +PGUSER=${DB_USERNAME} +# The password for the default postgres user. +POSTGRES_PASSWORD=${DB_PASSWORD} +# The name of the default postgres database. +POSTGRES_DB=${DB_DATABASE} +# postgres data directory +PGDATA=/var/lib/postgresql/data/pgdata + +# ------------------------------ +# Environment Variables for sandbox Service +# ------------------------------ + +# The API key for the sandbox service +SANDBOX_API_KEY=dify-sandbox +# The mode in which the Gin framework runs +SANDBOX_GIN_MODE=release +# The timeout for the worker in seconds +SANDBOX_WORKER_TIMEOUT=15 +# Enable network for the sandbox service +SANDBOX_ENABLE_NETWORK=true +# HTTP proxy URL for SSRF protection +SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 +# HTTPS proxy URL for SSRF protection +SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 +# The port on which the sandbox service runs +SANDBOX_PORT=8194 + +# ------------------------------ +# Environment Variables for weaviate Service +# (only used when VECTOR_STORE is weaviate) +# ------------------------------ +WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate +WEAVIATE_QUERY_DEFAULTS_LIMIT=25 +WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true +WEAVIATE_DEFAULT_VECTORIZER_MODULE=none +WEAVIATE_CLUSTER_HOSTNAME=node1 +WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true +WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih +WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai +WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true +WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai + + + +# ------------------------------ +# Environment Variables for SSRF Proxy +# ------------------------------ +SSRF_HTTP_PORT=3128 +SSRF_COREDUMP_DIR=/var/spool/squid +SSRF_REVERSE_PROXY_PORT=8194 +SSRF_SANDBOX_HOST=sandbox +SSRF_DEFAULT_TIME_OUT=5 +SSRF_DEFAULT_CONNECT_TIME_OUT=5 +SSRF_DEFAULT_READ_TIME_OUT=5 +SSRF_DEFAULT_WRITE_TIME_OUT=5 + +# ------------------------------ +# docker env var for specifying vector db type at startup +# (based on the vector db type, the corresponding docker +# compose profile will be used) +# if you want to use unstructured, add ',unstructured' to the end +# ------------------------------ +COMPOSE_PROFILES=${VECTOR_STORE:-weaviate} + +# ------------------------------ +# Docker Compose Service 
Expose Host Port Configurations +# ------------------------------ +EXPOSE_NGINX_PORT=20080 +EXPOSE_NGINX_SSL_PORT=20443 + +# ---------------------------------------------------------------------------- +# ModelProvider & Tool Position Configuration +# Used to specify the model providers and tools that can be used in the app. +# ---------------------------------------------------------------------------- + +# Pin, include, and exclude tools +# Use comma-separated values with no spaces between items. +# Example: POSITION_TOOL_PINS=bing,google +POSITION_TOOL_PINS= +POSITION_TOOL_INCLUDES= +POSITION_TOOL_EXCLUDES= + +# Pin, include, and exclude model providers +# Use comma-separated values with no spaces between items. +# Example: POSITION_PROVIDER_PINS=openai,openllm +POSITION_PROVIDER_PINS= +POSITION_PROVIDER_INCLUDES= +POSITION_PROVIDER_EXCLUDES= + +# CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP +CSP_WHITELIST= + +# Enable or disable create tidb service job +CREATE_TIDB_SERVICE_JOB_ENABLED=false + +# Maximum number of submitted thread count in a ThreadPool for parallel node execution +MAX_SUBMIT_COUNT=100 + +# The maximum number of top-k value for RAG. +TOP_K_MAX_VALUE=10 + +# ------------------------------ +# Plugin Daemon Configuration +# ------------------------------ + +DB_PLUGIN_DATABASE=dify_plugin +EXPOSE_PLUGIN_DAEMON_PORT=5002 +PLUGIN_DAEMON_PORT=5002 +PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi +PLUGIN_DAEMON_URL=http://plugin_daemon:5002 +PLUGIN_MAX_PACKAGE_SIZE=52428800 +PLUGIN_PPROF_ENABLED=false + +PLUGIN_DEBUGGING_HOST=0.0.0.0 +PLUGIN_DEBUGGING_PORT=5003 +EXPOSE_PLUGIN_DEBUGGING_HOST=localhost +EXPOSE_PLUGIN_DEBUGGING_PORT=5003 + +PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 +PLUGIN_DIFY_INNER_API_URL=http://api:5001 + +ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} + +MARKETPLACE_ENABLED=true +MARKETPLACE_API_URL=https://marketplace.dify.ai + +FORCE_VERIFYING_SIGNATURE=true diff --git a/0-部署应用/Phoneix-arm64-02/AI-Dify/docker-compose-modified.yaml b/0-部署应用/Phoneix-arm64-02/AI-Dify/docker-compose-modified.yaml new file mode 100644 index 0000000..452b234 --- /dev/null +++ b/0-部署应用/Phoneix-arm64-02/AI-Dify/docker-compose-modified.yaml @@ -0,0 +1,59 @@ +# ================================================================== +# WARNING: This file is auto-generated by generate_docker_compose +# Do not modify this file directly. Instead, update the .env.example +# or docker-compose-template.yaml and regenerate this file. +# ================================================================== + +services: + # The postgres database. + db: + image: postgres:15-alpine + restart: always + environment: + PGUSER: ${PGUSER:-postgres} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-V2rayStrP@ss} + POSTGRES_DB: ${POSTGRES_DB:-dify} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + command: > + postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}' + -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-1024MB}' + -c 'work_mem=${POSTGRES_WORK_MEM:-64MB}' + -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-128MB}' + -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-2048MB}' + volumes: + - ./volumes/db/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + ports: + - 5432:5432 + + # The Weaviate vector store. 
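+  # (Note: the data volume below is mounted from /mnt/ramdisk; if that path is a RAM-backed mount, as the
+  #  name suggests, Weaviate data will not survive a reboot unless the ramdisk is re-created and the data restored.)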
+ weaviate: + image: semitechnologies/weaviate:1.19.0 + restart: always + volumes: + # Mount the Weaviate data directory to the con tainer. + - /mnt/ramdisk/weaviate:/var/lib/weaviate + environment: + # The Weaviate configurations + # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information. + PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-50} + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true} + DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + ports: + - 8080:8080 + + + + + diff --git a/0-部署应用/Phoneix-arm64-02/基本信息.txt b/0-部署应用/Phoneix-arm64-02/基本信息.txt new file mode 100644 index 0000000..c8ca20c --- /dev/null +++ b/0-部署应用/Phoneix-arm64-02/基本信息.txt @@ -0,0 +1,4 @@ + +129.146.57.94 + +10.0.0.247 \ No newline at end of file diff --git a/0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品/2-17-deploy.env b/0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品-废弃/2-17-deploy.env similarity index 100% rename from 0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品/2-17-deploy.env rename to 0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品-废弃/2-17-deploy.env diff --git a/0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品/app-flowy-docker-compose.yaml b/0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品-废弃/app-flowy-docker-compose.yaml similarity index 100% rename from 0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品/app-flowy-docker-compose.yaml rename to 0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品-废弃/app-flowy-docker-compose.yaml diff --git a/0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品/appflowy-client.yaml b/0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品-废弃/appflowy-client.yaml similarity index 100% rename from 0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品/appflowy-client.yaml rename to 0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品-废弃/appflowy-client.yaml diff --git a/0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品/deploy.env b/0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品-废弃/deploy.env similarity index 100% rename from 0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品/deploy.env rename to 0-部署应用/Tokyo-arm64-01/AppFlowy-Notion替代品-废弃/deploy.env diff --git a/0-部署应用/Tokyo-arm64-01/telegram-ai-bot/README_CN.md b/0-部署应用/Tokyo-arm64-01/telegram-ai-bot/README_CN.md new file mode 100644 index 0000000..1e8c9ed --- /dev/null +++ b/0-部署应用/Tokyo-arm64-01/telegram-ai-bot/README_CN.md @@ -0,0 +1,514 @@ +

+ +

+ +# 🤖️ TeleChat + +[英文](README.md) | [中文](README_CN.md) + +

+<!-- badge images: docker pull, etc. -->

+ +ChatGPT Telegram 机器人是一个强大的 Telegram 机器人,可以使用多种主流的大语言模型 API,包括 GPT-3.5/4/4 Turbo/4o/o1,DALL·E 3,Claude2.1/3/3.5 API,Gemini 1.5 Pro/Flash,Vertex AI(Claude系列/Gemini系列),Groq Mixtral-8x7b/LLaMA2-70b 和 DuckDuckGo(gpt-4o-mini, claude-3-haiku, Meta-Llama-3.1-70B, Mixtral-8x7B)。它使用户能够在 Telegram 上进行高效的对话和信息搜索。 + +## ✨ 功能 + +- **多种AI模型**:支持GPT-3.5/4/4 Turbo/4o/o1,DALL·E 3,Claude2.1/3/3.5 API,Gemini 1.5 Pro/Flash,Vertex AI(Claude系列/Gemini系列),Groq Mixtral-8x7b/LLaMA2-70b 和 DuckDuckGo(gpt-4o-mini, claude-3-haiku, Meta-Llama-3.1-70B, Mixtral-8x7B)。还支持 one-api/new-api/[uni-api](https://github.com/yym68686/uni-api)。利用自研 API 请求后端 [SDK](https://github.com/yym68686/ModelMerge),不依赖 OpenAI SDK。 +- **多模态问答**:支持语音、音频、图像和 PDF/TXT/MD/python 文档的问答。用户可以直接在聊天框中上传文件使用。 +- **群聊主题模式**:支持在群聊中启用主题模式,在主题之间隔离API、对话历史、插件配置和偏好设置。 +- **丰富的插件系统**:支持网页搜索(DuckDuckGo和Google)、URL 总结、ArXiv 论文总结和代码解释器。 +- **用户友好界面**:允许在聊天窗口内灵活切换模型,并支持类似打字机效果的流式输出。支持精确的 Markdown 消息渲染,利用了我的另一个[项目](https://github.com/yym68686/md2tgmd)。 +- **高效消息处理**:异步处理消息,多线程回答问题,支持隔离对话,并为不同用户提供独特对话。 +- **长文本消息处理**: 自动合并长文本消息,突破Telegram的单条消息长度限制。当机器人的回复超过Telegram的限制时,它将被拆分成多条消息。 +- **多用户对话隔离**:支持对话隔离和配置隔离,允许在多用户和单用户模式之间进行选择。 +- **问题预测**: 自动生成后续问题,预测用户可能会接下来询问的问题。 +- **多语言界面**:支持简体中文、繁体中文、俄文和英文界面。 +- **白名单、黑名单和管理员设置**:支持设置白名单、黑名单和管理员。 +- **内联模式**:允许用户在任何聊天窗口中 @ 机器人以生成答案,而无需在机器人的聊天窗口中提问。 +- **方便部署**:支持一键部署到 koyeb、Zeabur、Replit,真正零成本和傻瓜式部署流程。还支持 kuma 防休眠,以及 Docker 和 fly.io 部署。 + +## 🍃 环境变量 + +以下是与机器人核心设置相关的环境变量列表: + +| 变量名称 | 描述 | 是否必需? | +|---------------|-------------|-----------| +| BOT_TOKEN | Telegram 机器人令牌。 在 [BotFather](https://t.me/BotFather) 上创建一个机器人以获取 BOT_TOKEN。 | **是** | +| API | OpenAI 或第三方 API 密钥。 | 否 | +| GPT_ENGINE | 设置默认的QA模型;默认是:`gpt-4o`。此项可以使用机器人的“info”命令自由切换,原则上不需要设置。 | 否 | +| WEB_HOOK | 每当电报机器人收到用户消息时,消息将被传递到 WEB_HOOK,机器人将在此监听并及时处理收到的消息。 | 否 | +| API_URL | 如果您使用的是OpenAI官方API,则无需设置此项。如果您使用的是第三方API,则需要填写第三方代理网站。默认值是:https://api.openai.com/v1/chat/completions | 否 | +| GROQ_API_KEY | Groq官方API密钥。 | 否 | +| GOOGLE_AI_API_KEY | Google AI 官方 API 密钥。使用此环境变量访问 Gemini 系列模型,包括 Gemini 1.5 pro 和 Gemini 1.5 flash。| 否 | +| VERTEX_PRIVATE_KEY | 描述: Google Cloud Vertex AI 服务账户的私钥。格式: 包含服务账户私钥信息的 JSON 字符串里面的 private_key 字段的值,请使用双引号包裹私钥。如何获取: 在 Google Cloud 控制台中创建一个服务账户,生成一个 JSON 密钥文件,并将其内容里面的 private_key 字段的值使用双引号包裹后设置为此环境变量的值。 | 否 | +| VERTEX_PROJECT_ID | 描述:您的 Google Cloud 项目 ID。格式:一个字符串,通常由小写字母、数字和连字符组成。如何获取:您可以在 Google Cloud 控制台的项目选择器中找到您的项目 ID。 | 否 | +| VERTEX_CLIENT_EMAIL | 描述:Google Cloud Vertex AI 服务账户的电子邮件地址。格式:通常是 "service-account-name@developer.gserviceaccount.com" 形式的字符串。获取方式:在创建服务账户时生成,或可以在 Google Cloud 控制台的 "IAM & 管理" 部分的服务账户详细信息中查看。 | 否 | +| claude_api_key | Claude 官方 API 密钥。 | 否 | +| CLAUDE_API_URL | 如果您使用的是Anthropic官方API,则无需设置此项。如果您使用的是第三方Anthropic API,则需要填写第三方代理网站。默认值是:https://api.anthropic.com/v1/messages | 否 | +| NICK | 默认是空的,NICK 是机器人的名字。机器人只会在用户输入的消息以 NICK 开头时才会响应,否则机器人会响应任何消息。特别是在群聊中,如果没有 NICK,机器人会回复所有消息。 | 否 | +| GOOGLE_API_KEY | 如果你需要使用谷歌搜索,你需要设置它。如果你不设置这个环境变量,机器人将默认提供duckduckgo搜索。 | No | +| GOOGLE_CSE_ID | 如果你需要使用谷歌搜索,你需要和 GOOGLE_API_KEY 一起设置。 | 否 | +| whitelist | 设置哪些用户可以访问机器人,并用 ',' 连接被授权使用机器人的用户ID。默认值是 `None`,这意味着机器人对所有人开放。 | 否 | +| BLACK_LIST | 设置哪些用户禁止访问机器人,并用 ',' 连接被授权使用机器人的用户ID。默认值是 `None` | 否 | +| ADMIN_LIST | 设置管理员列表。只有管理员可以使用 `/info` 命令配置机器人。 | 否 | +| GROUP_LIST | 设置可以使用机器人的群组列表。使用逗号(',')连接群组ID。即使群组成员不在白名单中,只要群组ID在GROUP_LIST中,群组的所有成员都可以使用机器人。 | 否 | +| CUSTOM_MODELS | 设置自定义模型名称列表。使用逗号(',')连接模型名称。如果需要删除默认模型,请在默认模型名称前添加连字符(-)。如果要删除所有默认模型,请使用 `-all`。 | 否 | +| CHAT_MODE | 引入多用户模式,不同用户的配置不共享。当 CHAT_MODE 为 `global` 时,所有用户共享配置。当 CHAT_MODE 为 
`multiusers` 时,用户配置彼此独立。 | 否 | +| temperature | 指定 LLM 的温度。默认值是 `0.5`。 | 否 | +| GET_MODELS | 指定是否通过 API 获取支持的模型。默认值为 `False`。 | 否 | +| SYSTEMPROMPT | 指定系统提示,系统提示是字符串,例如:`SYSTEMPROMPT=You are ChatGPT, a large language model trained by OpenAI. Respond conversationally`。默认是 `None`。系统提示的设置仅在 `CHAT_MODE` 为 `global` 时,系统提示的设置才会有效。当 `CHAT_MODE` 为 `multiusers` 时,系统提示的环境变量无论是任何值都不会修改任何用户的系统提示,因为用户不希望自己设置的系统系统被修改为全局系统提示。 | 否 | +| LANGUAGE | 指定机器人显示的默认语言,包括按钮显示语言和对话语言。默认是 `English`。目前仅支持设置为下面四种语言:`English`,`Simplified Chinese`,`Traditional Chinese`,`Russian`。同时也可以在机器人部署后使用 `/info` 命令设置显示语言 | 否 | +| CONFIG_DIR | 指定存储用户配置文件夹。CONFIG_DIR 是用于存储用户配置的文件夹。每次机器人启动时,它都会从 CONFIG_DIR 文件夹读取配置,因此用户每次重新启动时不会丢失之前的设置。您可以在本地使用 Docker 部署时,通过使用 `-v` 参数挂载文件夹来实现配置持久化。默认值是 `user_configs`。 | 否 | +| RESET_TIME | 指定机器人每隔多少秒重置一次聊天历史记录,每隔 RESET_TIME 秒,机器人会重置除了管理员列表外所有用户的聊天历史记录,每个用户重置时间不一样,根据每个用户最后的提问时间来计算下一次重置时间。而不是所有用户在同一时间重置。默认值是 `3600` 秒,最小值是 `60` 秒。 | 否 | + +以下是与机器人偏好设置相关的环境变量列表,偏好设置也可以通过机器人启动后使用 `/info` 命令,点击 `偏好设置` 按钮来设置: + +| 变量名称 | 描述 | 必需的? | +|---------------|-------------|-----------| +| PASS_HISTORY | 默认值是 `9999`。机器人会记住对话历史,并在下次回复时考虑上下文。如果设置为 `0`,机器人将忘记对话历史,只考虑当前对话。PASS_HISTORY 的值必须大于或等于 0。对应于偏好设置里面的名为 `对话历史` 的按钮。 | 否 | +| LONG_TEXT | 如果用户的输入消息的文本长度超出了 Telegram 的限制,并在很短的时间内连续发送多个消息,机器人会将这些多个消息视为一个。默认值是 `True`。对应于偏好设置里面的名为 `长文本合并` 的按钮。 | 否 | +| IMAGEQA | 是否启用图像问答,默认设置是模型可以回答图像内容,默认值为 `True`。对应于偏好设置里面的名为 `图片问答` 的按钮。 | 否 | +| LONG_TEXT_SPLIT | 当机器人的回复超过Telegram限制时,它将被拆分为多个消息。默认值是 `True`。对应于偏好设置里面的名为 `长文本分割` 的按钮。 | 否 | +| FILE_UPLOAD_MESS | 当文件或图像上传成功并且机器人处理完成时,机器人将发送一条消息,提示上传成功。默认值为 `True`。对应于偏好设置里面的名为 `文件上传成功提示消息` 的按钮。 | 否 | +| FOLLOW_UP | 自动生成多个相关问题供用户选择。默认值为 `False`。对应于偏好设置里面的名为 `猜你想问` 的按钮。 | 否 | +| TITLE | 是否在机器人回复的开头显示模型名称。默认值为 `False`。对应于偏好设置里面的名为 `模型标题` 的按钮。 | 否 | + +| REPLY | 机器人是否应以“回复”格式回复用户的消息。默认值为 `False`。对应于偏好设置里面的名为 `回复消息` 的按钮。 | 否 | + +以下是与机器人插件设置相关的环境变量列表: + +| 变量名称 | 描述 | 必需的? | +|---------------|-------------|-----------| +| SEARCH | 是否启用搜索插件。默认值为 `True`。 | 否 | +| URL | 是否启用URL摘要插件。默认值为 `True`。 | 否 | +| ARXIV | 是否启用arXiv论文摘要插件。默认值为 `False`。 | 否 | +| CODE | 是否启用代码解释器插件。默认值为 `False`。 | 否 | +| IMAGE | 是否启用图像生成插件。默认值为 `False`。 | 否 | +| DATE | 是否启用日期插件。默认值为 `False`。 | 否 | + + +## Koyeb 远程部署 + +可以使用两种方式部署在 koyeb 上部署,一种是使用 Koyeb 提供的 docker 镜像一键部署,另一种是导入本仓库部署。这两种方式都是免费的。第一种方式部署简单,但是无法自动更新,第二种方式部署稍微复杂,但是可以自动更新。 + +### 一键部署 + +点击下面的按钮可以自动使用构建好的 docker 镜像一键部署: + +[![Deploy to Koyeb](https://www.koyeb.com/static/images/deploy/button.svg)](https://app.koyeb.com/deploy?type=docker&image=docker.io/yym68686/chatgpt:latest&name=chatbot) + +⚠️ 注意:使用 Koyeb 部署时,必须添加环境变量 `WEB_HOOK`,否则机器人无法接收消息。使用类似 `https://appname.koyeb.app` 的字符串作为 `WEB_HOOK` 的值,Koyeb 会自动分配一个二级域名。 + +### 仓库部署 + +1. fork 本仓库 [点击 fork 本仓库](https://github.com/yym68686/ChatGPT-Telegram-Bot/fork) + +2. 部署时候需要选择以仓库的方式,`Run command` 设置为 `python3 bot.py`,`Exposed ports` 设置为 `8080`。 + +3. 
[安装 pull](https://github.com/apps/pull) 自动同步本仓库。 + +## Zeabur 远程部署 + +一键部署: + +[![在 Zeabur 上部署](https://zeabur.com/button.svg)](https://zeabur.com/templates/R5JY5O?referralCode=yym68686) + +如果您需要后续功能更新,建议采用以下部署方法: + +- 首先 fork 这个仓库,然后注册 [Zeabur](https://zeabur.com)。目前,Zeabur 不支持免费的 Docker 容器部署。如果你需要使用 Zeabur 来部署这个项目的机器人,你需要升级到 Developer Plan。幸运的是,Zeabur 推出了他们的[赞助计划](https://zeabur.com/docs/billing/sponsor),为这个项目的所有贡献者提供一个月的 Developer Plan。如果你有想要增强的功能,欢迎提交 pull requests 到这个项目。 +- 从您自己的Github仓库导入。 +- 设置所需的环境变量,并重新部署。 +- 如果您需要后续的功能更新,只需在您自己的代码库中同步此代码库,并在 Zeabur 中重新部署以获取最新功能。 + +## Replit 远程部署 + +[![在 Repl.it 上运行](https://replit.com/badge/github/yym68686/ChatGPT-Telegram-Bot)](https://replit.com/new/github/yym68686/ChatGPT-Telegram-Bot) + +导入 Github 仓库后,设置运行命令 + +```bash +pip install -r requirements.txt > /dev/null && python3 bot.py +``` + +在工具侧边栏中选择“Secrets”,添加机器人所需的环境变量,其中: + +- WEB_HOOK: Replit 会自动为您分配一个域名,填写 `https://appname.username.repl.co` +- 记得打开“始终开启” + +点击屏幕顶部的运行按钮来运行机器人。 + +## fly.io 远程部署 + +官方文档: https://fly.io/docs/ + +使用 Docker 镜像部署 fly.io 应用程序 + +```bash +flyctl launch --image yym68686/chatgpt:latest +``` + +在提示时输入应用程序的名称,并选择“否”以初始化 Postgresql 或 Redis。 + +按照提示进行部署。官方控制面板将提供一个二级域名,可用于访问服务。 + +设置环境变量 + +```bash +flyctl secrets set BOT_TOKEN=bottoken +flyctl secrets set API= +# 可选 +flyctl secrets set WEB_HOOK=https://flyio-app-name.fly.dev/ +flyctl secrets set NICK=javis +``` + +查看所有环境变量 + +```bash +flyctl secrets list +``` + +删除环境变量 + +```bash +flyctl secrets unset MY_SECRET DATABASE_URL +``` + +ssh 到 fly.io 容器 + +```bash +flyctl ssh issue --agent +# ssh 连接 +flyctl ssh establish +``` + +检查 webhook URL 是否正确 + +```bash +https://api.telegram.org/bot/getWebhookInfo +``` + +## Docker 本地部署 + +启动容器 + +```bash +docker run -p 80:8080 --name chatbot -dit \ + -e BOT_TOKEN=your_telegram_bot_token \ + -e API= \ + -e API_URL= \ + -v ./user_configs:/home/user_configs \ + yym68686/chatgpt:latest +``` + +或者如果你想使用 Docker Compose,这里有一个 docker-compose.yml 示例: + +```yaml +version: "3.5" +services: + chatgptbot: + container_name: chatgptbot + image: yym68686/chatgpt:latest + environment: + - BOT_TOKEN= + - API= + - API_URL= + volumes: + - ./user_configs:/home/user_configs + ports: + - 80:8080 +``` + +在后台运行 Docker Compose 容器 + +```bash +docker-compose pull +docker-compose up -d + +# uni-api +docker-compose -f docker-compose-uni-api.yml up -d +``` + +将存储库中的Docker镜像打包并上传到Docker Hub + +```bash +docker build --no-cache -t chatgpt:latest -f Dockerfile.build --platform linux/amd64 . 
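+# Note (not part of the upstream README): the docker-compose file in this repo appears to run
+# this image on an arm64 host (Tokyo-arm64-01), so an amd64-only build like the line above may
+# not be enough there. A hedged multi-arch alternative, assuming Docker buildx is available:
+# docker buildx build --platform linux/amd64,linux/arm64 -t yym68686/chatgpt:latest -f Dockerfile.build --push .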
+docker tag chatgpt:latest yym68686/chatgpt:latest +docker push yym68686/chatgpt:latest +``` + +一键重启 Docker 镜像 + +```bash +set -eu +docker pull yym68686/chatgpt:latest +docker rm -f chatbot +docker run -p 8080:8080 -dit --name chatbot \ +-e BOT_TOKEN= \ +-e API= \ +-e API_URL= \ +-e GOOGLE_API_KEY= \ +-e GOOGLE_CSE_ID= \ +-e claude_api_key= \ +-v ./user_configs:/home/user_configs \ +yym68686/chatgpt:latest +docker logs -f chatbot +``` + +该脚本用于通过单个命令重启Docker镜像。它首先删除名为“chatbot”的现有Docker容器(如果存在)。然后,它运行一个名为“chatbot”的新Docker容器,暴露端口8080并设置各种环境变量。使用的Docker镜像是“yym68686/chatgpt:latest”。最后,它跟踪“chatbot”容器的日志。 + +## 🚀 源代码本地部署 + +python >= 3.10 + +直接从源代码运行机器人而不使用docker,克隆仓库: + +```bash +git clone --recurse-submodules --depth 1 -b main --quiet https://github.com/yym68686/ChatGPT-Telegram-Bot.git +``` + +安装依赖项: + +```bash +pip install -r requirements.txt +``` + +设置环境变量: + +```bash +export BOT_TOKEN= +export API= +``` + +运行: + +```bash +python bot.py +``` + +## 🧩 插件 + +机器人支持多种插件,包括:DuckDuckGo 和 Google 搜索、URL 摘要、ArXiv 论文摘要、DALLE-3 画图和代码解释器等。您可以通过设置环境变量来启用或禁用这些插件。 + +- 如何开发插件? + +插件相关的代码全部在本仓库git 子模块ModelMerge里面,ModelMerge是我开发的一个独立的仓库,用于处理API请求,对话历史记录管理等功能。当你使用git clone的--recurse-submodules参数克隆本仓库后,ModelMerge会自动下载到本地。插件所有的代码在本仓库中的相对路径为 `ModelMerge/src/ModelMerge/plugins`。你可以在这个目录下添加自己的插件代码。插件开发的流程如下: + +1. 在 `ModelMerge/src/ModelMerge/plugins` 目录下创建一个新的 Python 文件,例如 `myplugin.py`。在 `ModelMerge/src/ModelMerge/plugins/__init__.py` 文件中导入你的插件,例如 `from .myplugin import MyPlugin`。 + +2. 在 `ModelMerge/src/ModelMerge/tools/chatgpt.py` 里面的 `function_call_list` 变量中添加你的插件OpenAI tool格式详细的请求体。Claude Gemini tool 不需要额外编写,你仅需要填写OpenAI格式的tool请求体,程序在请求Gemini或者Claude API的时候,会自动转换为Claude/Gemini tool格式。`function_call_list` 是一个字典,键是插件的名称,值是插件的请求体。请保证`function_call_list` 字典的键名保证唯一性,不能和已有的插件键名重复。 + +3. 在 `ModelMerge/src/ModelMerge/plugins/config.py` 里面的 `PLUGINS` 字典里面添加键值对,键是插件的名称,值是插件的环境变量及其默认值。这个默认值是插件的开关,如果默认值是`True`,那么插件默认是开启的,如果默认值是 `False`,那么插件默认是关闭的,需要在用户在 `/info` 命令里面手动开启。 + +4. 最后,在 `ModelMerge/src/ModelMerge/plugins/config.py` 里面的函数 `get_tools_result_async` 添加插件调用的代码,当机器人需要调用插件的时候,会调用这个函数。你需要在这个函数里面添加插件的调用代码。 + +完成上面的步骤,你的插件就可以在机器人中使用了。🎉 + +## 📄 常见问题 + +- WEB_HOOK 环境变量有什么用?应该如何使用? + +WEB_HOOK 是一个 webhook 地址。具体来说,当 Telegram 机器人收到用户消息时,它会将消息发送到 Telegram 服务器,然后 Telegram 服务器将消息转发到机器人设置的 WEB_HOOK 地址的服务器。因此,当消息发送到机器人时,机器人几乎立即执行处理程序。通过 WEB_HOOK 接收消息比未设置 WEB_HOOK 时的响应时间更快。 + +当使用 Zeabur、Replit 或 Koyeb 等平台部署机器人时,这些平台会提供一个域名,你需要将其填写在 WEB_HOOK 中,以便机器人接收用户消息。当然,不设置 WEB_HOOK 也是可以的,但机器人的响应时间会稍长一些,虽然差别不大,所以一般来说不需要设置 WEB_HOOK。 + +当在服务器上部署一个机器人时,你需要使用像nginx或caddy这样的反向代理工具,将Telegram服务器发送的消息转发到你的服务器,这样机器人才能接收到用户消息。因此,你需要将WEB_HOOK设置为你服务器的域名,并将请求WEB_HOOK的流量转发到机器人所在的服务器和相应端口。例如,在caddy中,你可以在caddy配置文件/etc/caddy/Caddyfile中这样配置: + +```caddy +your_webhook_domain.com { + reverse_proxy localhost:8082 +} +``` + +- 为什么我不能使用谷歌搜索? + +默认情况下,提供DuckDuckGo搜索。Google搜索的官方API需要用户申请。它可以提供GPT之前无法回答的实时信息,例如今天微博的热门话题、特定地点的今日天气,以及某个人或新闻事件的进展。 + +- 为什么即使我添加了Google搜索API,我还是不能使用搜索功能? + +有两种可能性: + +1. 只有支持工具使用的大型语言模型(LLM)API才能使用搜索功能。目前,本项目仅支持 OpenAI、Claude 和 Gemini 系列模型的 API 进行搜索功能。其他模型提供商的 API 目前不支持在本项目中使用工具。如果您有希望适配的模型提供商,可以联系维护者。 + +2. 如果您使用了 OpenAI、Claude 和 Gemini 系列模型的 API,但无法使用搜索功能,可能是因为搜索功能未启用。您可以通过 `/info` 命令点击偏好设置来检查搜索功能是否启用。 + +3. 如果您使用了 OpenAI、Claude 和 Gemini 系列模型的 API,请确保你使用的是官方 API,如果你使用的是第三方中转 API,提供商可能通过网页逆向的方式向你提供 API,通过网页逆向的方式提供 API 无法使用 tools use,即不能使用本项目所有的插件。如果你确认你使用的是官方 API,仍然无法成功搜索,请联系开发人员。 + +- 我如何切换模型? + +您可以在聊天窗口中使用 "/info" 命令在 GPT3.5/4/4o 和其他模型之间切换。 + +- 它可以在一个群组中部署吗? + +是的,它支持群组白名单以防止滥用和信息泄露。 + +- 为什么我把机器人添加到群组后它不能说话? 
+ +如果这是您第一次将机器人添加到群聊中,您需要在botfather中将群组隐私设置为禁用,然后将机器人从群聊中移除并重新添加,以便正常使用。 + +第二种方法是将机器人设置为管理员,这样机器人就可以正常使用了。然而,如果你想将机器人添加到你不是管理员的群聊中,第一种方法更为合适。 + +另一种可能性是 GROUP_LIST 集不是当前的群聊 ID。请检查是否设置了 GROUP_LIST;GROUP_LIST 是群 ID,而不是群名称。群 ID 以减号开头,后跟一串数字。 + +- GROUP_LIST、ADMIN_LIST 和白名单的设置如何影响机器人的行为? + +如果未设置白名单,所有人都可以使用机器人。如果设置了白名单,只有白名单中的用户可以使用机器人。如果设置了GROUP_LIST,只有GROUP_LIST中的群组可以使用机器人。如果同时设置了白名单和GROUP_LIST,群组中的所有人都可以使用机器人,但只有白名单中的用户可以私聊机器人。如果设置了ADMIN_LIST,只有ADMIN_LIST中的用户可以使用/info命令来更改机器人的设置。如果未设置ADMIN_LIST,所有人都可以使用/info命令来更改机器人的配置。GROUP_LIST 也可以包含频道,频道ID以减号开头,后跟一串数字。 + +- 我应该如何设置 API_URL? + +API_URL 支持所有后缀,包括:https://api.openai.com/v1/chat/completions、https://api.openai.com/v1 和 https://api.openai.com/。机器人将根据不同的用途自动分配不同的端点。 + +- 是否有必要配置 web_hook 环境变量? + +web_hook 不是强制性的环境变量。你只需要设置域名(必须与 WEB_HOOK 一致)和其他根据你的应用功能所需的环境变量。 + +- 我用docker compose部署了一个机器人。如果文档放在本地服务器上,应该挂载到哪个目录才能生效?我需要设置额外的配置和修改代码吗? + +您可以直接通过聊天框将文档发送给机器人,机器人会自动解析文档。要使用文档对话功能,您需要启用历史对话功能。无需对文档进行额外处理。 + +- 我还是无法让它正常工作……我想在一个群组中使用它,我已经将 ADMIN_LIST 设置为我自己,并将 GROUP_LIST 设置为那个群组,白名单留空。但是,只有我可以在那个群组中使用它,群组中的其他成员被提示没有权限,这是怎么回事? + +这是一个故障排除指南:请仔细检查 GROUP_LIST 是否正确。Telegram 群组的 ID 以负号开头,后跟一系列数字。如果不是,请使用此机器人 [bot](https://t.me/getidsbot) 重新获取群组 ID。 + +- 我上传了一个文档,但它没有根据文档的内容做出响应。怎么回事? + +要使用文档问答功能,您必须先启用历史记录。您可以通过 `/info` 命令开启历史记录,或者通过将环境变量 `PASS_HISTORY` 设置为大于2来默认启用历史记录。请注意,启用历史记录将会产生额外费用,因此该项目默认不启用历史记录。这意味着在默认设置下无法使用问答功能。在使用此功能之前,您需要手动启用历史记录。 + +- 设置 `NICK` 后,当我 @ 机器人时没有响应,它只在消息以昵称开头时才回复。我怎样才能让它同时响应昵称和 @机器人名? + +在群聊场景中,如果环境变量 `NICK` 未设置,机器人将接收所有群消息并回应所有消息。因此,有必要设置 `NICK`。设置 `NICK` 后,机器人只会回应以 `NICK` 开头的消息。所以,如果你想 @ 机器人以获得回应,你只需将 NICK 设置为 @botname。这样,当你在群里 @ 机器人时,机器人会检测到消息是以 @botname 开头的,并会回应该消息。 + +- 历史会保留多少条消息? + +所有其他模型使用官方上下文长度设置,例如,`gpt-3.5-turbo-16k` 的上下文是 16k,`gpt-4o` 的上下文是 128k,`Claude3/3.5` 的上下文是 200k。此限制是为了节省用户成本,因为大多数场景不需要高上下文。 + +- 如何从模型列表中删除默认模型名称? + +你可以使用 `CUSTOM_MODELS` 环境变量来完成它。例如,如果你想添加 gpt-4o 并从模型列表中移除 gpt-3.5 模型,请将 `CUSTOM_MODELS` 设置为 `gpt-4o,-gpt-3.5`。如果你想一次性删除所有默认模型,你可以将 `CUSTOM_MODELS` 设置为 `-all,gpt-4o`。 + +- 对话隔离具体是如何工作的? + +对话总是基于不同的窗口隔离,而不是不同的用户。这意味着在同一个群聊窗口、同一个主题和同一个私聊窗口内,都会被视为同一个对话。CHAT_MODE 只影响配置是否隔离。在多用户模式下,每个用户的插件配置、偏好等都是独立的,互不影响。在单用户模式下,所有用户共享相同的插件配置和偏好。然而,对话历史总是隔离的。对话隔离是为了保护用户隐私,确保用户的对话历史、插件配置、偏好等不被其他用户看到。 + +- 为什么 Docker 镜像很久没有更新了? + +Docker 镜像只存储程序的运行环境。目前,程序的运行环境是稳定的,环境依赖几乎没有变化,所以 Docker 镜像没有更新。每次重新部署 Docker 镜像时,它会拉取最新的代码,因此不需要担心 Docker 镜像更新的问题。 + +- 为什么容器在启动后报告错误 "http connect error or telegram.error.TimedOut: Timed out"? + +此问题可能是由于部署 Docker 的服务器无法连接到 Telegram 服务器或 Telegram 服务器的不稳定性引起的。 + +1. 在大多数情况下,重新启动服务,检查服务器网络环境,或等待 Telegram 服务恢复即可。 +2. 此外,您可以尝试通过网络钩子与Telegram服务器进行通信,这可能会解决问题。 + +- 如何让 docker 无限重试而不是一开始就停止? + +Docker 中的 `--restart unless-stopped` 参数设置容器的重启策略。具体来说: + +1. unless-stopped: 这个策略意味着容器如果停止了会自动重启,除非它是被手动停止的。换句话说,如果容器由于错误或系统重启而停止,它会自动重启。然而,如果你手动停止了容器(例如,使用docker stop命令),它将不会自行重启。 +此参数对于需要连续运行的服务特别有用,因为它确保服务能够在意外中断后自动恢复,而无需手动干预。 + +2. 示例:假设你有一个运行 web 服务器的 Docker 容器,并且你希望它在崩溃或系统重启时自动重启,但在你手动停止它时不重启。你可以使用以下命令: + +```shell +docker run -d --name my-web-server -p 80:80 --restart unless-stopped my-web-server-image +``` +在此示例中,名为 my-web-server 的 web 服务器容器将自动重新启动,除非您手动停止它。 + +- 切换模型,我需要重新输入提示吗? + +是的,因为切换模型会重置历史记录,所以您需要重新输入提示。 + +- PASS_HISTORY 的适当值是什么? + +PASS_HISTORY的数量严格等于对话历史中的消息数量。推荐值是2,因为系统提示占用了一个消息计数。如果设置为0,PASS_HISTORY将自动重置为2,以确保对话正常进行。当PASS_HISTORY小于或等于2时,机器人的行为可以被视为只记住当前对话,即一个问题和一个答案,并且下次不会记住之前的问答内容。PASS_HISTORY的最大值没有限制,但请注意,对话历史中的消息越多,每次对话的成本就越高。当未设置PASS_HISTORY时,默认值为9999,表示对话历史中的消息数量为9999。 + +- 机器人令牌可以有多个令牌吗? + +不,将来它会支持多个机器人令牌。 + +- 如何使用机器人命令? + +1. 
`/info`: 机器人 `/info` 命令可以查看机器人的配置信息,包括当前使用的模型、API URL、API 密钥等。它还可以更改机器人的显示语言、偏好设置和插件设置。 + +2. `/start`:机器人 `/start` 命令可以查看机器人的使用说明、使用方法和功能介绍。您可以使用 `/start` 命令设置 API 密钥。如果您有官方的 OpenAI API 密钥,请使用以下命令:`/start your_api_key`。如果您使用的是第三方 API 密钥,请使用以下命令:`/start https://your_api_url your_api_key`。 + +3. `/reset`:机器人 `/reset` 命令可以清除机器人的对话消息,并强制机器人停止生成回复。如果你想重置系统提示,请使用以下命令:`/reset your_system_prompt`。但是,`/reset` 命令永远不会恢复机器人的显示语言、偏好设置、插件设置、使用中的模型、API URL、API 密钥、系统提示等。 + +- 如果 Koyeb 部署失败怎么办? + +Koyeb 的免费服务可能有点不稳定,所以部署失败是很常见的。你可以尝试重新部署,如果还是不行的话,考虑换到另一个平台。😊 + +- 为什么我使用 CUSTOM_MODELS 删除默认模型名称后,再使用 /info 命令检查时它又重新出现了? + +如果你使用 `docker-compose.yml` 部署,不要在 `CUSTOM_MODELS` 的值周围添加引号。错误用法:`CUSTOM_MODELS="gpt-4o,-gpt-3.5"`,否则会导致环境变量解析错误,导致默认模型名称再次出现。错误的方式会被解析为删除 `gpt-3.5"` 模型,这将导致默认模型名称 `gpt-3.5` 未被删除。正确的写法是:`CUSTOM_MODELS=gpt-4o,-gpt-3.5`。 + +## 参考文献 + +https://core.telegram.org/bots/api + +https://github.com/acheong08/ChatGPT + +https://github.com/franalgaba/chatgpt-telegram-bot-serverless + +https://github.com/gpchelkin/scdlbot/blob/d64d14f6c6d357ba818e80b8a0a9291c2146d6fe/scdlbot/__main__.py#L8 + +消息使用的markdown渲染是我的另一个[项目](https://github.com/yym68686/md2tgmd)。 + +duckduckgo AI: https://github.com/mrgick/duck_chat + +## 赞助商 + +我们感谢以下赞助商的支持: + +- @fasizhuanqian: 300 USDT + +- @ZETA: $380 + +- @yuerbujin: ¥1200 + +- @RR5AM: ¥300 + +- @IKUNONHK: 30 USDT + +- @miya0v0: 30 USDT + +- [@Zeabur](https://zeabur.com?referralCode=yym68686&utm_source=yym68686&utm_campaign=oss): $25 + +- @Bill_ZKE: 20 USDT + +- @wagon_look:¥50 + + + +## 如何赞助我们 + +如果您想支持我们的项目,您可以通过以下方式赞助我们: + +1. [PayPal](https://www.paypal.me/yym68686) + +2. [USDT-TRC20](https://pb.yym68686.top/~USDT-TRC20),USDT-TRC20 钱包地址:`TLFbqSv5pDu5he43mVmK1dNx7yBMFeN7d8` + +3. [微信](https://pb.yym68686.top/~wechat) + +4. [支付宝](https://pb.yym68686.top/~alipay) + +感谢您的支持! 
+ +## 星星历史 + + + 星历史图表 + + +## 许可证 + +本项目根据 GPLv3 许可证授权,这意味着您可以自由复制、分发和修改该软件,只要所有修改和衍生作品也以相同的许可证发布。 diff --git a/0-部署应用/Tokyo-arm64-01/telegram-ai-bot/docker-compose.yaml b/0-部署应用/Tokyo-arm64-01/telegram-ai-bot/docker-compose.yaml new file mode 100644 index 0000000..0e73d5e --- /dev/null +++ b/0-部署应用/Tokyo-arm64-01/telegram-ai-bot/docker-compose.yaml @@ -0,0 +1,22 @@ +version: "3.5" +services: + chatgptbot: + container_name: chatgptbot + image: yym68686/chatgpt:latest + environment: + - BOT_TOKEN=7908126551:AAE8VhwwfcZ3ru-ecJJo_bMADYADgh1Shzs + - whitelist=6868680170 + - ADMIN_LIST=6868680170 + - temperature=0.7 + - LANGUAGE=Simplified Chinese + - PASS_HISTORY=5 + - GOOGLE_AI_API_KEY=AIzaSyBv2JN5aY_OKDI5e1aVEf6uDQli65X9NZM + - API_URL=https://api.x.ai/v1/chat/completions + - API=xai-pQCto8hXbSLey5rHjohMZGjqaOlSwgFhofEckr5a7q9wQaJbpAV5xyEVGoq8JbhBoX1QVgUm5GzK2DkG + - GROQ_API_KEY=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 + - CUSTOM_MODELS=grok-2-latest + - GPT_ENGINE=grok-2-latest + volumes: + - ./user_configs:/home/user_configs + ports: + - 3080:8080 \ No newline at end of file diff --git a/0-部署应用/Tokyo-arm64-01/telegram-ai-bot/参考项目.txt b/0-部署应用/Tokyo-arm64-01/telegram-ai-bot/参考项目.txt new file mode 100644 index 0000000..29e3338 --- /dev/null +++ b/0-部署应用/Tokyo-arm64-01/telegram-ai-bot/参考项目.txt @@ -0,0 +1,2 @@ +https://github.com/yym68686/ChatGPT-Telegram-Bot + diff --git a/1-代理Xray/0-香港节点/0-分层代理回落.json b/1-代理Xray/0-香港节点/0-分层代理回落.json index b46a5c3..10495df 100644 --- a/1-代理Xray/0-香港节点/0-分层代理回落.json +++ b/1-代理Xray/0-香港节点/0-分层代理回落.json @@ -4,12 +4,12 @@ }, "inbounds": [ { - "port": 443, + "port": 24443, "protocol": "vless", "settings": { "clients": [ { - "id": "8c1b580b-c59d-4b89-b020-980fa947539f", + "id": "f8702759-f402-4e85-92a6-8540d577de22", "flow": "xtls-rprx-vision", "email": "cc@vless.com", "level": 0 @@ -17,11 +17,6 @@ ], "decryption": "none", "fallbacks": [ - { - "name": "xx.tc.hk.go.107421.xyz", - "alpn": "h2", - "dest": "@trojan-h2" - }, { "dest": "/dev/shm/h2c.sock", "xver": 2, @@ -42,11 +37,6 @@ "ocspStapling": 3600, "certificateFile": "/root/.acme.sh/book.107421.xyz_ecc/fullchain.cer", "keyFile": "/root/.acme.sh/book.107421.xyz_ecc/book.107421.xyz.key" - }, - { - "ocspStapling": 3600, - "certificateFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/fullchain.cer", - "keyFile": "/root/.acme.sh/xx.tc.hk.go.107421.xyz_ecc/xx.tc.hk.go.107421.xyz.key" } ], "minVersion": "1.2", @@ -64,26 +54,6 @@ "tls" ] } - }, - { - "listen": "@trojan-h2", - "protocol": "trojan", - "settings": { - "clients": [ - { - "email": "ice@qq.com", - "password": "VaC3.123a-asd1234-asdasd.aAsDazzS.123", - "level": 0 - } - ] - }, - "streamSettings": { - "network": "h2", - "security": "none", - "httpSettings": { - "path": "/status" - } - } } ], "outbounds": [ diff --git a/1-代理Xray/98-subscribe-clash.yaml b/1-代理Xray/98-subscribe-clash.yaml index e43bb40..a25c14d 100644 --- a/1-代理Xray/98-subscribe-clash.yaml +++ b/1-代理Xray/98-subscribe-clash.yaml @@ -58,8 +58,7 @@ proxies: - {"type":"socks5","name":"TC-SH","server":"42.192.52.227","port":22887,"username":"zeaslity","password":"a1f090ea-e39c-49e7-a3be-9af26b6ce563","udp":true} - {"type":"socks5","name":"TC-SH-LosA-BanH","server":"42.192.52.227","port":22888,"username":"zeaslity","password":"a1f090ea-e39c-49e7-a3be-9af26b6ce563","udp":true} - {"type":"socks5","name":"TC-SH-Germany","server":"42.192.52.227","port":22889,"username":"zeaslity","password":"a1f090ea-e39c-49e7-a3be-9af26b6ce563","udp":true} - - 
{"type":"trojan","name":"TC-HK-Trojan","server":"43.154.83.213","port":443,"password":"VaC3.123a-asd1234-asdasd.aAsDazzS.123","udp":true,"skip-cert-verify":false,"sni":"xx.tc.hk.go.107421.xyz","network":"http","ws-opts":{"path":"status"}} - - {"type":"vless","name":"TC-HK-Vless","server":"43.154.83.213","port":443,"uuid":"8c1b580b-c59d-4b89-b020-980fa947539f","skip-cert-verify":false,"network":"tcp","flow":"xtls-rprx-vision","servername":"book.107421.xyz","tls":true,"udp":true} + - {"type":"vless","name":"TC-HK-Vless","server":"43.154.83.213","port":24443,"uuid":"f8702759-f402-4e85-92a6-8540d577de22","skip-cert-verify":false,"network":"tcp","flow":"xtls-rprx-vision","servername":"book.107421.xyz","tls":true,"udp":true} - {"type":"vless","name":"Care-Germany-Vless","server":"45.134.50.233","port":443,"uuid":"b1417d92-998d-410b-a5f3-cf144b6f043e","skip-cert-verify":false,"network":"tcp","flow":"xtls-rprx-vision","servername":"bingo.107421.xyz","tls":true,"udp":true} - {"type":"vless","name":"Oracle-Seoul-ARM01-Vless","server":"132.145.87.10","port":443,"uuid":"1089cc14-557e-47ac-ac85-c07957b3cce3","skip-cert-verify":false,"network":"tcp","flow":"xtls-rprx-vision","servername":"xx.s0.yy.ac.107421.xyz","tls":true,"udp":true} - {"type":"vless","name":"Oracle-Seoul-Vless","server":"140.238.14.103","port":443,"uuid":"1089cc14-557e-47ac-ac85-c07957b3cce3","skip-cert-verify":false,"network":"tcp","flow":"xtls-rprx-vision","servername":"xx.s4.cc.hh.107421.xyz","tls":true,"udp":true} @@ -82,7 +81,6 @@ proxy-groups: - TC-SH - TC-SH-LosA-BanH - TC-SH-Germany - - TC-HK-Trojan - Oracle-Seoul-Vless - Oracle-Tokyo-By-Seoul-Vless - Oracle-Osaka-By-Seoul-Vless @@ -98,7 +96,6 @@ proxy-groups: tolerance: 50 proxies: - BanH-LosA-Trojan - - TC-HK-Trojan - us-central-free - Oracle-Seoul-Vless - Oracle-Seoul-ARM01-Vless @@ -122,7 +119,6 @@ proxy-groups: - Oracle-Seoul-ARM01-Vless - Care-Germany-Vless - BanH-LosA-Vless - - TC-HK-Trojan - TC-HK-Vless - Oracle-Tokyo-By-Seoul-Vless - Oracle-Osaka-By-Seoul-Vless @@ -134,7 +130,6 @@ proxy-groups: - 🚀 节点选择 - 🎯 全球直连 - BanH-LosA-Trojan - - TC-HK-Trojan - us-central-free - Oracle-Seoul-Vless - Oracle-Seoul-ARM01-Vless @@ -145,7 +140,6 @@ proxy-groups: - 🎯 全球直连 - 🚀 节点选择 - BanH-LosA-Trojan - - TC-HK-Trojan - Oracle-Seoul-Vless - us-central-free - Oracle-Seoul-ARM01-Vless @@ -156,7 +150,6 @@ proxy-groups: - 🚀 节点选择 - 🎯 全球直连 - BanH-LosA-Trojan - - TC-HK-Trojan - us-central-free - Oracle-Seoul-Vless - Oracle-Seoul-ARM01-Vless @@ -194,7 +187,6 @@ proxy-groups: - ♻️ 自动选择 - TC-HK-Vless - BanH-LosA-Trojan - - TC-HK-Trojan - us-central-free - Oracle-Seoul-Vless - Oracle-Seoul-ARM01-Vless diff --git a/2-NGINX相关/107421.xyz/21-申请证书.sh b/2-NGINX相关/107421.xyz/21-申请证书.sh index a4a4ad8..4bc725a 100644 --- a/2-NGINX相关/107421.xyz/21-申请证书.sh +++ b/2-NGINX相关/107421.xyz/21-申请证书.sh @@ -2,6 +2,7 @@ export DOMAIN_NAME=xx.tc.hk.go.107421.xyz export DOMAIN_NAME=book.107421.xyz + export DOMAIN_NAME=octopus.107421.xyz export DOMAIN_NAME=xx.t2.ll.c0.107421.xyz export DOMAIN_NAME=zc.p4.cc.xx.107421.xyz @@ -12,6 +13,9 @@ export DOMAIN_NAME=xx.o1.vl.s4.107421.xyz # phoneix-2 export DOMAIN_NAME=xx.p2.vl.s4.107421.xyz +# seoul-arm-01 +export DOMAIN_NAME=dify.107421.xyz + export CF_Token="oXJRP5XI8Zhipa_PtYtB_jy6qWL0I9BosrJEYE8p" export CF_Account_ID="dfaadeb83406ef5ad35da02617af9191"
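The hunk above only adds new DOMAIN_NAME exports and the Cloudflare API credentials; the acme.sh call that consumes them lies outside this diff. Below is a minimal sketch of how such a script typically continues, assuming acme.sh with the dns_cf DNS-01 plugin; the exact flags and the ec-256 key length are assumptions, not part of this patch.

```bash
# Assumed continuation of 21-申请证书.sh (not shown in this hunk).
# The dns_cf plugin reads the CF_Token / CF_Account_ID exported above to create the
# _acme-challenge TXT record, so no HTTP port needs to be exposed for validation.
~/.acme.sh/acme.sh --issue --dns dns_cf -d "${DOMAIN_NAME}" --keylength ec-256

# With an ECC key, acme.sh by default stores the results under
# ~/.acme.sh/${DOMAIN_NAME}_ecc/ (fullchain.cer, ${DOMAIN_NAME}.key),
# which matches the certificateFile/keyFile paths referenced elsewhere in this repo.
ls ~/.acme.sh/"${DOMAIN_NAME}_ecc"/
```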