# redis.conf template — the token 'RedisPort' is substituted with the
# instance's TCP port at deploy time (see port / pidfile / logfile / dir).
# NOTE(review): binding all interfaces with protected-mode disabled exposes
# this instance to any reachable host — confirm a firewall and/or
# 'requirepass' protects it.
bind 0.0.0.0

protected-mode no
|
|
|
|
# 'RedisPort' is a template placeholder, not a literal value; it is replaced
# with the real port number when this file is rendered.
port RedisPort
|
|
|
|
tcp-backlog 511
|
|
|
|
timeout 0
|
|
|
|
tcp-keepalive 300
|
|
|
|
supervised auto
|
|
|
|
daemonize yes
|
|
|
|
pidfile "/var/run/redis-RedisPort.pid"
|
|
|
|
loglevel notice
|
|
|
|
logfile "/var/log/redis_RedisPort.log"
|
|
|
|
databases 16
|
|
|
|
always-show-logo no
|
|
|
|
set-proc-title yes
|
|
|
|
proc-title-template "{title} {listen-addr} {server-mode}"
|
|
|
|
################################ SNAPSHOTTING ################################
|
|
|
|
# Save the DB to disk.
|
|
#
|
|
# save <seconds> <changes>
|
|
#
|
|
# Redis will save the DB if both the given number of seconds and the given
|
|
# number of write operations against the DB occurred.
|
|
#
|
|
# Snapshotting can be completely disabled with a single empty string argument
|
|
# as in following example:
|
|
#
|
|
# save ""
|
|
#
|
|
# Unless specified otherwise, by default Redis will save the DB:
|
|
# * After 3600 seconds (an hour) if at least 1 key changed
|
|
# * After 300 seconds (5 minutes) if at least 100 keys changed
|
|
# * After 60 seconds if at least 10000 keys changed
|
|
#
|
|
# You can set these explicitly by uncommenting the three following lines.
|
|
#
|
|
# save 3600 1
|
|
# save 300 100
|
|
# save 60 10000
|
|
|
|
# By default Redis will stop accepting writes if RDB snapshots are enabled
|
|
# (at least one save point) and the latest background save failed.
|
|
# This will make the user aware (in a hard way) that data is not persisting
|
|
# on disk properly, otherwise chances are that no one will notice and some
|
|
# disaster will happen.
|
|
#
|
|
# If the background saving process will start working again Redis will
|
|
# automatically allow writes again.
|
|
#
|
|
# However if you have setup your proper monitoring of the Redis server
|
|
# and persistence, you may want to disable this feature so that Redis will
|
|
# continue to work as usual even if there are problems with disk,
|
|
# permissions, and so forth.
|
|
stop-writes-on-bgsave-error yes
|
|
|
|
# Compress string objects using LZF when dump .rdb databases?
|
|
# By default compression is enabled as it's almost always a win.
|
|
# If you want to save some CPU in the saving child set it to 'no' but
|
|
# the dataset will likely be bigger if you have compressible values or keys.
|
|
rdbcompression yes
|
|
|
|
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
|
|
# This makes the format more resistant to corruption but there is a performance
|
|
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
|
|
# for maximum performances.
|
|
#
|
|
# RDB files created with checksum disabled have a checksum of zero that will
|
|
# tell the loading code to skip the check.
|
|
rdbchecksum yes
|
|
|
|
# Enables or disables full sanitization checks for ziplist and listpack etc when
# loading an RDB or RESTORE payload. This reduces the chances of an assertion or
# crash later on while processing commands.
# Options:
#   no       - Never perform full sanitization
#   yes      - Always perform full sanitization
#   clients  - Perform full sanitization only for user connections.
|
|
# Excludes: RDB files, RESTORE commands received from the master
|
|
# connection, and client connections which have the
|
|
# skip-sanitize-payload ACL flag.
|
|
# The default should be 'clients' but since it currently affects cluster
|
|
# resharding via MIGRATE, it is temporarily set to 'no' by default.
|
|
#
|
|
# sanitize-dump-payload no
|
|
|
|
# The filename where to dump the DB
|
|
dbfilename dump.rdb
|
|
|
|
|
|
rdb-del-sync-files no
|
|
|
|
|
|
dir /var/redis/RedisPort
|
|
|
|
|
|
acllog-max-len 128
|
|
|
|
|
|
lazyfree-lazy-eviction no
|
|
lazyfree-lazy-expire no
|
|
lazyfree-lazy-server-del no
|
|
replica-lazy-flush no
|
|
|
|
# It is also possible, for the case when to replace the user code DEL calls
|
|
# with UNLINK calls is not easy, to modify the default behavior of the DEL
|
|
# command to act exactly like UNLINK, using the following configuration
|
|
# directive:
|
|
|
|
lazyfree-lazy-user-del no
|
|
|
|
# FLUSHDB, FLUSHALL, and SCRIPT FLUSH support both asynchronous and synchronous
|
|
# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
|
|
# commands. When neither flag is passed, this directive will be used to determine
|
|
# if the data should be deleted asynchronously.
|
|
|
|
lazyfree-lazy-user-flush no
|
|
oom-score-adj no
|
|
|
|
oom-score-adj-values 0 200 800
|
|
|
|
|
|
disable-thp yes
|
|
|
|
|
|
appendonly no
|
|
|
|
# The name of the append only file (default: "appendonly.aof")
|
|
|
|
appendfilename "appendonly.aof"
|
|
|
|
# The fsync() call tells the Operating System to actually write data on disk
|
|
# instead of waiting for more data in the output buffer. Some OS will really flush
|
|
# data on disk, some other OS will just try to do it ASAP.
|
|
#
|
|
# Redis supports three different modes:
|
|
#
|
|
# no: don't fsync, just let the OS flush the data when it wants. Faster.
|
|
# always: fsync after every write to the append only log. Slow, Safest.
|
|
# everysec: fsync only one time every second. Compromise.
|
|
#
|
|
# The default is "everysec", as that's usually the right compromise between
|
|
# speed and data safety. It's up to you to understand if you can relax this to
|
|
# "no" that will let the operating system flush the output buffer when
|
|
# it wants, for better performances (but if you can live with the idea of
|
|
# some data loss consider the default persistence mode that's snapshotting),
|
|
# or on the contrary, use "always" that's very slow but a bit safer than
|
|
# everysec.
|
|
#
|
|
# More details please check the following article:
|
|
# http://antirez.com/post/redis-persistence-demystified.html
|
|
#
|
|
# If unsure, use "everysec".
|
|
|
|
# appendfsync always
|
|
appendfsync everysec
|
|
# appendfsync no
|
|
|
|
|
|
no-appendfsync-on-rewrite no
|
|
|
|
# Automatic rewrite of the append only file.
|
|
# Redis is able to automatically rewrite the log file implicitly calling
|
|
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
|
|
#
|
|
# This is how it works: Redis remembers the size of the AOF file after the
|
|
# latest rewrite (if no rewrite has happened since the restart, the size of
|
|
# the AOF at startup is used).
|
|
#
|
|
# This base size is compared to the current size. If the current size is
|
|
# bigger than the specified percentage, the rewrite is triggered. Also
|
|
# you need to specify a minimal size for the AOF file to be rewritten, this
|
|
# is useful to avoid rewriting the AOF file even if the percentage increase
|
|
# is reached but it is still pretty small.
|
|
#
|
|
# Specify a percentage of zero in order to disable the automatic AOF
|
|
# rewrite feature.
|
|
|
|
auto-aof-rewrite-percentage 100
|
|
auto-aof-rewrite-min-size 64mb
|
|
|
|
|
|
# If the AOF file ends mid-write (e.g. after a crash), load as much data as
# possible and log the truncation instead of refusing to start. This option
# only applies when Redis tries to read more data from the AOF file but not
# enough bytes will be found.
|
|
aof-load-truncated yes
|
|
|
|
# When rewriting the AOF file, Redis is able to use an RDB preamble in the
|
|
# AOF file for faster rewrites and recoveries. When this option is turned
|
|
# on the rewritten AOF file is composed of two different stanzas:
|
|
#
|
|
# [RDB file][AOF tail]
|
|
#
|
|
# When loading, Redis recognizes that the AOF file starts with the "REDIS"
|
|
# string and loads the prefixed RDB file, then continues loading the AOF
|
|
# tail.
|
|
aof-use-rdb-preamble yes
|
|
|
|
|
|
lua-time-limit 5000
|
|
|
|
|
|
################################## SLOW LOG ###################################
|
|
|
|
# The Redis Slow Log is a system to log queries that exceeded a specified
|
|
# execution time. The execution time does not include the I/O operations
|
|
# like talking with the client, sending the reply and so forth,
|
|
# but just the time needed to actually execute the command (this is the only
|
|
# stage of command execution where the thread is blocked and can not serve
|
|
# other requests in the meantime).
|
|
#
|
|
# You can configure the slow log with two parameters: one tells Redis
|
|
# what is the execution time, in microseconds, to exceed in order for the
|
|
# command to get logged, and the other parameter is the length of the
|
|
# slow log. When a new command is logged the oldest one is removed from the
|
|
# queue of logged commands.
|
|
|
|
# The following time is expressed in microseconds, so 1000000 is equivalent
|
|
# to one second. Note that a negative number disables the slow log, while
|
|
# a value of zero forces the logging of every command.
|
|
slowlog-log-slower-than 10000
|
|
|
|
# There is no limit to this length. Just be aware that it will consume memory.
|
|
# You can reclaim memory used by the slow log with SLOWLOG RESET.
|
|
slowlog-max-len 128
|
|
|
|
################################ LATENCY MONITOR ##############################
|
|
|
|
# The Redis latency monitoring subsystem samples different operations
|
|
# at runtime in order to collect data related to possible sources of
|
|
# latency of a Redis instance.
|
|
#
|
|
# Via the LATENCY command this information is available to the user that can
|
|
# print graphs and obtain reports.
|
|
#
|
|
# The system only logs operations that were performed in a time equal or
|
|
# greater than the amount of milliseconds specified via the
|
|
# latency-monitor-threshold configuration directive. When its value is set
|
|
# to zero, the latency monitor is turned off.
|
|
#
|
|
# By default latency monitoring is disabled since it is mostly not needed
|
|
# if you don't have latency issues, and collecting data has a performance
|
|
# impact, that while very small, can be measured under big load. Latency
|
|
# monitoring can easily be enabled at runtime using the command
|
|
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
|
|
latency-monitor-threshold 0
|
|
|
|
|
|
notify-keyspace-events ""
|
|
|
|
# Hashes are encoded using a memory efficient data structure when they have a
|
|
# small number of entries, and the biggest entry does not exceed a given
|
|
# threshold. These thresholds can be configured using the following directives.
|
|
hash-max-ziplist-entries 512
|
|
hash-max-ziplist-value 64
|
|
|
|
# Lists are also encoded in a special way to save a lot of space.
|
|
# The number of entries allowed per internal list node can be specified
|
|
# as a fixed maximum size or a maximum number of elements.
|
|
# For a fixed maximum size, use -5 through -1, meaning:
|
|
# -5: max size: 64 Kb <-- not recommended for normal workloads
|
|
# -4: max size: 32 Kb <-- not recommended
|
|
# -3: max size: 16 Kb <-- probably not recommended
|
|
# -2: max size: 8 Kb <-- good
|
|
# -1: max size: 4 Kb <-- good
|
|
# Positive numbers mean store up to _exactly_ that number of elements
|
|
# per list node.
|
|
# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
|
|
# but if your use case is unique, adjust the settings as necessary.
|
|
list-max-ziplist-size -2
|
|
|
|
# Lists may also be compressed.
|
|
# Compress depth is the number of quicklist ziplist nodes from *each* side of
|
|
# the list to *exclude* from compression. The head and tail of the list
# are always uncompressed for fast push/pop operations. Settings are:
# 0: disable all list compression
# 1: depth 1 means "don't start compressing until after 1 node into the list,
#    going from either the head or tail"
# etc.
|
|
list-compress-depth 0
|
|
|
|
# Sets have a special encoding in just one case: when a set is composed
|
|
# of just strings that happen to be integers in radix 10 in the range
|
|
# of 64 bit signed integers.
|
|
# The following configuration setting sets the limit in the size of the
|
|
# set in order to use this special memory saving encoding.
|
|
set-max-intset-entries 512
|
|
|
|
# Similarly to hashes and lists, sorted sets are also specially encoded in
|
|
# order to save a lot of space. This encoding is only used when the length and
|
|
# elements of a sorted set are below the following limits:
|
|
zset-max-ziplist-entries 128
|
|
zset-max-ziplist-value 64
|
|
|
|
# HyperLogLog sparse representation bytes limit. The limit includes the
|
|
# 16 bytes header. When an HyperLogLog using the sparse representation crosses
|
|
# this limit, it is converted into the dense representation.
|
|
#
|
|
# A value greater than 16000 is totally useless, since at that point the
|
|
# dense representation is more memory efficient.
|
|
#
|
|
# The suggested value is ~ 3000 in order to have the benefits of
|
|
# the space efficient encoding without slowing down too much PFADD,
|
|
# which is O(N) with the sparse encoding. The value can be raised to
|
|
# ~ 10000 when CPU is not a concern, but space is, and the data set is
|
|
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
|
|
hll-sparse-max-bytes 3000
|
|
|
|
# Streams macro node max size / items. The stream data structure is a radix
|
|
# tree of big nodes that encode multiple items inside. Using this configuration
|
|
# it is possible to configure how big a single node can be in bytes, and the
|
|
# maximum number of items it may contain before switching to a new node when
|
|
# appending new stream entries. If any of the following settings are set to
|
|
# zero, the limit is ignored, so for instance it is possible to set just a
|
|
# max entries limit by setting max-bytes to 0 and max-entries to the desired
|
|
# value.
|
|
stream-node-max-bytes 4096
|
|
stream-node-max-entries 100
|
|
|
|
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
|
|
# order to help rehashing the main Redis hash table (the one mapping top-level
|
|
# keys to values). The hash table implementation Redis uses (see dict.c)
|
|
# performs a lazy rehashing: the more operation you run into a hash table
|
|
# that is rehashing, the more rehashing "steps" are performed, so if the
|
|
# server is idle the rehashing is never complete and some more memory is used
|
|
# by the hash table.
|
|
#
|
|
# The default is to use this millisecond 10 times every second in order to
|
|
# actively rehash the main dictionaries, freeing memory when possible.
|
|
#
|
|
# If unsure:
|
|
# use "activerehashing no" if you have hard latency requirements and it is
|
|
# not a good thing in your environment that Redis can reply from time to time
|
|
# to queries with 2 milliseconds delay.
|
|
#
|
|
# use "activerehashing yes" if you don't have such hard requirements but
|
|
# want to free memory asap when possible.
|
|
activerehashing yes
|
|
|
|
|
|
#
|
|
# Both the hard or the soft limit can be disabled by setting them to zero.
|
|
client-output-buffer-limit normal 0 0 0
|
|
client-output-buffer-limit replica 256mb 64mb 60
|
|
client-output-buffer-limit pubsub 32mb 8mb 60
|
|
|
|
|
|
hz 10
|
|
|
|
# Normally it is useful to have an HZ value which is proportional to the
|
|
# number of clients connected. This is useful in order, for instance, to
|
|
# avoid too many clients are processed for each background task invocation
|
|
# in order to avoid latency spikes.
|
|
#
|
|
# Since the default HZ value by default is conservatively set to 10, Redis
|
|
# offers, and enables by default, the ability to use an adaptive HZ value
|
|
# which will temporarily raise when there are many connected clients.
|
|
#
|
|
# When dynamic HZ is enabled, the actual configured HZ will be used
|
|
# as a baseline, but multiples of the configured HZ value will be actually
|
|
# used as needed once more clients are connected. In this way an idle
|
|
# instance will use very little CPU time while a busy instance will be
|
|
# more responsive.
|
|
dynamic-hz yes
|
|
|
|
# When a child rewrites the AOF file, if the following option is enabled
|
|
# the file will be fsync-ed every 32 MB of data generated. This is useful
|
|
# in order to commit the file to the disk more incrementally and avoid
|
|
# big latency spikes.
|
|
aof-rewrite-incremental-fsync yes
|
|
|
|
# When redis saves RDB file, if the following option is enabled
|
|
# the file will be fsync-ed every 32 MB of data generated. This is useful
|
|
# in order to commit the file to the disk more incrementally and avoid
|
|
# big latency spikes.
|
|
rdb-save-incremental-fsync yes
|
|
|
|
|
|
jemalloc-bg-thread yes
|
|
|