# Dir is the path of the directory where key data will be stored.
# If it doesn't exist, Badger will try to create it for you.
Dir = /tmp/dkvsrv
# ValueDir is the path of the directory where value data will be stored.
# If it doesn't exist, Badger will try to create it for you.
ValueDir = /tmp/dkvsrv
# When SyncWrites is true, all writes are synced to disk. Setting this to false achieves better
# performance, but may cause data loss in case of a crash.
SyncWrites = true
# NumVersionsToKeep sets the maximum number of versions to keep per key.
NumVersionsToKeep = 1
# When ReadOnly is true the DB will be opened in read-only mode.
# Multiple processes can open the same Badger DB.
# Note: if the DB being opened had crashed before and has vlog data to be replayed,
# ReadOnly will cause Open to fail with an appropriate message.
ReadOnly = false
# When compression is enabled, every block will be compressed using the specified algorithm.
# This option doesn't affect existing tables. Only the newly created tables will be compressed.
# The default compression algorithm used is zstd when built with Cgo. Without Cgo, the default is snappy.
# Compression is enabled by default.
Compression = 0
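# (For reference, and assuming badger's standard options.CompressionType enum:
# 0 = no compression, 1 = Snappy, 2 = ZSTD. Compression = 0 therefore disables
# compression here, overriding the default noted above.)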
# When badger is running in InMemory mode, everything is stored in memory.
# No value/sst files are created. In case of a crash all data will be lost.
InMemory = false
# BaseLevelSize sets the maximum size target for the base level.
# The default value is 10MB.
BaseLevelSize = 10485760
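# (10485760 = 10 * 1024 * 1024 bytes, i.e. 10 MB.)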
# BaseTableSize sets the maximum size in bytes for an LSM table (file) in the base level.
# The default value of BaseTableSize is 2MB.
BaseTableSize = 2097152
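# (2097152 = 2 * 1024 * 1024 bytes, i.e. 2 MB.)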
# MemTableSize sets the maximum size in bytes for a memtable.
# The default value of MemTableSize is 64MB.
MemTableSize = 67108864
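# (67108864 = 64 * 1024 * 1024 bytes, i.e. 64 MB.)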
# LevelSizeMultiplier sets the ratio between the maximum sizes of contiguous levels in the LSM.
# Once a level grows larger than this ratio allows, the compaction process is triggered.
LevelSizeMultiplier = 10
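# (For example, with BaseLevelSize = 10 MB and LevelSizeMultiplier = 10, the
# per-level size targets grow roughly as 10 MB, 100 MB, 1 GB, and so on down the tree.)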
# Maximum number of levels of compaction allowed in the LSM.
MaxLevels = 7
# ValueThreshold sets the threshold used to decide whether a value is stored
# directly in the LSM tree or separately in the value log files.
ValueThreshold = 1024
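# (For example, with ValueThreshold = 1024, a 512-byte value is stored inline in
# the LSM tree, while a 4 KB value is written to the value log and only a pointer
# to it is kept in the tree.)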
# NumMemtables sets the maximum number of memtables to keep in memory before stalling.
NumMemtables = 5
# BlockSize sets the size of each block in an SSTable. An SSTable is divided into multiple blocks
# internally. Each block is compressed using prefix diff encoding.
BlockSize = 4096
# BloomFalsePositive sets the false positive probability of the bloom filter in any SSTable.
# Before reading a key from a table, the bloom filter is checked for the key's existence.
# BloomFalsePositive might impact the read performance of the DB.
# A lower BloomFalsePositive value might consume more memory.
BloomFalsePositive = 0.01
# BlockCacheSize specifies how much data the block cache should hold in memory. A small
# cache means lower memory consumption, but lookups and iterations would take
# longer. It is recommended to use a cache if you're using compression or encryption.
# If both compression and encryption are disabled, adding a cache only leads to
# unnecessary overhead, which affects read performance. Setting the size to
# zero disables the cache altogether.
BlockCacheSize = 0
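# (The size is given in bytes; e.g. setting 268435456 here would give a 256 MB
# cache. That figure is only illustrative, not a recommendation from badger.)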
# IndexCacheSize specifies how much memory should be used by table indices. These
# indices include the block offsets and the bloom filters. Badger uses bloom
# filters to speed up lookups. Each table has its own bloom filter, and each
# bloom filter is approximately 5 MB.
#
# The default value of IndexCacheSize is 0, which disables the cache and keeps
# all indices in memory.
IndexCacheSize = 0
# NumLevelZeroTables sets the maximum number of Level 0 tables before compaction starts.
NumLevelZeroTables = 5
# NumLevelZeroTablesStall sets the number of Level 0 tables that, once reached,
# causes the DB to stall until compaction succeeds.
NumLevelZeroTablesStall = 15
# ValueLogFileSize sets the maximum size of a single value log file.
ValueLogFileSize = 1073741823
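# (1073741823 = 2^30 - 1 bytes, i.e. just under 1 GB.)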
# ValueLogMaxEntries sets the approximate maximum number of entries a value log file can hold.
# The actual size limit of a value log file is the minimum of ValueLogFileSize and
# ValueLogMaxEntries.
ValueLogMaxEntries = 1000000
# NumCompactors sets the number of compaction workers to run concurrently.
# Setting this to zero stops compactions, which could eventually cause writes to block forever.
# The default value of NumCompactors is 2. One is dedicated just for L0 and L1.
NumCompactors = 2
# CompactL0OnClose determines whether Level 0 should be compacted before closing the DB.
# This ensures that both reads and writes are efficient when the DB is opened later.
# The default value of CompactL0OnClose is false.
CompactL0OnClose = true
# The ZSTD compression algorithm supports 20 compression levels. The higher the compression
# level, the better the compression ratio, but the lower the performance.
# We recommend using ZSTD compression level 1. Any level higher than 1 seems to
# deteriorate badger's performance.
# The following benchmarks were done with a 4 KB block size (the default). The compression
# ratio is supposed to increase with the compression level, but since the input to the
# compression algorithm is small (4 KB), we don't get a significant benefit at level 3.
# It is advised to write your own benchmarks before choosing a compression algorithm or level.
#
# benchmark                       iters   ns/op          throughput     ratio
# no_compression-16               10      502848865      165.46 MB/s    -
# zstd_compression/level_1-16     7       739037966      112.58 MB/s    2.93
# zstd_compression/level_3-16     7       756950250      109.91 MB/s    2.72
# zstd_compression/level_15-16    1       11135686219    7.47 MB/s      4.38
ZSTDCompressionLevel = 1
# When VerifyValueChecksum is set to true, the checksum will be verified for every entry read
# from the value log. If the value is stored in the SST (value size less than the value threshold),
# checksum validation will not be done.
VerifyValueChecksum = false
# When the BypassLockGuard option is set, badger will not acquire a lock on the
# directory. This could lead to data corruption if multiple badger instances
# write to the same data directory. Use this option with caution.
BypassLockGuard = false
# ChecksumVerificationMode indicates when the db should verify checksums for SSTable blocks.
# 0 -> indicates the DB should not verify checksums for SSTable blocks.
# 1 -> indicates checksums should be verified while opening an SSTable.
# 2 -> indicates checksums should be verified on every SSTable block read.
# 3 -> indicates checksums should be verified on SSTable opening and on every block read.
ChecksumVerificationMode = 0
# The DetectConflicts option determines whether transactions are checked for
# conflicts before committing them. When this option is set to false
# (DetectConflicts = false), badger can process transactions at a higher rate.
# Setting this option to false might be useful when the user application
# deals with conflict detection and resolution itself.
DetectConflicts = true