# Measure how efficiently the server stores small string keys.
#
# Writes 10k keys (key:0 .. key:9999) whose values are runs of 'A' with a
# random length in [0, $range), pipelined through a deferring client, then
# compares the number of payload bytes sent against the growth of the
# server's used_memory.
#
# range    - upper bound (exclusive) on the random value length in bytes.
# Returns  - payload/used ratio as a double; closer to 1.0 means less
#            per-key overhead (higher is better).
proc test_memory_efficiency {range} {
    r flushall
    set rd [redis_deferring_client]
    set base_mem [s used_memory]
    set written 0
    for {set j 0} {$j < 10000} {incr j} {
        set key key:$j
        set val [string repeat A [expr {int(rand()*$range)}]]
        $rd set $key $val
        incr written [string length $key]
        incr written [string length $val]
        incr written 2 ;# A separator is the minimum to store key-value data.
    }
    # Drain all deferred replies so every write is known to have completed
    # before used_memory is sampled again.
    for {set j 0} {$j < 10000} {incr j} {
        $rd read ; # Discard replies
    }
    $rd close ;# Fix: the deferring client was leaked (one dangling connection per call).

    set current_mem [s used_memory]
    set used [expr {$current_mem-$base_mem}]
    set efficiency [expr {double($written)/$used}]
    return $efficiency
}
23
start_server {tags {"memefficiency"}} {
    # Each pair: maximum value size in bytes -> minimum acceptable
    # payload/used_memory ratio. Larger values amortize per-key overhead,
    # so the efficiency floor rises with the size range.
    set efficiency_cases {
        32    0.15
        64    0.25
        128   0.35
        1024  0.75
        16384 0.82
    }
    foreach {size_range expected_min_efficiency} $efficiency_cases {
        test "Memory efficiency with values in range $size_range" {
            assert {[test_memory_efficiency $size_range] >= $expected_min_efficiency}
        }
    }
}
38
start_server {tags {"defrag"}} {
    # Active defragmentation is only available when the server is built with
    # jemalloc; skip the whole suite for other allocators.
    if {[string match {*jemalloc*} [s mem_allocator]]} {
        test "Active defrag" {
            # Configure aggressive defrag thresholds but keep defrag disabled
            # while fragmentation is being created.
            r config set activedefrag no
            r config set active-defrag-threshold-lower 5
            r config set active-defrag-cycle-min 65
            r config set active-defrag-cycle-max 75
            r config set active-defrag-ignore-bytes 2mb
            r config set maxmemory 100mb
            r config set maxmemory-policy allkeys-lru
            # Populate past maxmemory with two different value sizes so the
            # LRU eviction that follows leaves mixed-size holes in the arena.
            r debug populate 700000 asdf 150
            r debug populate 170000 asdf 300
            r ping ;# trigger eviction following the previous population
            after 120 ;# serverCron only updates the info once in 100ms
            set frag [s allocator_frag_ratio]
            if {$::verbose} {
                puts "frag $frag"
            }
            # Sanity check: the setup above must actually have fragmented memory.
            assert {$frag >= 1.4}
            # CONFIG SET fails with a DISABLED error when active defrag was
            # compiled out; in that case skip the rest of the test.
            catch {r config set activedefrag yes} e
            if {![string match {DISABLED*} $e]} {
                # Wait for the active defrag to start working (decision once a
                # second).
                wait_for_condition 50 100 {
                    [s active_defrag_running] ne 0
                } else {
                    fail "defrag not started."
                }

                # Wait for the active defrag to stop working.
                wait_for_condition 150 100 {
                    [s active_defrag_running] eq 0
                } else {
                    after 120 ;# serverCron only updates the info once in 100ms
                    puts [r info memory]
                    puts [r memory malloc-stats]
                    fail "defrag didn't stop."
                }

                # Test that the fragmentation is lower.
                after 120 ;# serverCron only updates the info once in 100ms
                set frag [s allocator_frag_ratio]
                if {$::verbose} {
                    puts "frag $frag"
                }
                assert {$frag < 1.1}
            } else {
                # Defrag is compiled out: return "" so the expected-result
                # check ({}) below still passes.
                set _ ""
            }
        } {}

        test "Active defrag big keys" {
            r flushdb
            r config resetstat
            r config set save "" ;# prevent bgsave from interfering with the save below
            r config set activedefrag no
            r config set active-defrag-max-scan-fields 1000
            r config set active-defrag-threshold-lower 5
            r config set active-defrag-cycle-min 65
            r config set active-defrag-cycle-max 75
            r config set active-defrag-ignore-bytes 2mb
            r config set maxmemory 0
            r config set list-max-ziplist-size 5 ;# list of 10k items will have 2000 quicklist nodes
            r config set stream-node-max-entries 5
            # Create one small key of every type so the defrag scan has to
            # handle all encodings, not just the big keys below.
            r hmset hash h1 v1 h2 v2 h3 v3
            r lpush list a b c d
            r zadd zset 0 a 1 b 2 c 3 d
            r sadd set a b c d
            r xadd stream * item 1 value a
            r xadd stream * item 2 value b
            r xgroup create stream mygroup 0
            r xreadgroup GROUP mygroup Alice COUNT 1 STREAMS stream >

            # create big keys with 10k items
            set rd [redis_deferring_client]
            for {set j 0} {$j < 10000} {incr j} {
                $rd hset bighash $j [concat "asdfasdfasdf" $j]
                $rd lpush biglist [concat "asdfasdfasdf" $j]
                $rd zadd bigzset $j [concat "asdfasdfasdf" $j]
                $rd sadd bigset [concat "asdfasdfasdf" $j]
                $rd xadd bigstream * item 1 value a
            }
            # 10k iterations x 5 commands each = 50k replies to drain.
            for {set j 0} {$j < 50000} {incr j} {
                $rd read ; # Discard replies
            }

            set expected_frag 1.7
            if {$::accurate} {
                # scale the hash to 1m fields in order to have a measurable latency
                for {set j 10000} {$j < 1000000} {incr j} {
                    $rd hset bighash $j [concat "asdfasdfasdf" $j]
                }
                for {set j 10000} {$j < 1000000} {incr j} {
                    $rd read ; # Discard replies
                }
                # creating that big hash increased used_memory, so the relative frag goes down
                set expected_frag 1.3
            }

            # add a mass of string keys
            for {set j 0} {$j < 500000} {incr j} {
                $rd setrange $j 150 a
            }
            for {set j 0} {$j < 500000} {incr j} {
                $rd read ; # Discard replies
            }
            # 500k string keys + 5 small keys + 5 big keys created above.
            assert {[r dbsize] == 500010}

            # create some fragmentation by deleting every other string key
            for {set j 0} {$j < 500000} {incr j 2} {
                $rd del $j
            }
            for {set j 0} {$j < 500000} {incr j 2} {
                $rd read ; # Discard replies
            }
            assert {[r dbsize] == 250010}

            # start defrag
            after 120 ;# serverCron only updates the info once in 100ms
            set frag [s allocator_frag_ratio]
            if {$::verbose} {
                puts "frag $frag"
            }
            assert {$frag >= $expected_frag}
            # Track per-cycle latency so we can verify defrag of big keys is
            # interruptible and never blocks the server for too long.
            r config set latency-monitor-threshold 5
            r latency reset

            # Snapshot a dataset digest so corruption by defrag is detectable.
            set digest [r debug digest]
            catch {r config set activedefrag yes} e
            if {![string match {DISABLED*} $e]} {
                # wait for the active defrag to start working (decision once a second)
                wait_for_condition 50 100 {
                    [s active_defrag_running] ne 0
                } else {
                    fail "defrag not started."
                }

                # wait for the active defrag to stop working
                wait_for_condition 500 100 {
                    [s active_defrag_running] eq 0
                } else {
                    after 120 ;# serverCron only updates the info once in 100ms
                    puts [r info memory]
                    puts [r memory malloc-stats]
                    fail "defrag didn't stop."
                }

                # test that the fragmentation is lower
                after 120 ;# serverCron only updates the info once in 100ms
                set frag [s allocator_frag_ratio]
                # Extract the worst observed latency of a defrag cycle from
                # the latency monitor.
                set max_latency 0
                foreach event [r latency latest] {
                    lassign $event eventname time latency max
                    if {$eventname == "active-defrag-cycle"} {
                        set max_latency $max
                    }
                }
                if {$::verbose} {
                    puts "frag $frag"
                    puts "max latency $max_latency"
                    puts [r latency latest]
                    puts [r latency history active-defrag-cycle]
                }
                assert {$frag < 1.1}
                # due to high fragmentation, 10hz, and active-defrag-cycle-max set to 75,
                # we expect max latency to be not much higher than 75ms
                assert {$max_latency <= 120}
            }
            # verify the data isn't corrupted or changed
            set newdigest [r debug digest]
            assert {$digest eq $newdigest}
            r save ;# saving an rdb iterates over all the data / pointers
        } {OK}
    }
}
214