1start_server {tags {"maxmemory"}} {
2    test "Without maxmemory small integers are shared" {
3        r config set maxmemory 0
4        r set a 1
5        assert {[r object refcount a] > 1}
6    }
7
8    test "With maxmemory and non-LRU policy integers are still shared" {
9        r config set maxmemory 1073741824
10        r config set maxmemory-policy allkeys-random
11        r set a 1
12        assert {[r object refcount a] > 1}
13    }
14
15    test "With maxmemory and LRU policy integers are not shared" {
16        r config set maxmemory 1073741824
17        r config set maxmemory-policy allkeys-lru
18        r set a 1
19        r config set maxmemory-policy volatile-lru
20        r set b 1
21        assert {[r object refcount a] == 1}
22        assert {[r object refcount b] == 1}
23        r config set maxmemory 0
24    }
25
26    foreach policy {
27        allkeys-random allkeys-lru allkeys-lfu volatile-lru volatile-lfu volatile-random volatile-ttl
28    } {
29        test "maxmemory - is the memory limit honoured? (policy $policy)" {
30            # make sure to start with a blank instance
31            r flushall
32            # Get the current memory limit and calculate a new limit.
33            # We just add 100k to the current memory size so that it is
34            # fast for us to reach that limit.
35            set used [s used_memory]
36            set limit [expr {$used+100*1024}]
37            r config set maxmemory $limit
38            r config set maxmemory-policy $policy
39            # Now add keys until the limit is almost reached.
40            set numkeys 0
41            while 1 {
42                r setex [randomKey] 10000 x
43                incr numkeys
44                if {[s used_memory]+4096 > $limit} {
45                    assert {$numkeys > 10}
46                    break
47                }
48            }
49            # If we add the same number of keys already added again, we
50            # should still be under the limit.
51            for {set j 0} {$j < $numkeys} {incr j} {
52                r setex [randomKey] 10000 x
53            }
54            assert {[s used_memory] < ($limit+4096)}
55        }
56    }
57
58    foreach policy {
59        allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl
60    } {
61        test "maxmemory - only allkeys-* should remove non-volatile keys ($policy)" {
62            # make sure to start with a blank instance
63            r flushall
64            # Get the current memory limit and calculate a new limit.
65            # We just add 100k to the current memory size so that it is
66            # fast for us to reach that limit.
67            set used [s used_memory]
68            set limit [expr {$used+100*1024}]
69            r config set maxmemory $limit
70            r config set maxmemory-policy $policy
71            # Now add keys until the limit is almost reached.
72            set numkeys 0
73            while 1 {
74                r set [randomKey] x
75                incr numkeys
76                if {[s used_memory]+4096 > $limit} {
77                    assert {$numkeys > 10}
78                    break
79                }
80            }
81            # If we add the same number of keys already added again and
82            # the policy is allkeys-* we should still be under the limit.
83            # Otherwise we should see an error reported by Redis.
84            set err 0
85            for {set j 0} {$j < $numkeys} {incr j} {
86                if {[catch {r set [randomKey] x} e]} {
87                    if {[string match {*used memory*} $e]} {
88                        set err 1
89                    }
90                }
91            }
92            if {[string match allkeys-* $policy]} {
93                assert {[s used_memory] < ($limit+4096)}
94            } else {
95                assert {$err == 1}
96            }
97        }
98    }
99
100    foreach policy {
101        volatile-lru volatile-lfu volatile-random volatile-ttl
102    } {
103        test "maxmemory - policy $policy should only remove volatile keys." {
104            # make sure to start with a blank instance
105            r flushall
106            # Get the current memory limit and calculate a new limit.
107            # We just add 100k to the current memory size so that it is
108            # fast for us to reach that limit.
109            set used [s used_memory]
110            set limit [expr {$used+100*1024}]
111            r config set maxmemory $limit
112            r config set maxmemory-policy $policy
113            # Now add keys until the limit is almost reached.
114            set numkeys 0
115            while 1 {
116                # Odd keys are volatile
117                # Even keys are non volatile
118                if {$numkeys % 2} {
119                    r setex "key:$numkeys" 10000 x
120                } else {
121                    r set "key:$numkeys" x
122                }
123                if {[s used_memory]+4096 > $limit} {
124                    assert {$numkeys > 10}
125                    break
126                }
127                incr numkeys
128            }
129            # Now we add the same number of volatile keys already added.
130            # We expect Redis to evict only volatile keys in order to make
131            # space.
132            set err 0
133            for {set j 0} {$j < $numkeys} {incr j} {
134                catch {r setex "foo:$j" 10000 x}
135            }
136            # We should still be under the limit.
137            assert {[s used_memory] < ($limit+4096)}
138            # However all our non volatile keys should be here.
139            for {set j 0} {$j < $numkeys} {incr j 2} {
140                assert {[r exists "key:$j"]}
141            }
142        }
143    }
144}
145
# Verify that replica (slave) output buffers are accounted for correctly
# with respect to maxmemory. Starts a master and a replica, freezes the
# replica with SIGSTOP so the master's replica output buffer fills up, and
# then asserts (via INFO fields) that the buffer is tracked in
# mem_clients_slaves / mem_not_counted_for_evict and does not distort the
# memory usage that eviction decisions are based on.
#
#   test_name    - name reported to the test framework
#   cmd_count    - number of SETRANGE commands sent to the master
#   payload_len  - payload length of each SETRANGE (cmd_count*payload_len ~ total traffic)
#   limit_memory - when 1, set maxmemory on the master slightly above current usage
#   pipeline     - when 1, send the commands through a deferring (pipelined) client
proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline} {
    start_server {tags {"maxmemory"}} {
        start_server {} {
        # Innermost server (srv 0) is the replica; remember its OS pid so we
        # can SIGSTOP/SIGCONT it below.
        set slave_pid [s process_id]
        test "$test_name" {
            set slave [srv 0 client]
            set slave_host [srv 0 host]
            set slave_port [srv 0 port]
            set master [srv -1 client]
            set master_host [srv -1 host]
            set master_port [srv -1 port]

            # add 100 keys of 100k (10MB total)
            for {set j 0} {$j < 100} {incr j} {
                $master setrange "key:$j" 100000 asdf
            }

            # make sure master doesn't disconnect slave because of timeout
            $master config set repl-timeout 1200 ;# 20 minutes (for valgrind and slow machines)
            $master config set maxmemory-policy allkeys-random
            $master config set client-output-buffer-limit "replica 100000000 100000000 300"
            $master config set repl-backlog-size [expr {10*1024}]

            $slave slaveof $master_host $master_port
            wait_for_condition 50 100 {
                [s 0 master_link_status] eq {up}
            } else {
                fail "Replication not started."
            }

            # measure used memory after the slave connected and set maxmemory
            set orig_used [s -1 used_memory]
            set orig_client_buf [s -1 mem_clients_normal]
            set orig_mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
            set orig_used_no_repl [expr {$orig_used - $orig_mem_not_counted_for_evict}]
            # Limit is just 20kb above current usage so eviction pressure is
            # immediate if replica buffers were (wrongly) counted.
            set limit [expr {$orig_used - $orig_mem_not_counted_for_evict + 20*1024}]

            if {$limit_memory==1} {
                $master config set maxmemory $limit
            }

            # put the slave to sleep
            set rd_slave [redis_deferring_client]
            exec kill -SIGSTOP $slave_pid

            # send some 10mb worth of commands that don't increase the memory usage
            if {$pipeline == 1} {
                set rd_master [redis_deferring_client -1]
                for {set k 0} {$k < $cmd_count} {incr k} {
                    $rd_master setrange key:0 0 [string repeat A $payload_len]
                }
                # NOTE(review): the read loop below is deliberately left with
                # its body commented out, so the replies stay queued in the
                # master's reply buffer for the deferring client — confirm
                # this is intended before "fixing" the empty loop.
                for {set k 0} {$k < $cmd_count} {incr k} {
                    #$rd_master read
                }
            } else {
                for {set k 0} {$k < $cmd_count} {incr k} {
                    $master setrange key:0 0 [string repeat A $payload_len]
                }
            }

            # Sample memory accounting again while the replica is frozen and
            # its output buffer holds the pending traffic.
            set new_used [s -1 used_memory]
            set slave_buf [s -1 mem_clients_slaves]
            set client_buf [s -1 mem_clients_normal]
            set mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
            set used_no_repl [expr {$new_used - $mem_not_counted_for_evict}]
            # Delta of "real" dataset memory (excluding replication and normal
            # client buffers) before vs after the traffic; should be ~0.
            set delta [expr {($used_no_repl - $client_buf) - ($orig_used_no_repl - $orig_client_buf)}]

            assert {[$master dbsize] == 100}
            assert {$slave_buf > 2*1024*1024} ;# some of the data may have been pushed to the OS buffers
            set delta_max [expr {$cmd_count / 2}] ;# 1 byte unaccounted for, with 1M commands will consume some 1MB
            assert {$delta < $delta_max && $delta > -$delta_max}

            # After killing the replica connection its buffer must be freed
            # and reported as zero, with no leftover accounted memory.
            $master client kill type slave
            set killed_used [s -1 used_memory]
            set killed_slave_buf [s -1 mem_clients_slaves]
            set killed_mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
            set killed_used_no_repl [expr {$killed_used - $killed_mem_not_counted_for_evict}]
            set delta_no_repl [expr {$killed_used_no_repl - $used_no_repl}]
            assert {$killed_slave_buf == 0}
            assert {$delta_no_repl > -$delta_max && $delta_no_repl < $delta_max}

        }
        # unfreeze slave process (after the 'test' succeeded or failed, but before we attempt to terminate the server
        exec kill -SIGCONT $slave_pid
        }
    }
}
233
# Test that replica buffers are counted correctly.
# We want many small commands without waiting long, so a pipelined
# (redis_deferring_client) sender is used; pipelining could fill the query
# buffer and itself induce eviction, so maxmemory is left disabled here
# (limit_memory=0).
test_slave_buffers {slave buffer are counted correctly} 1000000 10 0 1

# Test that replica buffers don't induce eviction: fewer (and bigger)
# commands, no pipeline, but with maxmemory set (limit_memory=1).
test_slave_buffers "replica buffer don't induce eviction" 100000 100 1 0
243
244