Add external test that runs without debug command (#9964)

- add needs:debug flag for some tests
- disable "save" in external tests (speedup?)
- use debug_digest proc instead of the debug command directly so it can be skipped (see the sketch below)
- use OBJECT ENCODING instead of DEBUG OBJECT to get encoding
- add a proc for OBJECT REFCOUNT so it can be skipped
- move several latency_monitor tests to run later so that the latency monitor has some values in it
- add missing close_replication_stream calls
- make sure to close the temp client if DEBUG LOG fails
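
For context, a minimal sketch of the two patterns the suite follows after this change (test names, keys and values here are illustrative, not taken from the commit): a test that must call DEBUG directly carries the needs:debug tag so a run started with `--tags "-needs:debug"` skips it, while digest/refcount checks go through the new helpers, which degrade gracefully when DEBUG is unavailable:

    # Tagged: this test requires the DEBUG command, so the new workflow job
    # (which runs with --tags "-slow -needs:debug") skips it entirely.
    test {Value survives a DEBUG RELOAD} {
        r set foo bar
        r debug reload
        r get foo
    } {bar} {needs:debug}

    # Untagged: digest and refcount checks go through the helpers introduced
    # below, which fall back to dummy digests or plain OBJECT subcommands
    # when the DEBUG command is denied.
    test {COPY produces an identical value} {
        r set foo bar
        r copy foo foo2
        assert_refcount 1 foo2
        assert_equal [debug_digest_value foo] [debug_digest_value foo2]
    }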
Author: Oran Agra (committed by GitHub)
Date:   2021-12-19 17:41:51 +02:00
Parent: ae2f5b7b2e
Commit: 6add1b7217
18 changed files with 129 additions and 80 deletions

@@ -15,7 +15,9 @@ jobs:
     - name: Build
       run: make REDIS_CFLAGS=-Werror
     - name: Start redis-server
-      run: ./src/redis-server --daemonize yes --logfile external-redis.log --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes
+      run: |
+        ./src/redis-server --daemonize yes --save "" --logfile external-redis.log \
+          --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes
     - name: Run external test
       run: |
         ./runtest \
@@ -36,7 +38,9 @@ jobs:
     - name: Build
       run: make REDIS_CFLAGS=-Werror
     - name: Start redis-server
-      run: ./src/redis-server --cluster-enabled yes --daemonize yes --logfile external-redis.log --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes
+      run: |
+        ./src/redis-server --cluster-enabled yes --daemonize yes --save "" --logfile external-redis.log \
+          --enable-protected-configs yes --enable-debug-command yes --enable-module-command yes
     - name: Create a single node cluster
       run: ./src/redis-cli cluster addslots $(for slot in {0..16383}; do echo $slot; done); sleep 5
     - name: Run external test
@@ -51,3 +55,25 @@ jobs:
       with:
         name: test-external-cluster-log
         path: external-redis.log
+
+  test-external-nodebug:
+    runs-on: ubuntu-latest
+    timeout-minutes: 14400
+    steps:
+    - uses: actions/checkout@v2
+    - name: Build
+      run: make REDIS_CFLAGS=-Werror
+    - name: Start redis-server
+      run: |
+        ./src/redis-server --daemonize yes --save "" --logfile external-redis.log
+    - name: Run external test
+      run: |
+        ./runtest \
+          --host 127.0.0.1 --port 6379 \
+          --tags "-slow -needs:debug"
+    - name: Archive redis log
+      if: ${{ failure() }}
+      uses: actions/upload-artifact@v2
+      with:
+        name: test-external-redis-log
+        path: external-redis.log

@@ -46,12 +46,12 @@ start_server {tags {"dismiss external:skip"}} {
         # stream
         r xadd bigstream * entry1 $bigstr entry2 $bigstr
-        set digest [r debug digest]
+        set digest [debug_digest]
         r config set aof-use-rdb-preamble no
         r bgrewriteaof
         waitForBgrewriteaof r
         r debug loadaof
-        set newdigest [r debug digest]
+        set newdigest [debug_digest]
         assert {$digest eq $newdigest}
     }

@@ -38,7 +38,7 @@ set server_path [tmpdir "server.rdb-startup-test"]
 start_server [list overrides [list "dir" $server_path] keep_persistence true] {
     test {Server started empty with non-existing RDB file} {
-        r debug digest
+        debug_digest
     } {0000000000000000000000000000000000000000}
     # Save an RDB file, needed for the next test.
     r save
@@ -46,7 +46,7 @@ start_server [list overrides [list "dir" $server_path] keep_persistence true] {
 start_server [list overrides [list "dir" $server_path] keep_persistence true] {
     test {Server started empty with empty RDB file} {
-        r debug digest
+        debug_digest
     } {0000000000000000000000000000000000000000}
 }
@@ -63,16 +63,16 @@ start_server [list overrides [list "dir" $server_path] keep_persistence true] {
         set records [r xreadgroup GROUP mygroup Alice COUNT 2 STREAMS stream >]
         r xdel stream [lindex [lindex [lindex [lindex $records 0] 1] 1] 0]
         r xack stream mygroup [lindex [lindex [lindex [lindex $records 0] 1] 0] 0]
-        set digest [r debug digest]
+        set digest [debug_digest]
         r config set sanitize-dump-payload no
         r debug reload
-        set newdigest [r debug digest]
+        set newdigest [debug_digest]
         assert {$digest eq $newdigest}
     }
     test {Test RDB stream encoding - sanitize dump} {
         r config set sanitize-dump-payload yes
         r debug reload
-        set newdigest [r debug digest]
+        set newdigest [debug_digest]
         assert {$digest eq $newdigest}
     }
     # delete the stream, maybe valgrind will find something

@@ -317,7 +317,7 @@ if {!$::tls} { ;# fake_redis_node doesn't support TLS
         assert_match "OK" [r config set repl-diskless-sync yes]
         assert_match "OK" [r config set repl-diskless-sync-delay 0]
         test_redis_cli_rdb_dump
-    } {} {needs:repl}
+    } {} {needs:repl needs:debug}

    test "Scan mode" {
        r flushdb

@@ -88,14 +88,19 @@ proc assert_encoding {enc key} {
     if {$::ignoreencoding} {
         return
     }
-    set dbg [r debug object $key]
-    assert_match "* encoding:$enc *" $dbg
+    set val [r object encoding $key]
+    assert_match $enc $val
 }
 
 proc assert_type {type key} {
     assert_equal $type [r type $key]
 }
 
+proc assert_refcount {ref key} {
+    set val [r object refcount $key]
+    assert_equal $ref $val
+}
+
 # Wait for the specified condition to be true, with the specified number of
 # max retries and delay between retries. Otherwise the 'elsescript' is
 # executed.
@@ -159,7 +164,9 @@ proc test {name code {okpattern undefined} {tags {}}} {
     if {$::external} {
         catch {
             set r [redis [srv 0 host] [srv 0 port] 0 $::tls]
-            $r debug log "### Starting test $::cur_test"
+            catch {
+                $r debug log "### Starting test $::cur_test"
+            }
             $r close
         }
     } else {

@@ -822,11 +822,17 @@ proc punsubscribe {client {channels {}}} {
 }
 
 proc debug_digest_value {key} {
-    if {!$::ignoredigest} {
-        r debug digest-value $key
-    } else {
+    if {[lsearch $::denytags "needs:debug"] >= 0 || $::ignoredigest} {
         return "dummy-digest-value"
     }
+    r debug digest-value $key
+}
+
+proc debug_digest {{level 0}} {
+    if {[lsearch $::denytags "needs:debug"] >= 0 || $::ignoredigest} {
+        return "dummy-digest"
+    }
+    r $level debug digest
 }
 
 proc wait_for_blocked_client {} {

@@ -45,11 +45,11 @@ start_server {tags {"aofrw external:skip"}} {
         wait_load_handlers_disconnected
 
         # Get the data set digest
-        set d1 [r debug digest]
+        set d1 [debug_digest]
 
         # Load the AOF
         r debug loadaof
-        set d2 [r debug digest]
+        set d2 [debug_digest]
 
         # Make sure they are the same
         assert {$d1 eq $d2}
@@ -86,11 +86,11 @@ start_server {tags {"aofrw external:skip"} overrides {aof-use-rdb-preamble no}}
                 r lpush key $data
             }
             assert_equal [r object encoding key] $e
-            set d1 [r debug digest]
+            set d1 [debug_digest]
             r bgrewriteaof
             waitForBgrewriteaof r
             r debug loadaof
-            set d2 [r debug digest]
+            set d2 [debug_digest]
             if {$d1 ne $d2} {
                 error "assertion:$d1 is not equal to $d2"
             }
@@ -114,11 +114,11 @@ start_server {tags {"aofrw external:skip"} overrides {aof-use-rdb-preamble no}}
            if {$d ne {string}} {
                 assert_equal [r object encoding key] $e
             }
-            set d1 [r debug digest]
+            set d1 [debug_digest]
             r bgrewriteaof
             waitForBgrewriteaof r
             r debug loadaof
-            set d2 [r debug digest]
+            set d2 [debug_digest]
             if {$d1 ne $d2} {
                 error "assertion:$d1 is not equal to $d2"
             }
@@ -140,11 +140,11 @@ start_server {tags {"aofrw external:skip"} overrides {aof-use-rdb-preamble no}}
                 r hset key $data $data
             }
             assert_equal [r object encoding key] $e
-            set d1 [r debug digest]
+            set d1 [debug_digest]
             r bgrewriteaof
             waitForBgrewriteaof r
             r debug loadaof
-            set d2 [r debug digest]
+            set d2 [debug_digest]
             if {$d1 ne $d2} {
                 error "assertion:$d1 is not equal to $d2"
             }
@@ -166,11 +166,11 @@ start_server {tags {"aofrw external:skip"} overrides {aof-use-rdb-preamble no}}
                 r zadd key [expr rand()] $data
             }
             assert_equal [r object encoding key] $e
-            set d1 [r debug digest]
+            set d1 [debug_digest]
             r bgrewriteaof
             waitForBgrewriteaof r
             r debug loadaof
-            set d2 [r debug digest]
+            set d2 [debug_digest]
             if {$d1 ne $d2} {
                 error "assertion:$d1 is not equal to $d2"
             }

@@ -511,6 +511,7 @@ start_server {tags {"expire"}} {
             {restore foo6 * {*} ABSTTL}
             {restore foo7 * {*} absttl}
         }
+        close_replication_stream $repl
     } {} {needs:repl}
 
     # Start another server to test replication of TTLs
@@ -624,6 +625,7 @@ start_server {tags {"expire"}} {
             {persist foo}
             {del foo}
         }
+        close_replication_stream $repl
     } {} {needs:repl}
 
     test {EXPIRE with NX option on a key with ttl} {

@@ -115,7 +115,7 @@ start_server {tags {"scripting"}} {
     test {FUNCTION - test loading from rdb} {
         r debug reload
         r fcall test 0
-    } {hello}
+    } {hello} {needs:debug}
 
     test {FUNCTION - test fcall_ro with write command} {
         r function create lua test REPLACE {return redis.call('set', 'x', '1')}

@@ -250,8 +250,8 @@ start_server {tags {"keyspace"}} {
         r copy mylist{t} mynewlist{t}
         set digest [debug_digest_value mylist{t}]
         assert_equal $digest [debug_digest_value mynewlist{t}]
-        assert_equal 1 [r object refcount mylist{t}]
-        assert_equal 1 [r object refcount mynewlist{t}]
+        assert_refcount 1 mylist{t}
+        assert_refcount 1 mynewlist{t}
         r del mylist{t}
         assert_equal $digest [debug_digest_value mynewlist{t}]
     }
@@ -263,8 +263,8 @@ start_server {tags {"keyspace"}} {
         r copy set1{t} newset1{t}
         set digest [debug_digest_value set1{t}]
         assert_equal $digest [debug_digest_value newset1{t}]
-        assert_equal 1 [r object refcount set1{t}]
-        assert_equal 1 [r object refcount newset1{t}]
+        assert_refcount 1 set1{t}
+        assert_refcount 1 newset1{t}
         r del set1{t}
         assert_equal $digest [debug_digest_value newset1{t}]
     }
@@ -276,8 +276,8 @@ start_server {tags {"keyspace"}} {
         r copy set2{t} newset2{t}
         set digest [debug_digest_value set2{t}]
         assert_equal $digest [debug_digest_value newset2{t}]
-        assert_equal 1 [r object refcount set2{t}]
-        assert_equal 1 [r object refcount newset2{t}]
+        assert_refcount 1 set2{t}
+        assert_refcount 1 newset2{t}
         r del set2{t}
         assert_equal $digest [debug_digest_value newset2{t}]
     }
@@ -289,8 +289,8 @@ start_server {tags {"keyspace"}} {
         r copy zset1{t} newzset1{t}
         set digest [debug_digest_value zset1{t}]
         assert_equal $digest [debug_digest_value newzset1{t}]
-        assert_equal 1 [r object refcount zset1{t}]
-        assert_equal 1 [r object refcount newzset1{t}]
+        assert_refcount 1 zset1{t}
+        assert_refcount 1 newzset1{t}
         r del zset1{t}
         assert_equal $digest [debug_digest_value newzset1{t}]
     }
@@ -306,8 +306,8 @@ start_server {tags {"keyspace"}} {
         r copy zset2{t} newzset2{t}
         set digest [debug_digest_value zset2{t}]
         assert_equal $digest [debug_digest_value newzset2{t}]
-        assert_equal 1 [r object refcount zset2{t}]
-        assert_equal 1 [r object refcount newzset2{t}]
+        assert_refcount 1 zset2{t}
+        assert_refcount 1 newzset2{t}
         r del zset2{t}
         assert_equal $digest [debug_digest_value newzset2{t}]
         r config set zset-max-ziplist-entries $original_max
@@ -320,8 +320,8 @@ start_server {tags {"keyspace"}} {
         r copy hash1{t} newhash1{t}
         set digest [debug_digest_value hash1{t}]
         assert_equal $digest [debug_digest_value newhash1{t}]
-        assert_equal 1 [r object refcount hash1{t}]
-        assert_equal 1 [r object refcount newhash1{t}]
+        assert_refcount 1 hash1{t}
+        assert_refcount 1 newhash1{t}
         r del hash1{t}
         assert_equal $digest [debug_digest_value newhash1{t}]
     }
@@ -337,8 +337,8 @@ start_server {tags {"keyspace"}} {
         r copy hash2{t} newhash2{t}
         set digest [debug_digest_value hash2{t}]
         assert_equal $digest [debug_digest_value newhash2{t}]
-        assert_equal 1 [r object refcount hash2{t}]
-        assert_equal 1 [r object refcount newhash2{t}]
+        assert_refcount 1 hash2{t}
+        assert_refcount 1 newhash2{t}
         r del hash2{t}
         assert_equal $digest [debug_digest_value newhash2{t}]
         r config set hash-max-ziplist-entries $original_max
@@ -352,8 +352,8 @@ start_server {tags {"keyspace"}} {
         r copy mystream{t} mynewstream{t}
         set digest [debug_digest_value mystream{t}]
         assert_equal $digest [debug_digest_value mynewstream{t}]
-        assert_equal 1 [r object refcount mystream{t}]
-        assert_equal 1 [r object refcount mynewstream{t}]
+        assert_refcount 1 mystream{t}
+        assert_refcount 1 mynewstream{t}
         r del mystream{t}
         assert_equal $digest [debug_digest_value mynewstream{t}]
     }
@@ -379,8 +379,8 @@ start_server {tags {"keyspace"}} {
         r copy x{t} newx{t}
         set info [r xinfo stream x{t} full]
         assert_equal $info [r xinfo stream newx{t} full]
-        assert_equal 1 [r object refcount x{t}]
-        assert_equal 1 [r object refcount newx{t}]
+        assert_refcount 1 x{t}
+        assert_refcount 1 newx{t}
         r del x{t}
         assert_equal $info [r xinfo stream newx{t} full]
         r flushdb

@@ -10,7 +10,7 @@ start_server {tags {"latency-monitor needs:latency"}} {
         after 1100
         r debug sleep 0.5
         assert {[r latency history command] >= 3}
-    }
+    } {} {needs:debug}
 
     test {LATENCY HISTORY output is ok} {
         set min 250
@@ -38,20 +38,6 @@ start_server {tags {"latency-monitor needs:latency"}} {
         }
     }
 
-    test {LATENCY HISTORY / RESET with wrong event name is fine} {
-        assert {[llength [r latency history blabla]] == 0}
-        assert {[r latency reset blabla] == 0}
-    }
-
-    test {LATENCY DOCTOR produces some output} {
-        assert {[string length [r latency doctor]] > 0}
-    }
-
-    test {LATENCY RESET is able to reset events} {
-        assert {[r latency reset] > 0}
-        assert {[r latency latest] eq {}}
-    }
-
     test {LATENCY of expire events are correctly collected} {
         r config set latency-monitor-threshold 20
         r flushdb
@@ -72,6 +58,20 @@ start_server {tags {"latency-monitor needs:latency"}} {
         assert_match {*expire-cycle*} [r latency latest]
     }
 
+    test {LATENCY HISTORY / RESET with wrong event name is fine} {
+        assert {[llength [r latency history blabla]] == 0}
+        assert {[r latency reset blabla] == 0}
+    }
+
+    test {LATENCY DOCTOR produces some output} {
+        assert {[string length [r latency doctor]] > 0}
+    }
+
+    test {LATENCY RESET is able to reset events} {
+        assert {[r latency reset] > 0}
+        assert {[r latency latest] eq {}}
+    }
+
     test {LATENCY HELP should not have unexpected options} {
         catch {r LATENCY help xxx} e
         assert_match "*wrong number of arguments*" $e

@@ -61,7 +61,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
         r config set latency-monitor-threshold 5
         r latency reset
         r config set maxmemory 110mb ;# prevent further eviction (not to fail the digest test)
-        set digest [r debug digest]
+        set digest [debug_digest]
         catch {r config set activedefrag yes} e
         if {[r config get activedefrag] eq "activedefrag yes"} {
             # Wait for the active defrag to start working (decision once a
@@ -110,7 +110,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
             }
         }
 
         # verify the data isn't corrupted or changed
-        set newdigest [r debug digest]
+        set newdigest [debug_digest]
         assert {$digest eq $newdigest}
         r save ;# saving an rdb iterates over all the data / pointers
@@ -234,7 +234,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
         r config set latency-monitor-threshold 5
         r latency reset
 
-        set digest [r debug digest]
+        set digest [debug_digest]
         catch {r config set activedefrag yes} e
         if {[r config get activedefrag] eq "activedefrag yes"} {
             # wait for the active defrag to start working (decision once a second)
@@ -282,7 +282,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
             }
         }
         # verify the data isn't corrupted or changed
-        set newdigest [r debug digest]
+        set newdigest [debug_digest]
         assert {$digest eq $newdigest}
         r save ;# saving an rdb iterates over all the data / pointers
     } {OK}
@@ -330,7 +330,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
         r config set latency-monitor-threshold 5
         r latency reset
 
-        set digest [r debug digest]
+        set digest [debug_digest]
         catch {r config set activedefrag yes} e
         if {[r config get activedefrag] eq "activedefrag yes"} {
             # wait for the active defrag to start working (decision once a second)
@@ -383,7 +383,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
             assert {$misses < $elements}
         }
         # verify the data isn't corrupted or changed
-        set newdigest [r debug digest]
+        set newdigest [debug_digest]
         assert {$digest eq $newdigest}
         r save ;# saving an rdb iterates over all the data / pointers
         r del biglist1 ;# coverage for quicklistBookmarksClear
@@ -450,7 +450,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
         assert {$frag >= $expected_frag}
 
-        set digest [r debug digest]
+        set digest [debug_digest]
         catch {r config set activedefrag yes} e
         if {[r config get activedefrag] eq "activedefrag yes"} {
             # wait for the active defrag to start working (decision once a second)
@@ -486,7 +486,7 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r
         }
 
         # verify the data isn't corrupted or changed
-        set newdigest [r debug digest]
+        set newdigest [debug_digest]
         assert {$digest eq $newdigest}
         r save ;# saving an rdb iterates over all the data / pointers
     }

@@ -217,10 +217,10 @@ start_server {tags {"modules"}} {
         r select 0
-        set digest0 [r debug digest]
+        set digest0 [debug_digest]
 
         r select 1
-        set digest1 [r debug digest]
+        set digest1 [debug_digest]
 
         assert_equal $digest0 $digest1
     }

@@ -47,7 +47,7 @@ start_server {tags {"other"}} {
         waitForBgsave r
         r debug reload
         r get x
-    } {10} {needs:save}
+    } {10} {needs:debug needs:save}
 
     test {SELECT an out of range DB} {
         catch {r select 1000000} err
@@ -57,11 +57,11 @@ start_server {tags {"other"}} {
     tags {consistency} {
         proc check_consistency {dumpname code} {
             set dump [csvdump r]
-            set sha1 [r debug digest]
+            set sha1 [debug_digest]
 
             uplevel 1 $code
 
-            set sha1_after [r debug digest]
+            set sha1_after [debug_digest]
             if {$sha1 eq $sha1_after} {
                 return 1
             }
@@ -92,7 +92,7 @@ start_server {tags {"other"}} {
                 r debug reload
             }
         }
-    } {1}
+    } {1} {needs:debug}
 
     test {Same dataset digest if saving/reloading as AOF?} {
         if {$::ignoredigest} {
@@ -172,9 +172,11 @@ start_server {tags {"other"}} {
             set fd2 [socket [srv host] [srv port]]
         }
         fconfigure $fd2 -encoding binary -translation binary
-        puts -nonewline $fd2 "SELECT 9\r\n"
-        flush $fd2
-        gets $fd2
+        if {!$::singledb} {
+            puts -nonewline $fd2 "SELECT 9\r\n"
+            flush $fd2
+            gets $fd2
+        }
 
         for {set i 0} {$i < 100000} {incr i} {
             set q {}
@@ -328,7 +330,7 @@ start_server {tags {"other external:skip"}} {
         # size is power of two and over 4098, so it is 8192
         r set k3 v3
         assert_match "*table size: 8192*" [r debug HTSTATS 9]
-    } {} {needs:local-process}
+    } {} {needs:debug needs:local-process}
 }
 
 proc read_proc_title {pid} {

@@ -909,7 +909,7 @@ foreach cmdrepl {0 1} {
                 redis.call("incr","z")
             } 0
             wait_for_condition 50 100 {
-                [r -1 debug digest] eq [r debug digest]
+                [debug_digest -1] eq [debug_digest]
             } else {
                 fail "Master-Replica desync after Lua script using SELECT."
             }

@@ -1039,7 +1039,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
         # Restore server and client state
         r debug set-active-expire 1
         r select 9
-    } {OK} {singledb:skip}
+    } {OK} {singledb:skip needs:debug}
 
     foreach {pop} {BLPOP BLMPOP_LEFT} {
         test "$pop when new key is moved into place" {
@@ -1211,6 +1211,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
             {rpop mylist2{t} 3}
             {set foo{t} bar}
         }
+        close_replication_stream $repl
     } {} {needs:repl}
 
     test {LPUSHX, RPUSHX - generic} {
@@ -1632,6 +1633,7 @@ foreach {pop} {BLPOP BLMPOP_LEFT} {
             {lpop mylist{t} 3}
             {rpop mylist2{t} 3}
         }
+        close_replication_stream $repl
     } {} {needs:repl}
 
     foreach {type large} [array get largevalue] {

@@ -173,6 +173,7 @@ start_server {tags {"string"}} {
             {set foo bar}
             {del foo}
         }
+        close_replication_stream $repl
     } {} {needs:repl}
 
     test {GETEX without argument does not propagate to replica} {
@@ -185,6 +186,7 @@ start_server {tags {"string"}} {
             {set foo bar}
             {del foo}
         }
+        close_replication_stream $repl
     } {} {needs:repl}
 
     test {MGET} {

@@ -1219,6 +1219,7 @@ start_server {tags {"zset"}} {
             {zpopmin myzset{t} 3}
             {zpopmax myzset2{t} 3}
         }
+        close_replication_stream $repl
     } {} {needs:repl}
 
     foreach resp {3 2} {
@@ -2048,6 +2049,7 @@ start_server {tags {"zset"}} {
             {zpopmax myzset2{t} 3}
             {set foo{t} bar}
         }
+        close_replication_stream $repl
     } {} {needs:repl}
 
     test {ZSET skiplist order consistency when elements are moved} {