merge antirez/smallkeys

commit 846d8b3ea5

Makefile | 2
@@ -87,7 +87,7 @@ staticsymbols:
tclsh utils/build-static-symbols.tcl > staticsymbols.h

test:
tclsh8.5 tests/test_helper.tcl
tclsh8.5 tests/test_helper.tcl --tags "${TAGS}"

bench:
./redis-benchmark

TODO | 1
@@ -4,6 +4,7 @@ Redis TODO and Roadmap
VERSION 2.2 TODO (Optimizations and latency)
============================================

* Support for syslog(3).
* Lower the CPU usage.
* Lower the RAM usage everywhere possible.
* Specially encoded Sets (like Hashes).

redis.conf | 20
@@ -195,6 +195,26 @@ appendonly no
appendfsync everysec
# appendfsync no

# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none", which in practical terms means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no", which is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no

################################ VIRTUAL MEMORY ###############################

# Virtual Memory allows Redis to work with datasets bigger than the actual
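
The directive above pairs naturally with the test-suite changes later in this diff. The following is a hypothetical sketch, not part of the commit, of starting a test server with the option flipped to "yes" via the new start_server options introduced further down; the "overrides" values are applied on top of default.conf.

# Hypothetical sketch only: exercise no-appendfsync-on-rewrite from the
# refactored Tcl test suite; option and helper names follow the code below.
start_server {tags {"aof"} overrides {appendonly yes no-appendfsync-on-rewrite yes}} {
    test {writes are still readable with no-appendfsync-on-rewrite enabled} {
        r set foo bar
        r get foo
    } {bar}
}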

@@ -7,6 +7,7 @@ static struct redisFunctionSym symsTable[] = {
{"addReplyBulk",(unsigned long)addReplyBulk},
{"addReplyBulkCString",(unsigned long)addReplyBulkCString},
{"addReplyBulkLen",(unsigned long)addReplyBulkLen},
{"addReplyBulkSds",(unsigned long)addReplyBulkSds},
{"addReplyDouble",(unsigned long)addReplyDouble},
{"addReplyLongLong",(unsigned long)addReplyLongLong},
{"addReplySds",(unsigned long)addReplySds},
@@ -45,21 +46,27 @@ static struct redisFunctionSym symsTable[] = {
{"createSortOperation",(unsigned long)createSortOperation},
{"createStringObject",(unsigned long)createStringObject},
{"createStringObjectFromLongLong",(unsigned long)createStringObjectFromLongLong},
{"createVmPointer",(unsigned long)createVmPointer},
{"createZsetObject",(unsigned long)createZsetObject},
{"daemonize",(unsigned long)daemonize},
{"dbAdd",(unsigned long)dbAdd},
{"dbDelete",(unsigned long)dbDelete},
{"dbExists",(unsigned long)dbExists},
{"dbRandomKey",(unsigned long)dbRandomKey},
{"dbReplace",(unsigned long)dbReplace},
{"dbsizeCommand",(unsigned long)dbsizeCommand},
{"debugCommand",(unsigned long)debugCommand},
{"decrCommand",(unsigned long)decrCommand},
{"decrRefCount",(unsigned long)decrRefCount},
{"decrbyCommand",(unsigned long)decrbyCommand},
{"delCommand",(unsigned long)delCommand},
{"deleteIfSwapped",(unsigned long)deleteIfSwapped},
{"deleteIfVolatile",(unsigned long)deleteIfVolatile},
{"deleteKey",(unsigned long)deleteKey},
{"dictEncObjKeyCompare",(unsigned long)dictEncObjKeyCompare},
{"dictListDestructor",(unsigned long)dictListDestructor},
{"dictObjKeyCompare",(unsigned long)dictObjKeyCompare},
{"dictRedisObjectDestructor",(unsigned long)dictRedisObjectDestructor},
{"dictSdsDestructor",(unsigned long)dictSdsDestructor},
{"dictSdsKeyCompare",(unsigned long)dictSdsKeyCompare},
{"dictVanillaFree",(unsigned long)dictVanillaFree},
{"discardCommand",(unsigned long)discardCommand},
{"dontWaitForSwappedKey",(unsigned long)dontWaitForSwappedKey},
@@ -196,6 +203,7 @@ static struct redisFunctionSym symsTable[] = {
{"pushGenericCommand",(unsigned long)pushGenericCommand},
{"qsortCompareSetsByCardinality",(unsigned long)qsortCompareSetsByCardinality},
{"qsortCompareZsetopsrcByCardinality",(unsigned long)qsortCompareZsetopsrcByCardinality},
{"qsortRedisCommands",(unsigned long)qsortRedisCommands},
{"queueIOJob",(unsigned long)queueIOJob},
{"queueMultiCommand",(unsigned long)queueMultiCommand},
{"randomkeyCommand",(unsigned long)randomkeyCommand},
@@ -245,7 +253,6 @@ static struct redisFunctionSym symsTable[] = {
{"scardCommand",(unsigned long)scardCommand},
{"sdiffCommand",(unsigned long)sdiffCommand},
{"sdiffstoreCommand",(unsigned long)sdiffstoreCommand},
{"sdsDictKeyCompare",(unsigned long)sdsDictKeyCompare},
{"sdscatrepr",(unsigned long)sdscatrepr},
{"segvHandler",(unsigned long)segvHandler},
{"selectCommand",(unsigned long)selectCommand},
@@ -269,6 +276,7 @@ static struct redisFunctionSym symsTable[] = {
{"slaveofCommand",(unsigned long)slaveofCommand},
{"smoveCommand",(unsigned long)smoveCommand},
{"sortCommand",(unsigned long)sortCommand},
{"sortCommandTable",(unsigned long)sortCommandTable},
{"sortCompare",(unsigned long)sortCompare},
{"spawnIOThread",(unsigned long)spawnIOThread},
{"spopCommand",(unsigned long)spopCommand},
@@ -290,6 +298,7 @@ static struct redisFunctionSym symsTable[] = {
{"syncWithMaster",(unsigned long)syncWithMaster},
{"syncWrite",(unsigned long)syncWrite},
{"touchWatchedKey",(unsigned long)touchWatchedKey},
{"touchWatchedKeysOnFlush",(unsigned long)touchWatchedKeysOnFlush},
{"tryFreeOneObjectFromFreelist",(unsigned long)tryFreeOneObjectFromFreelist},
{"tryObjectEncoding",(unsigned long)tryObjectEncoding},
{"tryResizeHashTables",(unsigned long)tryResizeHashTables},

@@ -1,4 +1,4 @@
set defaults [list [list appendonly yes] [list appendfilename appendonly.aof]]
set defaults { appendonly {yes} appendfilename {appendonly.aof} }
set server_path [tmpdir server.aof]
set aof_path "$server_path/appendonly.aof"

@@ -16,65 +16,67 @@ proc create_aof {code} {

proc start_server_aof {overrides code} {
upvar defaults defaults srv srv server_path server_path
set _defaults $defaults
set srv [start_server default.conf [lappend _defaults $overrides]]
set config [concat $defaults $overrides]
set srv [start_server [list overrides $config]]
uplevel 1 $code
kill_server $srv
}

## Test the server doesn't start when the AOF contains an unfinished MULTI
create_aof {
append_to_aof [formatCommand set foo hello]
append_to_aof [formatCommand multi]
append_to_aof [formatCommand set bar world]
}

start_server_aof [list dir $server_path] {
test {Unfinished MULTI: Server should not have been started} {
is_alive $srv
} {0}

test {Unfinished MULTI: Server should have logged an error} {
exec cat [dict get $srv stdout] | tail -n1
} {*Unexpected end of file reading the append only file*}
}

## Test that the server exits when the AOF contains a short read
create_aof {
append_to_aof [formatCommand set foo hello]
append_to_aof [string range [formatCommand set bar world] 0 end-1]
}

start_server_aof [list dir $server_path] {
test {Short read: Server should not have been started} {
is_alive $srv
} {0}

test {Short read: Server should have logged an error} {
exec cat [dict get $srv stdout] | tail -n1
} {*Bad file format reading the append only file*}
}

## Test that redis-check-aof indeed sees this AOF is not valid
test {Short read: Utility should confirm the AOF is not valid} {
catch {
exec ./redis-check-aof $aof_path
} str
set _ $str
} {*not valid*}

test {Short read: Utility should be able to fix the AOF} {
exec echo y | ./redis-check-aof --fix $aof_path
} {*Successfully truncated AOF*}

## Test that the server can be started using the truncated AOF
start_server_aof [list dir $server_path] {
test {Fixed AOF: Server should have been started} {
is_alive $srv
} {1}

test {Fixed AOF: Keyspace should contain values that were parsable} {
set client [redis [dict get $srv host] [dict get $srv port]]
list [$client get foo] [$client get bar]
} {hello {}}
tags {"aof"} {
## Test the server doesn't start when the AOF contains an unfinished MULTI
create_aof {
append_to_aof [formatCommand set foo hello]
append_to_aof [formatCommand multi]
append_to_aof [formatCommand set bar world]
}

start_server_aof [list dir $server_path] {
test {Unfinished MULTI: Server should not have been started} {
is_alive $srv
} {0}

test {Unfinished MULTI: Server should have logged an error} {
exec cat [dict get $srv stdout] | tail -n1
} {*Unexpected end of file reading the append only file*}
}

## Test that the server exits when the AOF contains a short read
create_aof {
append_to_aof [formatCommand set foo hello]
append_to_aof [string range [formatCommand set bar world] 0 end-1]
}

start_server_aof [list dir $server_path] {
test {Short read: Server should not have been started} {
is_alive $srv
} {0}

test {Short read: Server should have logged an error} {
exec cat [dict get $srv stdout] | tail -n1
} {*Bad file format reading the append only file*}
}

## Test that redis-check-aof indeed sees this AOF is not valid
test {Short read: Utility should confirm the AOF is not valid} {
catch {
exec ./redis-check-aof $aof_path
} str
set _ $str
} {*not valid*}

test {Short read: Utility should be able to fix the AOF} {
exec echo y | ./redis-check-aof --fix $aof_path
} {*Successfully truncated AOF*}

## Test that the server can be started using the truncated AOF
start_server_aof [list dir $server_path] {
test {Fixed AOF: Server should have been started} {
is_alive $srv
} {1}

test {Fixed AOF: Keyspace should contain values that were parsable} {
set client [redis [dict get $srv host] [dict get $srv port]]
list [$client get foo] [$client get bar]
} {hello {}}
}
}
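
For clarity, the rewritten start_server_aof above simply concatenates the file-level defaults with the per-test overrides and hands the flat list to start_server. A minimal sketch of that merging, using only values that appear in this file:

# Minimal sketch (assumption: evaluated inside this test file, where the
# defaults list and the tmpdir helper are defined as shown above).
set defaults { appendonly {yes} appendfilename {appendonly.aof} }
set overrides [list dir [tmpdir server.aof]]
set config [concat $defaults $overrides]
# config is now a flat key/value list:
#   appendonly {yes} appendfilename {appendonly.aof} dir /.../server.aof
# which start_server receives as {overrides $config} and applies on top of
# the directives read from the base config file.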

@@ -1,7 +1,7 @@
start_server default.conf {} {
start_server {tags {"repl"}} {
r set mykey foo

start_server default.conf {} {
start_server {} {
test {Second server should have role master at first} {
s role
} {master}

@@ -1,3 +1,6 @@
set ::global_overrides {}
set ::tags {}

proc error_and_quit {config_file error} {
puts "!!COULD NOT START REDIS-SERVER\n"
puts "CONFIGURATION:"
@@ -27,11 +30,15 @@ proc kill_server config {
set pid [dict get $config pid]

# check for leaks
catch {
if {[string match {*Darwin*} [exec uname -a]]} {
test "Check for memory leaks (pid $pid)" {
exec leaks $pid
} {*0 leaks*}
if {![dict exists $config "skipleaks"]} {
catch {
if {[string match {*Darwin*} [exec uname -a]]} {
tags {"leaks"} {
test "Check for memory leaks (pid $pid)" {
exec leaks $pid
} {*0 leaks*}
}
}
}
}

@@ -78,9 +85,35 @@ proc ping_server {host port} {
return $retval
}

set ::global_overrides {}
proc start_server {filename overrides {code undefined}} {
set data [split [exec cat "tests/assets/$filename"] "\n"]
# doesn't really belong here, but highly coupled to code in start_server
proc tags {tags code} {
set ::tags [concat $::tags $tags]
uplevel 1 $code
set ::tags [lrange $::tags 0 end-[llength $tags]]
}

proc start_server {options {code undefined}} {
# setup defaults
set baseconfig "default.conf"
set overrides {}
set tags {}

# parse options
foreach {option value} $options {
switch $option {
"config" {
set baseconfig $value }
"overrides" {
set overrides $value }
"tags" {
set tags $value
set ::tags [concat $::tags $value] }
default {
error "Unknown option $option" }
}
}

set data [split [exec cat "tests/assets/$baseconfig"] "\n"]
set config {}
foreach line $data {
if {[string length $line] > 0 && [string index $line 0] ne "#"} {
@@ -98,9 +131,7 @@ proc start_server {filename overrides {code undefined}} {
dict set config port [incr ::port]

# apply overrides from global space and arguments
foreach override [concat $::global_overrides $overrides] {
set directive [lrange $override 0 0]
set arguments [lrange $override 1 end]
foreach {directive arguments} [concat $::global_overrides $overrides] {
dict set config $directive $arguments
}

@@ -177,19 +208,40 @@ proc start_server {filename overrides {code undefined}} {
lappend ::servers $srv

# execute provided block
set curnum $::testnum
catch { uplevel 1 $code } err
if {$curnum == $::testnum} {
# don't check for leaks when no tests were executed
dict set srv "skipleaks" 1
}

# pop the server object
set ::servers [lrange $::servers 0 end-1]

kill_server $srv

if {[string length $err] > 0} {
# allow an exception to bubble up the call chain but still kill this
# server, because we want to reuse the ports when the tests are re-run
if {$err eq "exception"} {
puts [format "Logged warnings (pid %d):" [dict get $srv "pid"]]
set warnings [warnings_from_file [dict get $srv "stdout"]]
if {[string length $warnings] > 0} {
puts "$warnings"
} else {
puts "(none)"
}
# kill this server without checking for leaks
dict set srv "skipleaks" 1
kill_server $srv
error "exception"
} elseif {[string length $err] > 0} {
puts "Error executing the suite, aborting..."
puts $err
exit 1
}

set ::tags [lrange $::tags 0 end-[llength $tags]]
kill_server $srv
} else {
set ::tags [lrange $::tags 0 end-[llength $tags]]
set _ $srv
}
}
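
With the new signature, start_server takes a single options list instead of a config file name plus overrides. A hedged usage sketch, built only from the option names parsed above and the r helper used throughout the suite:

# Hypothetical usage sketch of the new single-argument start_server:
# "config" picks the base config file, "overrides" patches directives on top,
# and "tags" is pushed onto ::tags for the duration of the block.
start_server {config default.conf tags {"basic"} overrides {appendonly no}} {
    test {SET and GET round trip} {
        r set mykey foo
        r get mykey
    } {foo}
}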

@@ -3,20 +3,34 @@ set ::failed 0
set ::testnum 0

proc test {name code okpattern} {
# abort if tagged with a tag to deny
foreach tag $::denytags {
if {[lsearch $::tags $tag] >= 0} {
return
}
}

# check if tagged with at least 1 tag to allow when there *is* a list
# of tags to allow, because default policy is to run everything
if {[llength $::allowtags] > 0} {
set matched 0
foreach tag $::allowtags {
if {[lsearch $::tags $tag] >= 0} {
incr matched
}
}
if {$matched < 1} {
return
}
}

incr ::testnum
# if {$::testnum < $::first || $::testnum > $::last} return
puts -nonewline [format "#%03d %-68s " $::testnum $name]
flush stdout
if {[catch {set retval [uplevel 1 $code]} error]} {
puts "ERROR\n\nLogged warnings:"
foreach file [glob tests/tmp/server.[pid].*/stdout] {
set warnings [warnings_from_file $file]
if {[string length $warnings] > 0} {
puts $warnings
}
}
puts "Script died with $error"
exit 1
puts "EXCEPTION"
puts "\nCaught error: $error"
error "exception"
}
if {$okpattern eq $retval || [string match $okpattern $retval]} {
puts "PASSED"
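
The tag filter in the new test proc boils down to two checks: any tag on the deny list vetoes the test, and when an allow list exists at least one tag must match. A small self-contained sketch of that decision, with hypothetical tag values:

# Self-contained sketch of the same allow/deny decision (hypothetical values).
proc should_run {testtags allowtags denytags} {
    foreach tag $denytags {
        if {[lsearch $testtags $tag] >= 0} { return 0 }   ;# deny always wins
    }
    if {[llength $allowtags] > 0} {
        foreach tag $allowtags {
            if {[lsearch $testtags $tag] >= 0} { return 1 }
        }
        return 0   ;# allow list present but nothing matched
    }
    return 1       ;# default policy: run everything
}
puts [should_run {basic slow} {} {slow}]   ;# 0: denied by "slow"
puts [should_run {basic} {basic} {}]       ;# 1: allowed explicitly
puts [should_run {zset} {basic} {}]        ;# 0: allow list has no match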

@@ -13,9 +13,10 @@ set ::host 127.0.0.1
set ::port 16379
set ::traceleaks 0
set ::valgrind 0
set ::denytags {}
set ::allowtags {}

proc execute_tests name {
set cur $::testnum
source "tests/$name.tcl"
}

@@ -92,4 +93,31 @@ proc main {} {
cleanup
}

main
# parse arguments
for {set j 0} {$j < [llength $argv]} {incr j} {
set opt [lindex $argv $j]
set arg [lindex $argv [expr $j+1]]
if {$opt eq {--tags}} {
foreach tag $arg {
if {[string index $tag 0] eq "-"} {
lappend ::denytags [string range $tag 1 end]
} else {
lappend ::allowtags $tag
}
}
incr j
} else {
puts "Wrong argument: $opt"
exit 1
}
}

if {[catch { main } err]} {
if {[string length $err] > 0} {
# only display error when not generated by the test suite
if {$err ne "exception"} {
puts $err
}
exit 1
}
}
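
The --tags argument accepts a whitespace-separated list where a leading "-" marks a tag to deny; this is also what the Makefile's new TAGS variable feeds through. A hypothetical worked example of what the loop above produces:

# Hypothetical input, e.g. from: tclsh8.5 tests/test_helper.tcl --tags "list -slow"
set arg "list -slow"
set ::allowtags {}
set ::denytags {}
foreach tag $arg {
    if {[string index $tag 0] eq "-"} {
        lappend ::denytags [string range $tag 1 end]
    } else {
        lappend ::allowtags $tag
    }
}
# ::allowtags -> list    ::denytags -> slow
# so only tests tagged "list" run, and anything tagged "slow" is skipped.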

@@ -1,4 +1,4 @@
start_server default.conf {{requirepass foobar}} {
start_server {tags {"auth"} overrides {requirepass foobar}} {
test {AUTH fails when a wrong password is given} {
catch {r auth wrong!} err
format $err

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"basic"}} {
test {DEL all keys to start with a clean DB} {
foreach key [r keys *] {r del $key}
r dbsize
@@ -52,46 +52,48 @@ start_server default.conf {} {
r get foo
} [string repeat "abcd" 1000000]

test {Very big payload random access} {
set err {}
array set payload {}
for {set j 0} {$j < 100} {incr j} {
set size [expr 1+[randomInt 100000]]
set buf [string repeat "pl-$j" $size]
set payload($j) $buf
r set bigpayload_$j $buf
}
for {set j 0} {$j < 1000} {incr j} {
set index [randomInt 100]
set buf [r get bigpayload_$index]
if {$buf != $payload($index)} {
set err "Values differ: I set '$payload($index)' but I read back '$buf'"
break
tags {"slow"} {
test {Very big payload random access} {
set err {}
array set payload {}
for {set j 0} {$j < 100} {incr j} {
set size [expr 1+[randomInt 100000]]
set buf [string repeat "pl-$j" $size]
set payload($j) $buf
r set bigpayload_$j $buf
}
}
unset payload
set _ $err
} {}

test {SET 10000 numeric keys and access all them in reverse order} {
set err {}
for {set x 0} {$x < 10000} {incr x} {
r set $x $x
}
set sum 0
for {set x 9999} {$x >= 0} {incr x -1} {
set val [r get $x]
if {$val ne $x} {
set err "Element at position $x is $val instead of $x"
break
for {set j 0} {$j < 1000} {incr j} {
set index [randomInt 100]
set buf [r get bigpayload_$index]
if {$buf != $payload($index)} {
set err "Values differ: I set '$payload($index)' but I read back '$buf'"
break
}
}
}
set _ $err
} {}
unset payload
set _ $err
} {}

test {DBSIZE should be 10101 now} {
r dbsize
} {10101}
test {SET 10000 numeric keys and access all them in reverse order} {
set err {}
for {set x 0} {$x < 10000} {incr x} {
r set $x $x
}
set sum 0
for {set x 9999} {$x >= 0} {incr x -1} {
set val [r get $x]
if {$val ne $x} {
set err "Element at position $x is $val instead of $x"
break
}
}
set _ $err
} {}

test {DBSIZE should be 10101 now} {
r dbsize
} {10101}
}

test {INCR against non existing key} {
set res {}

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"cas"}} {
test {EXEC works on WATCHed key not modified} {
r watch x y z
r watch k

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"expire"}} {
test {EXPIRE - don't set timeouts multiple times} {
r set x foobar
set v1 [r expire x 5]
@@ -12,10 +12,12 @@ start_server default.conf {} {
r get x
} {foobar}

test {EXPIRE - After 6 seconds the key should no longer be here} {
after 6000
list [r get x] [r exists x]
} {{} 0}
tags {"slow"} {
test {EXPIRE - After 6 seconds the key should no longer be here} {
after 6000
list [r get x] [r exists x]
} {{} 0}
}

test {EXPIRE - Delete on write policy} {
r del x
@@ -46,10 +48,12 @@ start_server default.conf {} {
r get y
} {foo}

test {SETEX - Wait for the key to expire} {
after 3000
r get y
} {}
tags {"slow"} {
test {SETEX - Wait for the key to expire} {
after 3000
r get y
} {}
}

test {SETEX - Wrong time parameter} {
catch {r setex z -10 foo} e

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {} {
test {SAVE - make sure there are all the types as values} {
# Wait for a background saving in progress to terminate
waitForBgsave r
@@ -12,20 +12,22 @@ start_server default.conf {} {
r save
} {OK}

foreach fuzztype {binary alpha compr} {
test "FUZZ stresser with data model $fuzztype" {
set err 0
for {set i 0} {$i < 10000} {incr i} {
set fuzz [randstring 0 512 $fuzztype]
r set foo $fuzz
set got [r get foo]
if {$got ne $fuzz} {
set err [list $fuzz $got]
break
tags {"slow"} {
foreach fuzztype {binary alpha compr} {
test "FUZZ stresser with data model $fuzztype" {
set err 0
for {set i 0} {$i < 10000} {incr i} {
set fuzz [randstring 0 512 $fuzztype]
r set foo $fuzz
set got [r get foo]
if {$got ne $fuzz} {
set err [list $fuzz $got]
break
}
}
}
set _ $err
} {0}
set _ $err
} {0}
}
}

test {BGSAVE} {

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {} {
test {Handle an empty query well} {
set fd [r channel]
puts -nonewline $fd "\r\n"

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"sort"}} {
test {SORT ALPHA against integer encoded strings} {
r del mylist
r lpush mylist 2
@@ -8,130 +8,132 @@ start_server default.conf {} {
r sort mylist alpha
} {1 10 2 3}

test {Create a random list and a random set} {
set tosort {}
array set seenrand {}
for {set i 0} {$i < 10000} {incr i} {
while 1 {
# Make sure all the weights are different because
# Redis does not use a stable sort but Tcl does.
randpath {
set rint [expr int(rand()*1000000)]
} {
set rint [expr rand()]
}
if {![info exists seenrand($rint)]} break
}
set seenrand($rint) x
r lpush tosort $i
r sadd tosort-set $i
r set weight_$i $rint
r hset wobj_$i weight $rint
lappend tosort [list $i $rint]
}
set sorted [lsort -index 1 -real $tosort]
tags {"slow"} {
set res {}
for {set i 0} {$i < 10000} {incr i} {
lappend res [lindex $sorted $i 0]
}
format {}
} {}

test {SORT with BY against the newly created list} {
r sort tosort {BY weight_*}
} $res

test {SORT with BY (hash field) against the newly created list} {
r sort tosort {BY wobj_*->weight}
} $res

test {SORT with GET (key+hash) with sanity check of each element (list)} {
set err {}
set l1 [r sort tosort GET # GET weight_*]
set l2 [r sort tosort GET # GET wobj_*->weight]
foreach {id1 w1} $l1 {id2 w2} $l2 {
set realweight [r get weight_$id1]
if {$id1 != $id2} {
set err "ID mismatch $id1 != $id2"
break
test {Create a random list and a random set} {
set tosort {}
array set seenrand {}
for {set i 0} {$i < 10000} {incr i} {
while 1 {
# Make sure all the weights are different because
# Redis does not use a stable sort but Tcl does.
randpath {
set rint [expr int(rand()*1000000)]
} {
set rint [expr rand()]
}
if {![info exists seenrand($rint)]} break
}
set seenrand($rint) x
r lpush tosort $i
r sadd tosort-set $i
r set weight_$i $rint
r hset wobj_$i weight $rint
lappend tosort [list $i $rint]
}
if {$realweight != $w1 || $realweight != $w2} {
set err "Weights mismatch! w1: $w1 w2: $w2 real: $realweight"
break
set sorted [lsort -index 1 -real $tosort]
for {set i 0} {$i < 10000} {incr i} {
lappend res [lindex $sorted $i 0]
}
}
set _ $err
} {}
format {}
} {}

test {SORT with BY, but against the newly created set} {
r sort tosort-set {BY weight_*}
} $res
test {SORT with BY against the newly created list} {
r sort tosort {BY weight_*}
} $res

test {SORT with BY (hash field), but against the newly created set} {
r sort tosort-set {BY wobj_*->weight}
} $res
test {SORT with BY (hash field) against the newly created list} {
r sort tosort {BY wobj_*->weight}
} $res

test {SORT with BY and STORE against the newly created list} {
r sort tosort {BY weight_*} store sort-res
r lrange sort-res 0 -1
} $res
test {SORT with GET (key+hash) with sanity check of each element (list)} {
set err {}
set l1 [r sort tosort GET # GET weight_*]
set l2 [r sort tosort GET # GET wobj_*->weight]
foreach {id1 w1} $l1 {id2 w2} $l2 {
set realweight [r get weight_$id1]
if {$id1 != $id2} {
set err "ID mismatch $id1 != $id2"
break
}
if {$realweight != $w1 || $realweight != $w2} {
set err "Weights mismatch! w1: $w1 w2: $w2 real: $realweight"
break
}
}
set _ $err
} {}

test {SORT with BY (hash field) and STORE against the newly created list} {
r sort tosort {BY wobj_*->weight} store sort-res
r lrange sort-res 0 -1
} $res
test {SORT with BY, but against the newly created set} {
r sort tosort-set {BY weight_*}
} $res

test {SORT direct, numeric, against the newly created list} {
r sort tosort
} [lsort -integer $res]
test {SORT with BY (hash field), but against the newly created set} {
r sort tosort-set {BY wobj_*->weight}
} $res

test {SORT decreasing sort} {
r sort tosort {DESC}
} [lsort -decreasing -integer $res]
test {SORT with BY and STORE against the newly created list} {
r sort tosort {BY weight_*} store sort-res
r lrange sort-res 0 -1
} $res

test {SORT speed, sorting 10000 elements list using BY, 100 times} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {BY weight_* LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}
test {SORT with BY (hash field) and STORE against the newly created list} {
r sort tosort {BY wobj_*->weight} store sort-res
r lrange sort-res 0 -1
} $res

test {SORT speed, as above but against hash field} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {BY wobj_*->weight LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}
test {SORT direct, numeric, against the newly created list} {
r sort tosort
} [lsort -integer $res]

test {SORT speed, sorting 10000 elements list directly, 100 times} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}
test {SORT decreasing sort} {
r sort tosort {DESC}
} [lsort -decreasing -integer $res]

test {SORT speed, pseudo-sorting 10000 elements list, BY <const>, 100 times} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {BY nokey LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}
test {SORT speed, sorting 10000 elements list using BY, 100 times} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {BY weight_* LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}

test {SORT speed, as above but against hash field} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {BY wobj_*->weight LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}

test {SORT speed, sorting 10000 elements list directly, 100 times} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}

test {SORT speed, pseudo-sorting 10000 elements list, BY <const>, 100 times} {
set start [clock clicks -milliseconds]
for {set i 0} {$i < 100} {incr i} {
set sorted [r sort tosort {BY nokey LIMIT 0 10}]
}
set elapsed [expr [clock clicks -milliseconds]-$start]
puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
flush stdout
format {}
} {}
}

test {SORT regression for issue #19, sorting floats} {
r flushdb

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"hash"}} {
test {HSET/HLEN - Small hash creation} {
array set smallhash {}
for {set i 0} {$i < 8} {incr i} {

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"list"}} {
test {Basic LPUSH, RPUSH, LLENGTH, LINDEX} {
set res [r lpush mylist a]
append res [r lpush mylist b]

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"set"}} {
test {SADD, SCARD, SISMEMBER, SMEMBERS basics} {
r sadd myset foo
r sadd myset bar

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"zset"}} {
test {ZSET basic ZADD and score update} {
r zadd ztmp 10 x
r zadd ztmp 20 y
@@ -162,85 +162,87 @@ start_server default.conf {} {
r zrangebyscore zset 2 4 withscores
} {b 2 c 3 d 4}

test {ZRANGEBYSCORE fuzzy test, 100 ranges in 1000 elements sorted set} {
set err {}
r del zset
for {set i 0} {$i < 1000} {incr i} {
r zadd zset [expr rand()] $i
}
for {set i 0} {$i < 100} {incr i} {
set min [expr rand()]
set max [expr rand()]
if {$min > $max} {
set aux $min
set min $max
set max $aux
tags {"slow"} {
test {ZRANGEBYSCORE fuzzy test, 100 ranges in 1000 elements sorted set} {
set err {}
r del zset
for {set i 0} {$i < 1000} {incr i} {
r zadd zset [expr rand()] $i
}
set low [r zrangebyscore zset -inf $min]
set ok [r zrangebyscore zset $min $max]
set high [r zrangebyscore zset $max +inf]
set lowx [r zrangebyscore zset -inf ($min]
set okx [r zrangebyscore zset ($min ($max]
set highx [r zrangebyscore zset ($max +inf]
for {set i 0} {$i < 100} {incr i} {
set min [expr rand()]
set max [expr rand()]
if {$min > $max} {
set aux $min
set min $max
set max $aux
}
set low [r zrangebyscore zset -inf $min]
set ok [r zrangebyscore zset $min $max]
set high [r zrangebyscore zset $max +inf]
set lowx [r zrangebyscore zset -inf ($min]
set okx [r zrangebyscore zset ($min ($max]
set highx [r zrangebyscore zset ($max +inf]

if {[r zcount zset -inf $min] != [llength $low]} {
append err "Error, len does not match zcount\n"
}
if {[r zcount zset $min $max] != [llength $ok]} {
append err "Error, len does not match zcount\n"
}
if {[r zcount zset $max +inf] != [llength $high]} {
append err "Error, len does not match zcount\n"
}
if {[r zcount zset -inf ($min] != [llength $lowx]} {
append err "Error, len does not match zcount\n"
}
if {[r zcount zset ($min ($max] != [llength $okx]} {
append err "Error, len does not match zcount\n"
}
if {[r zcount zset ($max +inf] != [llength $highx]} {
append err "Error, len does not match zcount\n"
}
if {[r zcount zset -inf $min] != [llength $low]} {
append err "Error, len does not match zcount\n"
}
if {[r zcount zset $min $max] != [llength $ok]} {
append err "Error, len does not match zcount\n"
}
if {[r zcount zset $max +inf] != [llength $high]} {
append err "Error, len does not match zcount\n"
}
if {[r zcount zset -inf ($min] != [llength $lowx]} {
append err "Error, len does not match zcount\n"
}
if {[r zcount zset ($min ($max] != [llength $okx]} {
append err "Error, len does not match zcount\n"
}
if {[r zcount zset ($max +inf] != [llength $highx]} {
append err "Error, len does not match zcount\n"
}

foreach x $low {
set score [r zscore zset $x]
if {$score > $min} {
append err "Error, score for $x is $score > $min\n"
foreach x $low {
set score [r zscore zset $x]
if {$score > $min} {
append err "Error, score for $x is $score > $min\n"
}
}
foreach x $lowx {
set score [r zscore zset $x]
if {$score >= $min} {
append err "Error, score for $x is $score >= $min\n"
}
}
foreach x $ok {
set score [r zscore zset $x]
if {$score < $min || $score > $max} {
append err "Error, score for $x is $score outside $min-$max range\n"
}
}
foreach x $okx {
set score [r zscore zset $x]
if {$score <= $min || $score >= $max} {
append err "Error, score for $x is $score outside $min-$max open range\n"
}
}
foreach x $high {
set score [r zscore zset $x]
if {$score < $max} {
append err "Error, score for $x is $score < $max\n"
}
}
foreach x $highx {
set score [r zscore zset $x]
if {$score <= $max} {
append err "Error, score for $x is $score <= $max\n"
}
}
}
foreach x $lowx {
set score [r zscore zset $x]
if {$score >= $min} {
append err "Error, score for $x is $score >= $min\n"
}
}
foreach x $ok {
set score [r zscore zset $x]
if {$score < $min || $score > $max} {
append err "Error, score for $x is $score outside $min-$max range\n"
}
}
foreach x $okx {
set score [r zscore zset $x]
if {$score <= $min || $score >= $max} {
append err "Error, score for $x is $score outside $min-$max open range\n"
}
}
foreach x $high {
set score [r zscore zset $x]
if {$score < $max} {
append err "Error, score for $x is $score < $max\n"
}
}
foreach x $highx {
set score [r zscore zset $x]
if {$score <= $max} {
append err "Error, score for $x is $score <= $max\n"
}
}
}
set _ $err
} {}
set _ $err
} {}
}

test {ZRANGEBYSCORE with LIMIT} {
r del zset
@@ -356,47 +358,49 @@ start_server default.conf {} {
list [r zinterstore zsetc 2 zseta zsetb aggregate max] [r zrange zsetc 0 -1 withscores]
} {2 {b 2 c 3}}

test {ZSETs skiplist implementation backlink consistency test} {
set diff 0
set elements 10000
for {set j 0} {$j < $elements} {incr j} {
r zadd myzset [expr rand()] "Element-$j"
r zrem myzset "Element-[expr int(rand()*$elements)]"
}
set l1 [r zrange myzset 0 -1]
set l2 [r zrevrange myzset 0 -1]
for {set j 0} {$j < [llength $l1]} {incr j} {
if {[lindex $l1 $j] ne [lindex $l2 end-$j]} {
incr diff
tags {"slow"} {
test {ZSETs skiplist implementation backlink consistency test} {
set diff 0
set elements 10000
for {set j 0} {$j < $elements} {incr j} {
r zadd myzset [expr rand()] "Element-$j"
r zrem myzset "Element-[expr int(rand()*$elements)]"
}
}
format $diff
} {0}

test {ZSETs ZRANK augmented skip list stress testing} {
set err {}
r del myzset
for {set k 0} {$k < 10000} {incr k} {
set i [expr {$k%1000}]
if {[expr rand()] < .2} {
r zrem myzset $i
} else {
set score [expr rand()]
r zadd myzset $score $i
}
set card [r zcard myzset]
if {$card > 0} {
set index [randomInt $card]
set ele [lindex [r zrange myzset $index $index] 0]
set rank [r zrank myzset $ele]
if {$rank != $index} {
set err "$ele RANK is wrong! ($rank != $index)"
break
set l1 [r zrange myzset 0 -1]
set l2 [r zrevrange myzset 0 -1]
for {set j 0} {$j < [llength $l1]} {incr j} {
if {[lindex $l1 $j] ne [lindex $l2 end-$j]} {
incr diff
}
}
}
set _ $err
} {}
format $diff
} {0}

test {ZSETs ZRANK augmented skip list stress testing} {
set err {}
r del myzset
for {set k 0} {$k < 10000} {incr k} {
set i [expr {$k%1000}]
if {[expr rand()] < .2} {
r zrem myzset $i
} else {
set score [expr rand()]
r zadd myzset $score $i
}
set card [r zcard myzset]
if {$card > 0} {
set index [randomInt $card]
set ele [lindex [r zrange myzset $index $index] 0]
set rank [r zrank myzset $ele]
if {$rank != $index} {
set err "$ele RANK is wrong! ($rank != $index)"
break
}
}
}
set _ $err
} {}
}

test {ZSET element can't be set to nan with ZADD} {
set e {}