Mirror of https://codeberg.org/redict/redict.git (synced 2025-01-22 08:08:53 -05:00)

commit 846d8b3ea5
merge antirez/smallkeys
Makefile | 2
@@ -87,7 +87,7 @@ staticsymbols:
tclsh utils/build-static-symbols.tcl > staticsymbols.h

test:
tclsh8.5 tests/test_helper.tcl
tclsh8.5 tests/test_helper.tcl --tags "${TAGS}"

bench:
./redis-benchmark

TODO | 1
@@ -4,6 +4,7 @@ Redis TODO and Roadmap
VERSION 2.2 TODO (Optimizations and latency)
============================================

* Support for syslog(3).
* Lower the CPU usage.
* Lower the RAM usage everywhere possible.
* Specially encoded Sets (like Hashes).

redis.conf | 20
@@ -195,6 +195,26 @@ appendonly no
appendfsync everysec
# appendfsync no

# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none", which in practical terms means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems, turn this to "yes". Otherwise leave it as
# "no", which is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no

################################ VIRTUAL MEMORY ###############################

# Virtual Memory allows Redis to work with datasets bigger than the actual

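As an aside (not part of this commit), here is a minimal sketch of how the new directive could be exercised from the Tcl test suite, using the overrides option that this same commit adds to start_server further below; the tag, test name and keys are invented for the example:

# Illustrative only: start a test server with AOF enabled and the new
# no-appendfsync-on-rewrite directive turned on via the overrides option.
start_server {tags {"aof"} overrides {
    appendonly yes
    appendfsync everysec
    no-appendfsync-on-rewrite yes
}} {
    test {SET/GET keep working with no-appendfsync-on-rewrite enabled} {
        r set foo bar
        r get foo
    } {bar}
}
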
staticsymbols.h
@@ -7,6 +7,7 @@ static struct redisFunctionSym symsTable[] = {
{"addReplyBulk",(unsigned long)addReplyBulk},
{"addReplyBulkCString",(unsigned long)addReplyBulkCString},
{"addReplyBulkLen",(unsigned long)addReplyBulkLen},
{"addReplyBulkSds",(unsigned long)addReplyBulkSds},
{"addReplyDouble",(unsigned long)addReplyDouble},
{"addReplyLongLong",(unsigned long)addReplyLongLong},
{"addReplySds",(unsigned long)addReplySds},
@@ -45,21 +46,27 @@ static struct redisFunctionSym symsTable[] = {
{"createSortOperation",(unsigned long)createSortOperation},
{"createStringObject",(unsigned long)createStringObject},
{"createStringObjectFromLongLong",(unsigned long)createStringObjectFromLongLong},
{"createVmPointer",(unsigned long)createVmPointer},
{"createZsetObject",(unsigned long)createZsetObject},
{"daemonize",(unsigned long)daemonize},
{"dbAdd",(unsigned long)dbAdd},
{"dbDelete",(unsigned long)dbDelete},
{"dbExists",(unsigned long)dbExists},
{"dbRandomKey",(unsigned long)dbRandomKey},
{"dbReplace",(unsigned long)dbReplace},
{"dbsizeCommand",(unsigned long)dbsizeCommand},
{"debugCommand",(unsigned long)debugCommand},
{"decrCommand",(unsigned long)decrCommand},
{"decrRefCount",(unsigned long)decrRefCount},
{"decrbyCommand",(unsigned long)decrbyCommand},
{"delCommand",(unsigned long)delCommand},
{"deleteIfSwapped",(unsigned long)deleteIfSwapped},
{"deleteIfVolatile",(unsigned long)deleteIfVolatile},
{"deleteKey",(unsigned long)deleteKey},
{"dictEncObjKeyCompare",(unsigned long)dictEncObjKeyCompare},
{"dictListDestructor",(unsigned long)dictListDestructor},
{"dictObjKeyCompare",(unsigned long)dictObjKeyCompare},
{"dictRedisObjectDestructor",(unsigned long)dictRedisObjectDestructor},
{"dictSdsDestructor",(unsigned long)dictSdsDestructor},
{"dictSdsKeyCompare",(unsigned long)dictSdsKeyCompare},
{"dictVanillaFree",(unsigned long)dictVanillaFree},
{"discardCommand",(unsigned long)discardCommand},
{"dontWaitForSwappedKey",(unsigned long)dontWaitForSwappedKey},
@@ -196,6 +203,7 @@ static struct redisFunctionSym symsTable[] = {
{"pushGenericCommand",(unsigned long)pushGenericCommand},
{"qsortCompareSetsByCardinality",(unsigned long)qsortCompareSetsByCardinality},
{"qsortCompareZsetopsrcByCardinality",(unsigned long)qsortCompareZsetopsrcByCardinality},
{"qsortRedisCommands",(unsigned long)qsortRedisCommands},
{"queueIOJob",(unsigned long)queueIOJob},
{"queueMultiCommand",(unsigned long)queueMultiCommand},
{"randomkeyCommand",(unsigned long)randomkeyCommand},
@@ -245,7 +253,6 @@ static struct redisFunctionSym symsTable[] = {
{"scardCommand",(unsigned long)scardCommand},
{"sdiffCommand",(unsigned long)sdiffCommand},
{"sdiffstoreCommand",(unsigned long)sdiffstoreCommand},
{"sdsDictKeyCompare",(unsigned long)sdsDictKeyCompare},
{"sdscatrepr",(unsigned long)sdscatrepr},
{"segvHandler",(unsigned long)segvHandler},
{"selectCommand",(unsigned long)selectCommand},
@@ -269,6 +276,7 @@ static struct redisFunctionSym symsTable[] = {
{"slaveofCommand",(unsigned long)slaveofCommand},
{"smoveCommand",(unsigned long)smoveCommand},
{"sortCommand",(unsigned long)sortCommand},
{"sortCommandTable",(unsigned long)sortCommandTable},
{"sortCompare",(unsigned long)sortCompare},
{"spawnIOThread",(unsigned long)spawnIOThread},
{"spopCommand",(unsigned long)spopCommand},
@@ -290,6 +298,7 @@ static struct redisFunctionSym symsTable[] = {
{"syncWithMaster",(unsigned long)syncWithMaster},
{"syncWrite",(unsigned long)syncWrite},
{"touchWatchedKey",(unsigned long)touchWatchedKey},
{"touchWatchedKeysOnFlush",(unsigned long)touchWatchedKeysOnFlush},
{"tryFreeOneObjectFromFreelist",(unsigned long)tryFreeOneObjectFromFreelist},
{"tryObjectEncoding",(unsigned long)tryObjectEncoding},
{"tryResizeHashTables",(unsigned long)tryResizeHashTables},

@@ -1,4 +1,4 @@
set defaults [list [list appendonly yes] [list appendfilename appendonly.aof]]
set defaults { appendonly {yes} appendfilename {appendonly.aof} }
set server_path [tmpdir server.aof]
set aof_path "$server_path/appendonly.aof"

@@ -16,12 +16,13 @@ proc create_aof {code} {

proc start_server_aof {overrides code} {
upvar defaults defaults srv srv server_path server_path
set _defaults $defaults
set srv [start_server default.conf [lappend _defaults $overrides]]
set config [concat $defaults $overrides]
set srv [start_server [list overrides $config]]
uplevel 1 $code
kill_server $srv
}

tags {"aof"} {
## Test the server doesn't start when the AOF contains an unfinished MULTI
create_aof {
append_to_aof [formatCommand set foo hello]
@@ -78,3 +79,4 @@ start_server_aof [list dir $server_path] {
list [$client get foo] [$client get bar]
} {hello {}}
}
}

@@ -1,7 +1,7 @@
start_server default.conf {} {
start_server {tags {"repl"}} {
r set mykey foo

start_server default.conf {} {
start_server {} {
test {Second server should have role master at first} {
s role
} {master}

@@ -1,3 +1,6 @@
set ::global_overrides {}
set ::tags {}

proc error_and_quit {config_file error} {
puts "!!COULD NOT START REDIS-SERVER\n"
puts "CONFIGURATION:"
@@ -27,13 +30,17 @@ proc kill_server config {
set pid [dict get $config pid]

# check for leaks
if {![dict exists $config "skipleaks"]} {
catch {
if {[string match {*Darwin*} [exec uname -a]]} {
tags {"leaks"} {
test "Check for memory leaks (pid $pid)" {
exec leaks $pid
} {*0 leaks*}
}
}
}
}

# kill server and wait for the process to be totally exited
while {[is_alive $config]} {
@@ -78,9 +85,35 @@ proc ping_server {host port} {
return $retval
}

set ::global_overrides {}
proc start_server {filename overrides {code undefined}} {
set data [split [exec cat "tests/assets/$filename"] "\n"]
# doesn't really belong here, but highly coupled to code in start_server
proc tags {tags code} {
set ::tags [concat $::tags $tags]
uplevel 1 $code
set ::tags [lrange $::tags 0 end-[llength $tags]]
}

proc start_server {options {code undefined}} {
# setup defaults
set baseconfig "default.conf"
set overrides {}
set tags {}

# parse options
foreach {option value} $options {
switch $option {
"config" {
set baseconfig $value }
"overrides" {
set overrides $value }
"tags" {
set tags $value
set ::tags [concat $::tags $value] }
default {
error "Unknown option $option" }
}
}

set data [split [exec cat "tests/assets/$baseconfig"] "\n"]
set config {}
foreach line $data {
if {[string length $line] > 0 && [string index $line 0] ne "#"} {
@@ -98,9 +131,7 @@ proc start_server {filename overrides {code undefined}} {
dict set config port [incr ::port]

# apply overrides from global space and arguments
foreach override [concat $::global_overrides $overrides] {
set directive [lrange $override 0 0]
set arguments [lrange $override 1 end]
foreach {directive arguments} [concat $::global_overrides $overrides] {
dict set config $directive $arguments
}

@@ -177,19 +208,40 @@ proc start_server {filename overrides {code undefined}} {
lappend ::servers $srv

# execute provided block
set curnum $::testnum
catch { uplevel 1 $code } err
if {$curnum == $::testnum} {
# don't check for leaks when no tests were executed
dict set srv "skipleaks" 1
}

# pop the server object
set ::servers [lrange $::servers 0 end-1]

# allow an exception to bubble up the call chain but still kill this
# server, because we want to reuse the ports when the tests are re-run
if {$err eq "exception"} {
puts [format "Logged warnings (pid %d):" [dict get $srv "pid"]]
set warnings [warnings_from_file [dict get $srv "stdout"]]
if {[string length $warnings] > 0} {
puts "$warnings"
} else {
puts "(none)"
}
# kill this server without checking for leaks
dict set srv "skipleaks" 1
kill_server $srv

if {[string length $err] > 0} {
error "exception"
} elseif {[string length $err] > 0} {
puts "Error executing the suite, aborting..."
puts $err
exit 1
}

set ::tags [lrange $::tags 0 end-[llength $tags]]
kill_server $srv
} else {
set ::tags [lrange $::tags 0 end-[llength $tags]]
set _ $srv
}
}

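To make the new calling convention concrete, here is a small illustrative sketch (not part of the diff) of how a test file can drive the reworked start_server and the tags helper; the option names (tags, overrides) are the ones parsed above, while the test bodies are invented:

# Illustrative usage of the options form of start_server added above.
# "basic" and "slow" are tags that appear elsewhere in this commit; the
# test bodies are made up.
start_server {tags {"basic"} overrides {appendonly no}} {
    test {PING returns PONG} {
        r ping
    } {PONG}

    # A nested tags block adds its tags only for the tests inside it and
    # pops them off ::tags again when the block ends.
    tags {"slow"} {
        test {Key is still there after a short wait} {
            r set mykey foo
            after 1000
            r get mykey
        } {foo}
    }
}
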
@@ -3,20 +3,34 @@ set ::failed 0
set ::testnum 0

proc test {name code okpattern} {
# abort if tagged with a tag to deny
foreach tag $::denytags {
if {[lsearch $::tags $tag] >= 0} {
return
}
}

# check if tagged with at least 1 tag to allow when there *is* a list
# of tags to allow, because default policy is to run everything
if {[llength $::allowtags] > 0} {
set matched 0
foreach tag $::allowtags {
if {[lsearch $::tags $tag] >= 0} {
incr matched
}
}
if {$matched < 1} {
return
}
}

incr ::testnum
# if {$::testnum < $::first || $::testnum > $::last} return
puts -nonewline [format "#%03d %-68s " $::testnum $name]
flush stdout
if {[catch {set retval [uplevel 1 $code]} error]} {
puts "ERROR\n\nLogged warnings:"
foreach file [glob tests/tmp/server.[pid].*/stdout] {
set warnings [warnings_from_file $file]
if {[string length $warnings] > 0} {
puts $warnings
}
}
puts "Script died with $error"
exit 1
puts "EXCEPTION"
puts "\nCaught error: $error"
error "exception"
}
if {$okpattern eq $retval || [string match $okpattern $retval]} {
puts "PASSED"

@@ -13,9 +13,10 @@ set ::host 127.0.0.1
set ::port 16379
set ::traceleaks 0
set ::valgrind 0
set ::denytags {}
set ::allowtags {}

proc execute_tests name {
set cur $::testnum
source "tests/$name.tcl"
}

@@ -92,4 +93,31 @@ proc main {} {
cleanup
}

main
# parse arguments
for {set j 0} {$j < [llength $argv]} {incr j} {
set opt [lindex $argv $j]
set arg [lindex $argv [expr $j+1]]
if {$opt eq {--tags}} {
foreach tag $arg {
if {[string index $tag 0] eq "-"} {
lappend ::denytags [string range $tag 1 end]
} else {
lappend ::allowtags $tag
}
}
incr j
} else {
puts "Wrong argument: $opt"
exit 1
}
}

if {[catch { main } err]} {
if {[string length $err] > 0} {
# only display error when not generated by the test suite
if {$err ne "exception"} {
puts $err
}
exit 1
}
}

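For illustration (not part of the commit), the standalone sketch below mirrors the --tags splitting logic above, which is what the Makefile now forwards via --tags "${TAGS}"; the tag names are only examples:

# Standalone sketch of the --tags splitting above; "list", "hash" and
# "slow" are example tag names used elsewhere in this commit.
set ::allowtags {}
set ::denytags {}
foreach tag {list hash -slow} {
    if {[string index $tag 0] eq "-"} {
        # a leading "-" puts the tag on the deny list
        lappend ::denytags [string range $tag 1 end]
    } else {
        lappend ::allowtags $tag
    }
}
puts "allow: $::allowtags"   ;# prints: allow: list hash
puts "deny:  $::denytags"    ;# prints: deny:  slow
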
@@ -1,4 +1,4 @@
start_server default.conf {{requirepass foobar}} {
start_server {tags {"auth"} overrides {requirepass foobar}} {
test {AUTH fails when a wrong password is given} {
catch {r auth wrong!} err
format $err

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"basic"}} {
test {DEL all keys to start with a clean DB} {
foreach key [r keys *] {r del $key}
r dbsize
@@ -52,6 +52,7 @@ start_server default.conf {} {
r get foo
} [string repeat "abcd" 1000000]

tags {"slow"} {
test {Very big payload random access} {
set err {}
array set payload {}
@@ -92,6 +93,7 @@ start_server default.conf {} {
test {DBSIZE should be 10101 now} {
r dbsize
} {10101}
}

test {INCR against non existing key} {
set res {}

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"cas"}} {
test {EXEC works on WATCHed key not modified} {
r watch x y z
r watch k

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"expire"}} {
test {EXPIRE - don't set timeouts multiple times} {
r set x foobar
set v1 [r expire x 5]
@@ -12,10 +12,12 @@ start_server default.conf {} {
r get x
} {foobar}

tags {"slow"} {
test {EXPIRE - After 6 seconds the key should no longer be here} {
after 6000
list [r get x] [r exists x]
} {{} 0}
}

test {EXPIRE - Delete on write policy} {
r del x
@@ -46,10 +48,12 @@ start_server default.conf {} {
r get y
} {foo}

tags {"slow"} {
test {SETEX - Wait for the key to expire} {
after 3000
r get y
} {}
}

test {SETEX - Wrong time parameter} {
catch {r setex z -10 foo} e

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {} {
test {SAVE - make sure there are all the types as values} {
# Wait for a background saving in progress to terminate
waitForBgsave r
@@ -12,6 +12,7 @@ start_server default.conf {} {
r save
} {OK}

tags {"slow"} {
foreach fuzztype {binary alpha compr} {
test "FUZZ stresser with data model $fuzztype" {
set err 0
@@ -27,6 +28,7 @@ start_server default.conf {} {
set _ $err
} {0}
}
}

test {BGSAVE} {
waitForBgsave r

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {} {
test {Handle an empty query well} {
set fd [r channel]
puts -nonewline $fd "\r\n"

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"sort"}} {
test {SORT ALPHA against integer encoded strings} {
r del mylist
r lpush mylist 2
@@ -8,6 +8,8 @@ start_server default.conf {} {
r sort mylist alpha
} {1 10 2 3}

tags {"slow"} {
set res {}
test {Create a random list and a random set} {
set tosort {}
array set seenrand {}
@@ -30,7 +32,6 @@ start_server default.conf {} {
lappend tosort [list $i $rint]
}
set sorted [lsort -index 1 -real $tosort]
set res {}
for {set i 0} {$i < 10000} {incr i} {
lappend res [lindex $sorted $i 0]
}
@@ -132,6 +133,7 @@ start_server default.conf {} {
flush stdout
format {}
} {}
}

test {SORT regression for issue #19, sorting floats} {
r flushdb

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"hash"}} {
test {HSET/HLEN - Small hash creation} {
array set smallhash {}
for {set i 0} {$i < 8} {incr i} {

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"list"}} {
test {Basic LPUSH, RPUSH, LLENGTH, LINDEX} {
set res [r lpush mylist a]
append res [r lpush mylist b]

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"set"}} {
test {SADD, SCARD, SISMEMBER, SMEMBERS basics} {
r sadd myset foo
r sadd myset bar

@@ -1,4 +1,4 @@
start_server default.conf {} {
start_server {tags {"zset"}} {
test {ZSET basic ZADD and score update} {
r zadd ztmp 10 x
r zadd ztmp 20 y
@@ -162,6 +162,7 @@ start_server default.conf {} {
r zrangebyscore zset 2 4 withscores
} {b 2 c 3 d 4}

tags {"slow"} {
test {ZRANGEBYSCORE fuzzy test, 100 ranges in 1000 elements sorted set} {
set err {}
r del zset
@@ -241,6 +242,7 @@ start_server default.conf {} {
}
set _ $err
} {}
}

test {ZRANGEBYSCORE with LIMIT} {
r del zset
@@ -356,6 +358,7 @@ start_server default.conf {} {
list [r zinterstore zsetc 2 zseta zsetb aggregate max] [r zrange zsetc 0 -1 withscores]
} {2 {b 2 c 3}}

tags {"slow"} {
test {ZSETs skiplist implementation backlink consistency test} {
set diff 0
set elements 10000
@@ -397,6 +400,7 @@ start_server default.conf {} {
}
set _ $err
} {}
}

test {ZSET element can't be set to nan with ZADD} {
set e {}