set ::global_overrides {}
set ::tags {}
set ::valgrind_errors {}

proc start_server_error {config_file error} {
    set err {}
    append err "Can't start the Redis server\n"
    append err "CONFIGURATION:"
    append err [exec cat $config_file]
    append err "\nERROR:"
    append err [string trim $error]
    send_data_packet $::test_server_fd err $err
}

proc check_valgrind_errors stderr {
    set res [find_valgrind_errors $stderr true]
    if {$res != ""} {
        send_data_packet $::test_server_fd err "Valgrind error: $res\n"
    }
}

proc clean_persistence config {
    # we may wanna keep the logs for later, but let's clean the persistence
    # files right away, since they can accumulate and take up a lot of space
    set config [dict get $config "config"]
    set rdb [format "%s/%s" [dict get $config "dir"] "dump.rdb"]
    set aof [format "%s/%s" [dict get $config "dir"] "appendonly.aof"]
    catch {exec rm -rf $rdb}
    catch {exec rm -rf $aof}
}

proc kill_server config {
    # nothing to kill when running against external server
    if {$::external} return

    # never mind if it's already dead
    if {![is_alive $config]} {
        # Check valgrind errors if needed
        if {$::valgrind} {
            check_valgrind_errors [dict get $config stderr]
        }
        return
    }
    set pid [dict get $config pid]

    # check for leaks
    if {![dict exists $config "skipleaks"]} {
        catch {
            if {[string match {*Darwin*} [exec uname -a]]} {
                tags {"leaks"} {
                    test "Check for memory leaks (pid $pid)" {
                        set output {0 leaks}
                        catch {exec leaks $pid} output option
                        # In a few tests we kill the server process, so leaks will not find it.
                        # It exits with an exit code >1 on error, so we ignore those cases.
                        if {[dict exists $option -errorcode]} {
                            set details [dict get $option -errorcode]
                            if {[lindex $details 0] eq "CHILDSTATUS"} {
                                set status [lindex $details 2]
                                if {$status > 1} {
                                    set output "0 leaks"
                                }
                            }
                        }
                        set output
                    } {*0 leaks*}
                }
            }
        }
    }

    # kill server and wait for the process to be totally exited
    send_data_packet $::test_server_fd server-killing $pid
    catch {exec kill $pid}
    if {$::valgrind} {
        set max_wait 60000
    } else {
        set max_wait 10000
    }
    while {[is_alive $config]} {
        incr wait 10

        if {$wait >= $max_wait} {
            puts "Forcing process $pid to exit..."
            catch {exec kill -KILL $pid}
        } elseif {$wait % 1000 == 0} {
            puts "Waiting for process $pid to exit..."
        }
        after 10
    }

    # Check valgrind errors if needed
    if {$::valgrind} {
        check_valgrind_errors [dict get $config stderr]
    }

    # Remove this pid from the set of active pids in the test server.
    send_data_packet $::test_server_fd server-killed $pid
}

proc is_alive config {
    set pid [dict get $config pid]
    if {[catch {exec kill -0 $pid} err]} {
        return 0
    } else {
        return 1
    }
}

proc ping_server {host port} {
    set retval 0
    if {[catch {
        if {$::tls} {
            set fd [::tls::socket $host $port]
        } else {
            set fd [socket $host $port]
        }
        fconfigure $fd -translation binary
        puts $fd "PING\r\n"
        flush $fd
        set reply [gets $fd]
        if {[string range $reply 0 0] eq {+} ||
            [string range $reply 0 0] eq {-}} {
            set retval 1
        }
        close $fd
    } e]} {
        if {$::verbose} {
            puts -nonewline "."
        }
    } else {
        if {$::verbose} {
            puts -nonewline "ok"
        }
    }
    return $retval
}

# Return 1 if the server at the specified addr is reachable by PING, otherwise
# returns 0. Performs a try every 50 milliseconds for the specified number
# of retries.
proc server_is_up {host port retrynum} {
    after 10 ;# Use a small delay to make a first-try success likely.
    set retval 0
    while {[incr retrynum -1]} {
        if {[catch {ping_server $host $port} ping]} {
            set ping 0
        }
        if {$ping} {return 1}
        after 50
    }
    return 0
}
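# Usage sketch (illustrative only; the port value below is hypothetical):
#   if {![server_is_up $::host 21111 100]} {
#       error "server did not come up within ~5 seconds (100 retries x 50 ms)"
#   }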
# Check if current ::tags match requested tags. If ::allowtags are used,
# there must be some intersection. If ::denytags are used, no intersection
# is allowed. Returns 1 if tags are acceptable or 0 otherwise, in which
# case err_return names a return variable for the message to be logged.
proc tags_acceptable {err_return} {
    upvar $err_return err

    # If tags are whitelisted, make sure there's a match
    if {[llength $::allowtags] > 0} {
        set matched 0
        foreach tag $::allowtags {
            if {[lsearch $::tags $tag] >= 0} {
                incr matched
            }
        }
        if {$matched < 1} {
            set err "Tag: none of the tags allowed"
            return 0
        }
    }

    foreach tag $::denytags {
        if {[lsearch $::tags $tag] >= 0} {
            set err "Tag: $tag denied"
            return 0
        }
    }

    return 1
}
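# Example (illustrative tag names): with ::allowtags {repl} and ::tags {repl slow},
# tags_acceptable returns 1; with ::denytags {slow} it returns 0 and sets the
# caller's variable (named by err_return) to "Tag: slow denied".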
# doesn't really belong here, but highly coupled to code in start_server
proc tags {tags code} {
    # If 'tags' contains multiple tags, quoted and separated by spaces,
    # we want to get rid of the quotes in order to have a proper list
    set tags [string map { \" "" } $tags]
    set ::tags [concat $::tags $tags]
    if {![tags_acceptable err]} {
        incr ::num_aborted
        send_data_packet $::test_server_fd ignore $err
        set ::tags [lrange $::tags 0 end-[llength $tags]]
        return
    }
    uplevel 1 $code
    set ::tags [lrange $::tags 0 end-[llength $tags]]
}
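# Usage sketch (tag names here are only examples):
#   tags {"repl" "slow"} {
#       test "Some test gated by these tags" { ... }
#   }
# The tags are pushed onto ::tags for the duration of the block and popped
# again afterwards, so nested tags blocks compose as expected.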
# Write the configuration in the dictionary 'config' in the specified
# file name.
proc create_server_config_file {filename config} {
    set fp [open $filename w+]
    foreach directive [dict keys $config] {
        puts -nonewline $fp "$directive "
        puts $fp [dict get $config $directive]
    }
    close $fp
}
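# Example (illustrative values): a dict like {port 21111 appendonly yes} is
# written out as one "directive arguments" pair per line, i.e.
#   port 21111
#   appendonly yes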
proc spawn_server {config_file stdout stderr} {
    if {$::valgrind} {
        set pid [exec valgrind --track-origins=yes --trace-children=yes --suppressions=[pwd]/src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file >> $stdout 2>> $stderr &]
    } elseif {$::stack_logging} {
        set pid [exec /usr/bin/env MallocStackLogging=1 MallocLogFile=/tmp/malloc_log.txt src/redis-server $config_file >> $stdout 2>> $stderr &]
    } else {
        set pid [exec src/redis-server $config_file >> $stdout 2>> $stderr &]
    }

    if {$::wait_server} {
        set msg "server started PID: $pid. press any key to continue..."
        puts $msg
        read stdin 1
    }

    # Tell the test server about this new instance.
    send_data_packet $::test_server_fd server-spawned $pid
    return $pid
}

# Wait for actual startup, return 1 if port is busy, 0 otherwise
proc wait_server_started {config_file stdout pid} {
    set checkperiod 100; # Milliseconds
    set maxiter [expr {120*1000/$checkperiod}] ; # Wait up to 2 minutes.
    set port_busy 0
    while 1 {
        if {[regexp -- " PID: $pid" [exec cat $stdout]]} {
            break
        }
        after $checkperiod
        incr maxiter -1
        if {$maxiter == 0} {
            start_server_error $config_file "No PID detected in log $stdout"
            puts "--- LOG CONTENT ---"
            puts [exec cat $stdout]
            puts "-------------------"
            break
        }

        # Check if the port is actually busy and the server failed
        # for this reason.
        if {[regexp {Could not create server TCP} [exec cat $stdout]]} {
            set port_busy 1
            break
        }
    }
    return $port_busy
}

proc start_server {options {code undefined}} {
    # setup defaults
    set baseconfig "default.conf"
    set overrides {}
    set omit {}
    set tags {}
    set keep_persistence false

    # parse options
    foreach {option value} $options {
        switch $option {
            "config" {
                set baseconfig $value
            }
            "overrides" {
                set overrides $value
            }
            "omit" {
                set omit $value
            }
            "tags" {
                # If 'tags' contains multiple tags, quoted and separated by spaces,
                # we want to get rid of the quotes in order to have a proper list
                set tags [string map { \" "" } $value]
                set ::tags [concat $::tags $tags]
            }
            "keep_persistence" {
                set keep_persistence $value
            }
            default {
                error "Unknown option $option"
            }
        }
    }

    # We skip unwanted tags
    if {![tags_acceptable err]} {
        incr ::num_aborted
        send_data_packet $::test_server_fd ignore $err
        set ::tags [lrange $::tags 0 end-[llength $tags]]
        return
    }

    # If we are running against an external server, we just push the
    # host/port pair in the stack the first time
    if {$::external} {
        if {[llength $::servers] == 0} {
            set srv {}
            dict set srv "host" $::host
            dict set srv "port" $::port
            set client [redis $::host $::port 0 $::tls]
            dict set srv "client" $client
            $client select 9

            set config {}
            dict set config "port" $::port
            dict set srv "config" $config

            # append the server to the stack
            lappend ::servers $srv
        }
        r flushall
        if {[catch {set retval [uplevel 1 $code]} error]} {
            if {$::durable} {
                set msg [string range $error 10 end]
                lappend details $msg
                lappend details $::errorInfo
                lappend ::tests_failed $details

                incr ::num_failed
                send_data_packet $::test_server_fd err [join $details "\n"]
            } else {
                # Re-raise, let handler up the stack take care of this.
                error $error $::errorInfo
            }
        }
        set ::tags [lrange $::tags 0 end-[llength $tags]]
        return
    }

    set data [split [exec cat "tests/assets/$baseconfig"] "\n"]
    set config {}
    if {$::tls} {
        dict set config "tls-cert-file" [format "%s/tests/tls/server.crt" [pwd]]
        dict set config "tls-key-file" [format "%s/tests/tls/server.key" [pwd]]
        dict set config "tls-client-cert-file" [format "%s/tests/tls/client.crt" [pwd]]
        dict set config "tls-client-key-file" [format "%s/tests/tls/client.key" [pwd]]
        dict set config "tls-dh-params-file" [format "%s/tests/tls/redis.dh" [pwd]]
        dict set config "tls-ca-cert-file" [format "%s/tests/tls/ca.crt" [pwd]]
        dict set config "loglevel" "debug"
    }
    foreach line $data {
        if {[string length $line] > 0 && [string index $line 0] ne "#"} {
            set elements [split $line " "]
            set directive [lrange $elements 0 0]
            set arguments [lrange $elements 1 end]
            dict set config $directive $arguments
        }
    }

    # use a different directory every time a server is started
    dict set config dir [tmpdir server]

    # start every server on a different port
    set port [find_available_port $::baseport $::portcount]
    if {$::tls} {
        dict set config "port" 0
        dict set config "tls-port" $port
        dict set config "tls-cluster" "yes"
        dict set config "tls-replication" "yes"
    } else {
        dict set config port $port
    }

    set unixsocket [file normalize [format "%s/%s" [dict get $config "dir"] "socket"]]
    dict set config "unixsocket" $unixsocket

    # apply overrides from global space and arguments
    foreach {directive arguments} [concat $::global_overrides $overrides] {
        dict set config $directive $arguments
    }

    # remove directives that are marked to be omitted
    foreach directive $omit {
        dict unset config $directive
    }

    # write new configuration to temporary file
    set config_file [tmpfile redis.conf]
    create_server_config_file $config_file $config

    set stdout [format "%s/%s" [dict get $config "dir"] "stdout"]
    set stderr [format "%s/%s" [dict get $config "dir"] "stderr"]

    # if we're inside a test, write the test name to the server log file
    if {[info exists ::cur_test]} {
        set fd [open $stdout "a+"]
        puts $fd "### Starting server for test $::cur_test"
        close $fd
    }

    # We need a loop here to retry with different ports.
    set server_started 0
    while {$server_started == 0} {
        if {$::verbose} {
            puts -nonewline "=== ($tags) Starting server ${::host}:${port} "
        }

        send_data_packet $::test_server_fd "server-spawning" "port $port"

        set pid [spawn_server $config_file $stdout $stderr]

        # check that the server actually started
        set port_busy [wait_server_started $config_file $stdout $pid]

        # Sometimes we have to try a different port, even if we checked
        # for availability. Other test clients may grab the port before we
        # are able to do it, for example.
        if {$port_busy} {
            puts "Port $port was already busy, trying another port..."
            set port [find_available_port $::baseport $::portcount]
            if {$::tls} {
                dict set config "tls-port" $port
            } else {
                dict set config port $port
            }
            create_server_config_file $config_file $config

            # Truncate log so wait_server_started will not be looking at
            # output of the failed server.
            close [open $stdout "w"]

            continue; # Try again
        }

        if {$::valgrind} {set retrynum 1000} else {set retrynum 100}
        if {$code ne "undefined"} {
            set serverisup [server_is_up $::host $port $retrynum]
        } else {
            set serverisup 1
        }

        if {$::verbose} {
            puts ""
        }

        if {!$serverisup} {
            set err {}
            append err [exec cat $stdout] "\n" [exec cat $stderr]
            start_server_error $config_file $err
            return
        }
        set server_started 1
    }

    # setup properties to be able to initialize a client object
    set port_param [expr $::tls ? {"tls-port"} : {"port"}]
    set host $::host
    if {[dict exists $config bind]} { set host [dict get $config bind] }
    if {[dict exists $config $port_param]} { set port [dict get $config $port_param] }

    # setup config dict
    dict set srv "config_file" $config_file
    dict set srv "config" $config
    dict set srv "pid" $pid
    dict set srv "host" $host
    dict set srv "port" $port
    dict set srv "stdout" $stdout
    dict set srv "stderr" $stderr
    dict set srv "unixsocket" $unixsocket

    # if a block of code is supplied, we wait for the server to become
    # available, create a client object and kill the server afterwards
    if {$code ne "undefined"} {
        set line [exec head -n1 $stdout]
        if {[string match {*already in use*} $line]} {
            error_and_quit $config_file $line
        }

        while 1 {
            # check that the server actually started and is ready for connections
            if {[count_message_lines $stdout "Ready to accept"] > 0} {
                break
            }
            after 10
        }

        # append the server to the stack
        lappend ::servers $srv

        # connect client (after server dict is put on the stack)
        reconnect

        # execute provided block
        set num_tests $::num_tests
        if {[catch { uplevel 1 $code } error]} {
            set backtrace $::errorInfo

            # fetch srv back from the server list, in case it was restarted by restart_server (new PID)
            set srv [lindex $::servers end]

            # pop the server object
            set ::servers [lrange $::servers 0 end-1]

            # Kill the server without checking for leaks
            dict set srv "skipleaks" 1
            kill_server $srv

            # Print warnings from log
            puts [format "\nLogged warnings (pid %d):" [dict get $srv "pid"]]
            set warnings [warnings_from_file [dict get $srv "stdout"]]
            if {[string length $warnings] > 0} {
                puts "$warnings"
            } else {
                puts "(none)"
            }
            puts ""

            if {$::durable} {
                set msg [string range $error 10 end]
                lappend details $msg
                lappend details $backtrace
                lappend ::tests_failed $details

                incr ::num_failed
                send_data_packet $::test_server_fd err [join $details "\n"]
            } else {
                # Re-raise, let handler up the stack take care of this.
                error $error $backtrace
            }
        }

        # fetch srv back from the server list, in case it was restarted by restart_server (new PID)
        set srv [lindex $::servers end]

        # Don't do the leak check when no tests were run
        if {$num_tests == $::num_tests} {
            dict set srv "skipleaks" 1
        }

        # pop the server object
        set ::servers [lrange $::servers 0 end-1]

        set ::tags [lrange $::tags 0 end-[llength $tags]]
        kill_server $srv
        if {!$keep_persistence} {
            clean_persistence $srv
        }
        set _ ""
    } else {
        set ::tags [lrange $::tags 0 end-[llength $tags]]
        set _ $srv
    }
}
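# Usage sketch (the tag name and override below are only examples):
#   start_server {tags {"repl"} overrides {appendonly yes}} {
#       test "Example test running against this server" {
#           r set foo bar
#           r get foo
#       } {bar}
#   }
# The server is spawned with the merged configuration, the body runs with the
# [r] client connected to it, and the server is killed (and its persistence
# files removed unless keep_persistence is set) when the block returns.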
proc restart_server {level wait_ready rotate_logs} {
    set srv [lindex $::servers end+$level]
    kill_server $srv

    set pid [dict get $srv "pid"]
    set stdout [dict get $srv "stdout"]
    set stderr [dict get $srv "stderr"]
    if {$rotate_logs} {
        set ts [clock format [clock seconds] -format %y%m%d%H%M%S]
        file rename $stdout $stdout.$ts.$pid
        file rename $stderr $stderr.$ts.$pid
    }
    set prev_ready_count [count_message_lines $stdout "Ready to accept"]

    # if we're inside a test, write the test name to the server log file
    if {[info exists ::cur_test]} {
        set fd [open $stdout "a+"]
        puts $fd "### Restarting server for test $::cur_test"
        close $fd
    }

    set config_file [dict get $srv "config_file"]

    set pid [spawn_server $config_file $stdout $stderr]

    # check that the server actually started
    wait_server_started $config_file $stdout $pid

    # update the pid in the servers list
    dict set srv "pid" $pid
    # re-set $srv in the servers list
    lset ::servers end+$level $srv

    if {$wait_ready} {
        while 1 {
            # check that the server actually started and is ready for connections
            if {[count_message_lines $stdout "Ready to accept"] > $prev_ready_count} {
                break
            }
            after 10
        }
    }
    reconnect $level
}