set ::global_overrides {}
set ::tags {}
set ::valgrind_errors {}
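# Report a failed server start to the test server, attaching the
# configuration that was used and the trimmed error output.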
proc start_server_error {config_file error} {
    set err {}
    append err "Can't start the Redis server\n"
    append err "CONFIGURATION:\n"
    append err [exec cat $config_file]
    append err "\nERROR:\n"
    append err [string trim $error]
    send_data_packet $::test_server_fd err $err
}
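# Scan a server's stderr file for Valgrind errors and report them to the
# test server.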
proc check_valgrind_errors stderr {
    set res [find_valgrind_errors $stderr true]
    if {$res != ""} {
        send_data_packet $::test_server_fd err "Valgrind error: $res\n"
    }
}
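# Scan a server's stderr file for sanitizer (e.g. ASAN) errors and report
# them to the test server.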
proc check_sanitizer_errors stderr {
    set res [sanitizer_errors_from_file $stderr]
    if {$res != ""} {
        send_data_packet $::test_server_fd err "Sanitizer error: $res\n"
    }
}
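# Remove the RDB file and the AOF directory left by a server; log files
# are kept.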
proc clean_persistence config {
    # we may wanna keep the logs for later, but let's clean the persistence
    # files right away, since they can accumulate and take up a lot of space
    set config [dict get $config "config"]
    set dir [dict get $config "dir"]
    set rdb [format "%s/%s" $dir "dump.rdb"]
    if {[dict exists $config "appenddirname"]} {
        set aofdir [dict get $config "appenddirname"]
    } else {
        set aofdir "appendonlydir"
    }
    set aof_dirpath [format "%s/%s" $dir $aofdir]
    clean_aof_persistence $aof_dirpath
    catch {exec rm -rf $rdb}
}
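# Terminate the server described by 'config', checking for memory leaks and
# Valgrind/sanitizer errors along the way, and tell the test server once the
# process is gone.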
proc kill_server config {
    # nothing to kill when running against external server
    if {$::external} return

    # Close client connection if it exists
    if {[dict exists $config "client"]} {
        [dict get $config "client"] close
    }

    # nevermind if it's already dead
    set pid [dict get $config pid]
    if {![is_alive $pid]} {
        # Check valgrind errors if needed
        if {$::valgrind} {
            check_valgrind_errors [dict get $config stderr]
        }

        check_sanitizer_errors [dict get $config stderr]

        # Remove this pid from the set of active pids in the test server.
        send_data_packet $::test_server_fd server-killed $pid

        return
    }

    # check for leaks
    if {![dict exists $config "skipleaks"]} {
        catch {
            if {[string match {*Darwin*} [exec uname -a]]} {
                tags {"leaks"} {
                    test "Check for memory leaks (pid $pid)" {
                        set output {0 leaks}
                        catch {exec leaks $pid} output option
                        # In a few tests we kill the server process, so leaks will not find it.
                        # It exits with exit code >1 on error, so we ignore these.
                        if {[dict exists $option -errorcode]} {
                            set details [dict get $option -errorcode]
                            if {[lindex $details 0] eq "CHILDSTATUS"} {
                                set status [lindex $details 2]
                                if {$status > 1} {
                                    set output "0 leaks"
                                }
                            }
                        }
                        set output
                    } {*0 leaks*}
                }
            }
        }
    }

    # kill server and wait for the process to be totally exited
    send_data_packet $::test_server_fd server-killing $pid
    catch {exec kill $pid}
    # Node might have been stopped in the test
    catch {exec kill -SIGCONT $pid}
    if {$::valgrind} {
        set max_wait 120000
    } else {
        set max_wait 10000
    }
    while {[is_alive $pid]} {
        incr wait 10

        if {$wait == $max_wait} {
            puts "Forcing process $pid to crash..."
            catch {exec kill -SEGV $pid}
        } elseif {$wait >= $max_wait * 2} {
            puts "Forcing process $pid to exit..."
            catch {exec kill -KILL $pid}
        } elseif {$wait % 1000 == 0} {
            puts "Waiting for process $pid to exit..."
        }
        after 10
    }

    # Check valgrind errors if needed
    if {$::valgrind} {
        check_valgrind_errors [dict get $config stderr]
    }

    check_sanitizer_errors [dict get $config stderr]

    # Remove this pid from the set of active pids in the test server.
    send_data_packet $::test_server_fd server-killed $pid
}
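# Return 1 if a process with the given pid is still running, 0 otherwise.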
proc is_alive pid {
    if {[catch {exec kill -0 $pid} err]} {
        return 0
    } else {
        return 1
    }
}
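# Send an inline PING to host:port and return 1 if any reply (status or
# error) comes back, 0 if the connection or read fails.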
proc ping_server {host port} {
    set retval 0
    if {[catch {
        if {$::tls} {
            set fd [::tls::socket $host $port]
        } else {
            set fd [socket $host $port]
        }
        fconfigure $fd -translation binary
        puts $fd "PING\r\n"
        flush $fd
        set reply [gets $fd]
        if {[string range $reply 0 0] eq {+} ||
            [string range $reply 0 0] eq {-}} {
            set retval 1
        }
        close $fd
    } e]} {
        if {$::verbose} {
            puts -nonewline "."
        }
    } else {
        if {$::verbose} {
            puts -nonewline "ok"
        }
    }
    return $retval
}
# Return 1 if the server at the specified address is reachable by PING,
# otherwise return 0. A ping is attempted every 50 milliseconds for the
# specified number of retries.
proc server_is_up {host port retrynum} {
    after 10 ;# Use a small delay to make a first-try success more likely.
    set retval 0
    while {[incr retrynum -1]} {
        if {[catch {ping_server $host $port} ping]} {
            set ping 0
        }
        if {$ping} {return 1}
        after 50
    }
    return 0
}
# Check if current ::tags match requested tags. If ::allowtags are used,
# there must be some intersection. If ::denytags are used, no intersection
# is allowed. Returns 1 if tags are acceptable or 0 otherwise, in which
# case err_return names a return variable for the message to be logged.
proc tags_acceptable {tags err_return} {
    upvar $err_return err

    # If tags are whitelisted, make sure there's a match
    if {[llength $::allowtags] > 0} {
        set matched 0
        foreach tag $::allowtags {
            if {[lsearch $tags $tag] >= 0} {
                incr matched
            }
        }
        if {$matched < 1} {
            set err "Tag: none of the tags allowed"
            return 0
        }
    }

    foreach tag $::denytags {
        if {[lsearch $tags $tag] >= 0} {
            set err "Tag: $tag denied"
            return 0
        }
    }

    # some units mess with the client output buffer so we can't really use the req-res logging mechanism.
    if {$::log_req_res && [lsearch $tags "logreqres:skip"] >= 0} {
        set err "Not supported when running in log-req-res mode"
        return 0
    }

    if {$::external && [lsearch $tags "external:skip"] >= 0} {
        set err "Not supported on external server"
        return 0
    }

    if {$::singledb && [lsearch $tags "singledb:skip"] >= 0} {
        set err "Not supported on singledb"
        return 0
    }

    if {$::cluster_mode && [lsearch $tags "cluster:skip"] >= 0} {
        set err "Not supported in cluster mode"
        return 0
    }

    if {$::tls && [lsearch $tags "tls:skip"] >= 0} {
        set err "Not supported in tls mode"
        return 0
    }

    if {!$::large_memory && [lsearch $tags "large-memory"] >= 0} {
        set err "large memory flag not provided"
        return 0
    }

    return 1
}
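# Run 'code' with the given tags pushed onto ::tags, skipping it when the
# tags are not acceptable. For example (tag names are illustrative):
#   tags {"repl external:skip"} { ... }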
# doesn't really belong here, but highly coupled to code in start_server
proc tags {tags code} {
    # If 'tags' contains multiple tags, quoted and separated by spaces,
    # we want to get rid of the quotes in order to have a proper list
    set tags [string map { \" "" } $tags]
    set ::tags [concat $::tags $tags]
    if {![tags_acceptable $::tags err]} {
        incr ::num_aborted
        send_data_packet $::test_server_fd ignore $err
        set ::tags [lrange $::tags 0 end-[llength $tags]]
        return
    }
    uplevel 1 $code
    set ::tags [lrange $::tags 0 end-[llength $tags]]
}
# Write the configuration in the dictionary 'config' to the specified
# file name.
proc create_server_config_file {filename config config_lines} {
    set fp [open $filename w+]
    foreach directive [dict keys $config] {
        puts -nonewline $fp "$directive "
        puts $fp [dict get $config $directive]
    }
    foreach {config_line_directive config_line_args} $config_lines {
        puts $fp "$config_line_directive $config_line_args"
    }
    close $fp
}
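# Start a redis-server process for the given config file (optionally under
# Valgrind or with malloc stack logging), redirecting output to the given
# stdout/stderr files, and return its pid.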
proc spawn_server {config_file stdout stderr args} {
    set cmd [list src/redis-server $config_file]
    set args {*}$args
    if {[llength $args] > 0} {
        lappend cmd {*}$args
    }

    if {$::valgrind} {
        set pid [exec valgrind --track-origins=yes --trace-children=yes --suppressions=[pwd]/src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full {*}$cmd >> $stdout 2>> $stderr &]
    } elseif ($::stack_logging) {
        set pid [exec /usr/bin/env MallocStackLogging=1 MallocLogFile=/tmp/malloc_log.txt {*}$cmd >> $stdout 2>> $stderr &]
    } else {
        # ASAN_OPTIONS environment variable is for address sanitizer. If a test
        # tries to allocate huge memory area and expects allocator to return
        # NULL, address sanitizer throws an error without this setting.
        set pid [exec /usr/bin/env ASAN_OPTIONS=allocator_may_return_null=1 {*}$cmd >> $stdout 2>> $stderr &]
    }

    if {$::wait_server} {
        set msg "server started PID: $pid. press any key to continue..."
        puts $msg
        read stdin 1
    }

    # Tell the test server about this new instance.
    send_data_packet $::test_server_fd server-spawned $pid
    return $pid
}
# Wait for actual startup, return 1 if port is busy, 0 otherwise
proc wait_server_started {config_file stdout pid} {
    set checkperiod 100; # Milliseconds
    set maxiter [expr {120*1000/$checkperiod}] ; # Wait up to 2 minutes.
    set port_busy 0
    while 1 {
        if {[regexp -- " PID: $pid.*Server initialized" [exec cat $stdout]]} {
            break
        }
        after $checkperiod
        incr maxiter -1
        if {$maxiter == 0} {
            start_server_error $config_file "No PID detected in log $stdout"
            puts "--- LOG CONTENT ---"
            puts [exec cat $stdout]
            puts "-------------------"
            break
        }

        # Check if the port is actually busy and the server failed
        # for this reason.
        if {[regexp {Failed listening on port} [exec cat $stdout]]} {
            set port_busy 1
            break
        }
    }
    return $port_busy
}
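# Print the stdout and stderr log files of the given server to the test
# output.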
proc dump_server_log {srv} {
    set pid [dict get $srv "pid"]
    puts "\n===== Start of server log (pid $pid) =====\n"
    puts [exec cat [dict get $srv "stdout"]]
    puts "===== End of server log (pid $pid) =====\n"

    puts "\n===== Start of server stderr log (pid $pid) =====\n"
    puts [exec cat [dict get $srv "stderr"]]
    puts "===== End of server stderr log (pid $pid) =====\n"
}
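# Run 'code' against an already running external server: apply 'overrides'
# via CONFIG SET, execute the test body, then restore the original settings.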
proc run_external_server_test {code overrides} {
    set srv {}
    dict set srv "host" $::host
    dict set srv "port" $::port
    set client [redis $::host $::port 0 $::tls]
    dict set srv "client" $client
    if {!$::singledb} {
        $client select 9
    }

    set config {}
    dict set config "port" $::port
    dict set srv "config" $config

    # append the server to the stack
    lappend ::servers $srv

    if {[llength $::servers] > 1} {
        if {$::verbose} {
            puts "Notice: nested start_server statements in external server mode, test must be aware of that!"
        }
    }

    r flushall
    r function flush

    # store overrides
    set saved_config {}
    foreach {param val} $overrides {
        dict set saved_config $param [lindex [r config get $param] 1]
        r config set $param $val

        # If we enable appendonly, wait for the rewrite to complete. This is
        # required for tests that begin with a bg* command which will fail if
        # the rewriteaof operation is not completed at this point.
        if {$param == "appendonly" && $val == "yes"} {
            waitForBgrewriteaof r
        }
    }

    if {[catch {set retval [uplevel 2 $code]} error]} {
        if {$::durable} {
            set msg [string range $error 10 end]
            lappend details $msg
            lappend details $::errorInfo
            lappend ::tests_failed $details

            incr ::num_failed
            send_data_packet $::test_server_fd err [join $details "\n"]
        } else {
            # Re-raise, let handler up the stack take care of this.
            error $error $::errorInfo
        }
    }

    # restore overrides
    dict for {param val} $saved_config {
        r config set $param $val
    }

    set srv [lpop ::servers]

    if {[dict exists $srv "client"]} {
        [dict get $srv "client"] close
    }
}
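# Set up and start a new server according to 'options' and run 'code'
# against it. For example (tag and override names are illustrative):
#   start_server {tags {"expire"} overrides {appendonly yes}} {
#       test {...} { ... }
#   }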
proc start_server {options {code undefined}} {
    # setup defaults
    set baseconfig "default.conf"
    set overrides {}
    set omit {}
    set tags {}
    set args {}
    set keep_persistence false
    set config_lines {}

    # Wait for the server to be ready and check for server liveness/client connectivity before starting the test.
    set wait_ready true

    # parse options
    foreach {option value} $options {
        switch $option {
            "config" {
                set baseconfig $value
            }
            "overrides" {
                set overrides [concat $overrides $value]
            }
            "config_lines" {
                set config_lines $value
            }
            "args" {
                set args $value
            }
            "omit" {
                set omit $value
            }
            "tags" {
                # If 'tags' contains multiple tags, quoted and separated by spaces,
                # we want to get rid of the quotes in order to have a proper list
                set tags [string map { \" "" } $value]
                set ::tags [concat $::tags $tags]
            }
            "keep_persistence" {
                set keep_persistence $value
            }
            "wait_ready" {
                set wait_ready $value
            }
            default {
                error "Unknown option $option"
            }
        }
    }
|
|
|
|
|
2020-08-31 04:05:30 -04:00
|
|
|
# We skip unwanted tags
|
2021-06-09 08:13:24 -04:00
|
|
|
if {![tags_acceptable $::tags err]} {
|
2021-01-17 08:48:48 -05:00
|
|
|
incr ::num_aborted
|
|
|
|
send_data_packet $::test_server_fd ignore $err
|
|
|
|
set ::tags [lrange $::tags 0 end-[llength $tags]]
|
|
|
|
return
|
2020-08-31 04:05:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
# If we are running against an external server, we just push the
|
|
|
|
# host/port pair in the stack the first time
|
|
|
|
if {$::external} {
|
2021-06-09 08:13:24 -04:00
|
|
|
run_external_server_test $code $overrides
|
|
|
|
|
2020-08-31 04:05:30 -04:00
|
|
|
set ::tags [lrange $::tags 0 end-[llength $tags]]
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2010-06-02 16:23:52 -04:00
|
|
|
set data [split [exec cat "tests/assets/$baseconfig"] "\n"]
|
2010-05-14 11:31:11 -04:00
|
|
|
set config {}
|
2019-09-12 03:56:54 -04:00
|
|
|
if {$::tls} {
|
Build TLS as a loadable module
* Support BUILD_TLS=module to be loaded as a module via config file or
command line. e.g. redis-server --loadmodule redis-tls.so
* Updates to redismodule.h to allow it to be used side by side with
server.h by defining REDISMODULE_CORE_MODULE
* Changes to server.h, redismodule.h and module.c to avoid repeated
type declarations (gcc 4.8 doesn't like these)
* Add a mechanism for non-ABI neutral modules (ones who include
server.h) to refuse loading if they detect not being built together with
redis (release.c)
* Fix wrong signature of RedisModuleDefragFunc, this could break
compilation of a module, but not the ABI
* Move initialization of listeners in server.c to be after loading
the modules
* Config TLS after initialization of listeners
* Init cluster after initialization of listeners
* Add TLS module to CI
* Fix a test suite race conditions:
Now that the listeners are initialized later, it's not sufficient to
wait for the PID message in the log, we need to wait for the "Server
Initialized" message.
* Fix issues with moduleconfigs test as a result from start_server
waiting for "Server Initialized"
* Fix issues with modules/infra test as a result of an additional module
present
Notes about Sentinel:
Sentinel can't really rely on the tls module, since it uses hiredis to
initiate connections and depends on OpenSSL (won't be able to use any
other connection modules for that), so it was decided that when TLS is
built as a module, sentinel does not support TLS at all.
This means that it keeps using redis_tls_ctx and redis_tls_client_ctx directly.
Example code of config in redis-tls.so(may be use in the future):
RedisModuleString *tls_cfg = NULL;
void tlsInfo(RedisModuleInfoCtx *ctx, int for_crash_report) {
UNUSED(for_crash_report);
RedisModule_InfoAddSection(ctx, "");
RedisModule_InfoAddFieldLongLong(ctx, "var", 42);
}
int tlsCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
if (argc != 2) return RedisModule_WrongArity(ctx);
return RedisModule_ReplyWithString(ctx, argv[1]);
}
RedisModuleString *getStringConfigCommand(const char *name, void *privdata) {
REDISMODULE_NOT_USED(name);
REDISMODULE_NOT_USED(privdata);
return tls_cfg;
}
int setStringConfigCommand(const char *name, RedisModuleString *new, void *privdata, RedisModuleString **err) {
REDISMODULE_NOT_USED(name);
REDISMODULE_NOT_USED(err);
REDISMODULE_NOT_USED(privdata);
if (tls_cfg) RedisModule_FreeString(NULL, tls_cfg);
RedisModule_RetainString(NULL, new);
tls_cfg = new;
return REDISMODULE_OK;
}
int RedisModule_OnLoad(void *ctx, RedisModuleString **argv, int argc)
{
....
if (RedisModule_CreateCommand(ctx,"tls",tlsCommand,"",0,0,0) == REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_RegisterStringConfig(ctx, "cfg", "", REDISMODULE_CONFIG_DEFAULT, getStringConfigCommand, setStringConfigCommand, NULL, NULL) == REDISMODULE_ERR)
return REDISMODULE_ERR;
if (RedisModule_LoadConfigs(ctx) == REDISMODULE_ERR) {
if (tls_cfg) {
RedisModule_FreeString(ctx, tls_cfg);
tls_cfg = NULL;
}
return REDISMODULE_ERR;
}
...
}
Co-authored-by: zhenwei pi <pizhenwei@bytedance.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
2022-08-22 03:53:56 -04:00
|
|
|
if {$::tls_module} {
|
|
|
|
lappend config_lines [list "loadmodule" [format "%s/src/redis-tls.so" [pwd]]]
|
|
|
|
}
|
2020-12-11 11:31:40 -05:00
|
|
|
dict set config "tls-cert-file" [format "%s/tests/tls/server.crt" [pwd]]
|
|
|
|
dict set config "tls-key-file" [format "%s/tests/tls/server.key" [pwd]]
|
|
|
|
dict set config "tls-client-cert-file" [format "%s/tests/tls/client.crt" [pwd]]
|
|
|
|
dict set config "tls-client-key-file" [format "%s/tests/tls/client.key" [pwd]]
|
2019-09-12 03:56:54 -04:00
|
|
|
dict set config "tls-dh-params-file" [format "%s/tests/tls/redis.dh" [pwd]]
|
|
|
|
dict set config "tls-ca-cert-file" [format "%s/tests/tls/ca.crt" [pwd]]
|
|
|
|
dict set config "loglevel" "debug"
|
|
|
|
}
|
2010-05-14 11:31:11 -04:00
|
|
|
foreach line $data {
|
|
|
|
if {[string length $line] > 0 && [string index $line 0] ne "#"} {
|
|
|
|
set elements [split $line " "]
|
|
|
|
set directive [lrange $elements 0 0]
|
|
|
|
set arguments [lrange $elements 1 end]
|
|
|
|
dict set config $directive $arguments
|
|
|
|
}
|
|
|
|
}
|
2014-07-31 14:39:49 -04:00
|
|
|
|
2010-05-14 11:31:11 -04:00
|
|
|
# use a different directory every time a server is started
|
|
|
|
dict set config dir [tmpdir server]
|
2014-07-31 14:39:49 -04:00
|
|
|
|
2010-05-14 11:42:07 -04:00
|
|
|
# start every server on a different port
|
2020-05-26 04:00:48 -04:00
|
|
|
set port [find_available_port $::baseport $::portcount]
|
2019-09-12 03:56:54 -04:00
|
|
|
if {$::tls} {
|
Support TLS service when "tls-cluster" is not enabled and persist both plain and TLS port in nodes.conf (#12233)
Originally, when "tls-cluster" is enabled, `port` is set to TLS port. In order to support non-TLS clients, `pport` is used to propagate TCP port across cluster nodes. However when "tls-cluster" is disabled, `port` is set to TCP port, and `pport` is not used, which means the cluster cannot provide TLS service unless "tls-cluster" is on.
```
typedef struct {
// ...
uint16_t port; /* Latest known clients port (TLS or plain). */
uint16_t pport; /* Latest known clients plaintext port. Only used if the main clients port is for TLS. */
// ...
} clusterNode;
```
```
typedef struct {
// ...
uint16_t port; /* TCP base port number. */
uint16_t pport; /* Sender TCP plaintext port, if base port is TLS */
// ...
} clusterMsg;
```
This PR renames `port` and `pport` in `clusterNode` to `tcp_port` and `tls_port`, to record both ports no matter "tls-cluster" is enabled or disabled.
This allows to provide TLS service to clients when "tls-cluster" is disabled: when displaying cluster topology, or giving `MOVED` error, server can provide TLS or TCP port according to client's connection type, no matter what type of connection cluster bus is using.
For backwards compatibility, `port` and `pport` in `clusterMsg` are preserved, when "tls-cluster" is enabled, `port` is set to TLS port and `pport` is set to TCP port, when "tls-cluster" is disabled, `port` is set to TCP port and `pport` is set to TLS port (instead of 0).
Also, in the nodes.conf file, a new aux field displaying an extra port is added to complete the persisted info. We may have `tls_port=xxxxx` or `tcp_port=xxxxx` in the aux field, to complete the cluster topology, while the other port is stored in the normal `<ip>:<port>` field. The format is shown below.
```
<node-id> <ip>:<tcp_port>@<cport>,<hostname>,shard-id=...,tls-port=6379 myself,master - 0 0 0 connected 0-1000
```
Or we can switch the position of two ports, both can be correctly resolved.
```
<node-id> <ip>:<tls_port>@<cport>,<hostname>,shard-id=...,tcp-port=6379 myself,master - 0 0 0 connected 0-1000
```
2023-06-26 10:43:38 -04:00
|
|
|
set pport [find_available_port $::baseport $::portcount]
|
|
|
|
dict set config "port" $pport
|
2020-05-26 04:00:48 -04:00
|
|
|
dict set config "tls-port" $port
|
2019-09-12 03:56:54 -04:00
|
|
|
dict set config "tls-cluster" "yes"
|
|
|
|
dict set config "tls-replication" "yes"
|
|
|
|
} else {
|
2020-05-26 04:00:48 -04:00
|
|
|
dict set config port $port
|
2019-09-12 03:56:54 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
set unixsocket [file normalize [format "%s/%s" [dict get $config "dir"] "socket"]]
|
|
|
|
dict set config "unixsocket" $unixsocket
|
2010-05-14 11:42:07 -04:00
|
|
|
|
2010-05-20 07:58:58 -04:00
|
|
|
# apply overrides from global space and arguments
|
2010-06-02 16:23:52 -04:00
|
|
|
foreach {directive arguments} [concat $::global_overrides $overrides] {
|
2010-05-14 11:31:11 -04:00
|
|
|
dict set config $directive $arguments
|
|
|
|
}
|
2014-07-31 14:39:49 -04:00
|
|
|
|
2020-12-11 11:31:40 -05:00
|
|
|
# remove directives that are marked to be omitted
|
|
|
|
foreach directive $omit {
|
|
|
|
dict unset config $directive
|
|
|
|
}
|
|
|
|
|
Add reply_schema to command json files (internal for now) (#10273)
Work in progress towards implementing a reply schema as part of COMMAND DOCS, see #9845
Since ironing the details of the reply schema of each and every command can take a long time, we
would like to merge this PR when the infrastructure is ready, and let this mature in the unstable branch.
Meanwhile the changes of this PR are internal, they are part of the repo, but do not affect the produced build.
### Background
In #9656 we add a lot of information about Redis commands, but we are missing information about the replies
### Motivation
1. Documentation. This is the primary goal.
2. It should be possible, based on the output of COMMAND, to be able to generate client code in typed
languages. In order to do that, we need Redis to tell us, in detail, what each reply looks like.
3. We would like to build a fuzzer that verifies the reply structure (for now we use the existing
testsuite, see the "Testing" section)
### Schema
The idea is to supply some sort of schema for the various replies of each command.
The schema will describe the conceptual structure of the reply (for generated clients), as defined in RESP3.
Note that the reply structure itself may change, depending on the arguments (e.g. `XINFO STREAM`, with
and without the `FULL` modifier)
We decided to use the standard json-schema (see https://json-schema.org/) as the reply-schema.
Example for `BZPOPMIN`:
```
"reply_schema": {
"oneOf": [
{
"description": "Timeout reached and no elements were popped.",
"type": "null"
},
{
"description": "The keyname, popped member, and its score.",
"type": "array",
"minItems": 3,
"maxItems": 3,
"items": [
{
"description": "Keyname",
"type": "string"
},
{
"description": "Member",
"type": "string"
},
{
"description": "Score",
"type": "number"
}
]
}
]
}
```
#### Notes
1. It is ok that some commands' reply structure depends on the arguments and it's the caller's responsibility
to know which is the relevant one. this comes after looking at other request-reply systems like OpenAPI,
where the reply schema can also be oneOf and the caller is responsible to know which schema is the relevant one.
2. The reply schemas will describe RESP3 replies only. even though RESP3 is structured, we want to use reply
schema for documentation (and possibly to create a fuzzer that validates the replies)
3. For documentation, the description field will include an explanation of the scenario in which the reply is sent,
including any relation to arguments. for example, for `ZRANGE`'s two schemas we will need to state that one
is with `WITHSCORES` and the other is without.
4. For documentation, there will be another optional field "notes" in which we will add a short description of
the representation in RESP2, in case it's not trivial (RESP3's `ZRANGE`'s nested array vs. RESP2's flat
array, for example)
Given the above:
1. We can generate the "return" section of all commands in [redis-doc](https://redis.io/commands/)
(given that "description" and "notes" are comprehensive enough)
2. We can generate a client in a strongly typed language (but the return type could be a conceptual
`union` and the caller needs to know which schema is relevant). see the section below for RESP2 support.
3. We can create a fuzzer for RESP3.
### Limitations (because we are using the standard json-schema)
The problem is that Redis' replies are more diverse than what the json format allows. This means that,
when we convert the reply to a json (in order to validate the schema against it), we lose information (see
the "Testing" section below).
The other option would have been to extend the standard json-schema (and json format) to include stuff
like sets, bulk-strings, error-string, etc. but that would mean also extending the schema-validator - and that
seemed like too much work, so we decided to compromise.
Examples:
1. We cannot tell the difference between an "array" and a "set"
2. We cannot tell the difference between simple-string and bulk-string
3. we cannot verify true uniqueness of items in commands like ZRANGE: json-schema doesn't cover the
case of two identical members with different scores (e.g. `[["m1",6],["m1",7]]`) because `uniqueItems`
compares (member,score) tuples and not just the member name.
### Testing
This commit includes some changes inside Redis in order to verify the schemas (existing and future ones)
are indeed correct (i.e. describe the actual response of Redis).
To do that, we added a debugging feature to Redis that causes it to produce a log of all the commands
it executed and their replies.
For that, Redis needs to be compiled with `-DLOG_REQ_RES` and run with
`--reg-res-logfile <file> --client-default-resp 3` (the testsuite already does that if you run it with
`--log-req-res --force-resp3`)
You should run the testsuite with the above args (and `--dont-clean`) in order to make Redis generate
`.reqres` files (same dir as the `stdout` files) which contain request-response pairs.
These files are later processed by `./utils/req-res-log-validator.py`, which:
1. Goes over the req-res files generated by the redis-servers spawned by the testsuite (see logreqres.c)
2. For each request-response pair, validates the response against the request's reply_schema
(obtained from the extended COMMAND DOCS)
In order to get good coverage of the Redis commands and all their different replies, we chose to use
the existing Redis test suite rather than attempt to write a fuzzer.
#### Notes about RESP2
1. We will not be able to use the testing tool to verify RESP2 replies (we are ok with that, it's time to
accept RESP3 as the future RESP)
2. Since the majority of the test suite is using RESP2, and we want the server to reply with RESP3
so that we can validate it, we will need to know how to convert the actual reply to the one expected.
- number and boolean are always strings in RESP2 so the conversion is easy
- objects (maps) are always a flat array in RESP2
- others (the nested array in RESP3's `ZRANGE`, for example) will need some special per-command
handling (so the client will not be totally auto-generated); see the sketch after the ZRANGE example below
Example for ZRANGE:
```
"reply_schema": {
"anyOf": [
{
"description": "A list of member elements",
"type": "array",
"uniqueItems": true,
"items": {
"type": "string"
}
},
{
"description": "Members and their scores. Returned in case `WITHSCORES` was used.",
"notes": "In RESP2 this is returned as a flat array",
"type": "array",
"uniqueItems": true,
"items": {
"type": "array",
"minItems": 2,
"maxItems": 2,
"items": [
{
"description": "Member",
"type": "string"
},
{
"description": "Score",
"type": "number"
}
]
}
}
]
}
```
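To illustrate the special per-command handling mentioned in the RESP2 notes above, here is a minimal Tcl sketch (the helper name and shape are hypothetical, not the actual test-suite code) that flattens a RESP3 `ZRANGE ... WITHSCORES` reply into the flat array a RESP2 client would expect:
```
# Hypothetical per-command transformer: turn the RESP3 nested reply,
# e.g. {{m1 6} {m2 7}}, into the RESP2 flat form {m1 6 m2 7}.
proc flatten_zrange_withscores {reply} {
    set flat {}
    foreach pair $reply {
        lappend flat [lindex $pair 0] [lindex $pair 1]
    }
    return $flat
}
# Example: flatten_zrange_withscores {{m1 6} {m2 7}}  ;# => m1 6 m2 7
```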
### Other changes
1. Some tests that behave differently depending on the RESP version are now tested with both RESP versions,
regardless of the special log-req-res mode ("Pub/Sub PING" for example)
2. Updated the history field of CLIENT LIST
3. Added basic tests for commands that were not covered at all by the testsuite
### TODO
- [x] (maybe a different PR) add a "condition" field to anyOf/oneOf schemas that refers to args. e.g.
when `SET` returns NULL, the condition is `arguments.get||arguments.condition`, for `OK` the condition
is `!arguments.get`, and for `string` the condition is `arguments.get` - https://github.com/redis/redis/issues/11896
- [x] (maybe a different PR) also run `runtest-cluster` in the req-res logging mode
- [x] add the new tests to GH actions (i.e. compile with `-DLOG_REQ_RES`, run the tests, and run the validator)
- [x] (maybe a different PR) figure out a way to warn about (sub)schemas that are uncovered by the output
of the tests - https://github.com/redis/redis/issues/11897
- [x] (probably a separate PR) add all missing schemas
- [x] check why "SDOWN is triggered by misconfigured instance replying with errors" fails with --log-req-res
- [x] move the response transformers to their own file (run both regular, cluster, and sentinel tests - need to
fight with the Tcl include mechanism a bit)
- [x] issue: module API - https://github.com/redis/redis/issues/11898
- [x] (probably a separate PR): improve schemas: add `required` to `object`s - https://github.com/redis/redis/issues/11899
Co-authored-by: Ozan Tezcan <ozantezcan@gmail.com>
Co-authored-by: Hanna Fadida <hanna.fadida@redislabs.com>
Co-authored-by: Oran Agra <oran@redislabs.com>
Co-authored-by: Shaya Potter <shaya@redislabs.com>
2023-03-11 03:14:16 -05:00
|
|
|
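# when requested, log request-response pairs to a .reqres file so replies can be validated against the reply schemas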
if {$::log_req_res} {
|
|
|
|
dict set config "req-res-logfile" "stdout.reqres"
|
|
|
|
}
|
|
|
|
|
|
|
|
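# when requested, make clients default to RESP3 so the server's replies match the RESP3 reply schemas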
if {$::force_resp3} {
|
|
|
|
dict set config "client-default-resp" "3"
|
|
|
|
}
|
|
|
|
|
2010-05-14 11:31:11 -04:00
|
|
|
# write new configuration to temporary file
|
|
|
|
set config_file [tmpfile redis.conf]
|
2022-07-12 13:41:29 -04:00
|
|
|
create_server_config_file $config_file $config $config_lines
|
2010-05-14 11:31:11 -04:00
|
|
|
|
|
|
|
set stdout [format "%s/%s" [dict get $config "dir"] "stdout"]
|
|
|
|
set stderr [format "%s/%s" [dict get $config "dir"] "stderr"]
|
2010-05-21 06:00:13 -04:00
|
|
|
|
2020-08-31 03:23:09 -04:00
|
|
|
# if we're inside a test, write the test name to the server log file
|
|
|
|
if {[info exists ::cur_test]} {
|
|
|
|
set fd [open $stdout "a+"]
|
|
|
|
puts $fd "### Starting server for test $::cur_test"
|
|
|
|
close $fd
|
Attempt to solve MacOS CI issues in GH Actions (#12013)
The macOS CI in GitHub Actions often hangs without any logs. GH argues that
it's due to resource utilization (running out of disk space or memory, or CPU
starvation), and thus the runner is terminated.
This PR contains multiple attempts to resolve this:
1. Introduce pause_process instead of SIGSTOP, which waits for the process
to stop before resuming the test, possibly resolving race conditions in some tests.
This was a suspect since one test could end up in an infinite loop in that
case; in practice this didn't help, but it's still a good idea to keep.
2. Disable the `save` config in many tests that don't need it, specifically ones that do
heavy writes and could create large files.
3. Change the `populate` proc to use a short pipeline rather than an infinite one.
4. Use `--clients 1` in the macOS CI so that we don't risk running multiple resource-demanding
tests in parallel.
5. Allow `--verbose` to be repeated to elevate verbosity and print more info to stdout
when a test or a server starts.
2023-04-12 02:19:21 -04:00
|
|
|
if {$::verbose > 1} {
|
|
|
|
puts "### Starting server $stdout for test - $::cur_test"
|
|
|
|
}
|
2020-08-31 03:23:09 -04:00
|
|
|
}
|
|
|
|
|
2022-01-24 15:31:35 -05:00
|
|
|
# We may have a stdout left over from the previous tests, so we need
|
|
|
|
# to get the current count of ready logs
|
|
|
|
set previous_ready_count [count_message_lines $stdout "Ready to accept"]
|
|
|
|
|
2020-02-24 04:46:23 -05:00
|
|
|
# We need a loop here to retry with different ports.
|
|
|
|
set server_started 0
|
|
|
|
while {$server_started == 0} {
|
|
|
|
if {$::verbose} {
|
2020-05-26 04:00:48 -04:00
|
|
|
puts -nonewline "=== ($tags) Starting server ${::host}:${port} "
|
2020-02-24 04:46:23 -05:00
|
|
|
}
|
2020-02-21 11:08:45 -05:00
|
|
|
|
2020-05-26 04:00:48 -04:00
|
|
|
send_data_packet $::test_server_fd "server-spawning" "port $port"
|
2014-07-31 14:39:49 -04:00
|
|
|
|
2022-05-09 06:37:49 -04:00
|
|
|
set pid [spawn_server $config_file $stdout $stderr $args]
|
2020-02-24 04:46:23 -05:00
|
|
|
|
|
|
|
# check that the server actually started
|
tests/valgrind: don't use debug restart (#7404)
* tests/valgrind: don't use debug restart
DEBUG RESTART causes two issues:
1. It uses execve, which replaces the original process, so valgrind doesn't
get a chance to check for errors and leaks go unreported.
2. Valgrind reports invalid calls to close() which we're unable to resolve.
So now the tests use the restart_server mechanism, which terminates
the old server and starts a new one with a new PID but the same stdout and stderr.
Since the stderr can contain two or more valgrind reports, it is not enough
to just check for the absence of leaks; we also need to check for some known
errors. We do both, and fail if we either find an error or can't find a
report saying there are no leaks.
Other changes:
- When killing a server that was already terminated, we check for leaks too.
- Added DEBUG LEAK, which was used to test it.
- Added --trace-children to valgrind, although it's no longer needed.
- Since the stdout contains two or more runs, we need a slightly different way
of checking whether the new process is up (explicitly looking for the new PID).
- Moved the code that handles --wait-server to happen earlier (before
watching for the startup message in the log), and serve the restarted server too.
* squashme - CR fixes
2020-07-10 01:26:52 -04:00
|
|
|
set port_busy [wait_server_started $config_file $stdout $pid]
|
2010-12-10 10:13:21 -05:00
|
|
|
|
2020-02-24 04:46:23 -05:00
|
|
|
# Sometimes we have to try a different port, even if we checked
|
|
|
|
# for availability. Other test clients may grab the port before we
|
|
|
|
# are able to do it, for example.
|
|
|
|
if {$port_busy} {
|
2020-05-26 04:00:48 -04:00
|
|
|
puts "Port $port was already busy, trying another port..."
|
|
|
|
set port [find_available_port $::baseport $::portcount]
|
2020-02-24 04:46:23 -05:00
|
|
|
if {$::tls} {
|
Support TLS service when "tls-cluster" is not enabled and persist both plain and TLS port in nodes.conf (#12233)
Originally, when "tls-cluster" is enabled, `port` is set to TLS port. In order to support non-TLS clients, `pport` is used to propagate TCP port across cluster nodes. However when "tls-cluster" is disabled, `port` is set to TCP port, and `pport` is not used, which means the cluster cannot provide TLS service unless "tls-cluster" is on.
```
typedef struct {
// ...
uint16_t port; /* Latest known clients port (TLS or plain). */
uint16_t pport; /* Latest known clients plaintext port. Only used if the main clients port is for TLS. */
// ...
} clusterNode;
```
```
typedef struct {
// ...
uint16_t port; /* TCP base port number. */
uint16_t pport; /* Sender TCP plaintext port, if base port is TLS */
// ...
} clusterMsg;
```
This PR renames `port` and `pport` in `clusterNode` to `tcp_port` and `tls_port`, to record both ports no matter whether "tls-cluster" is enabled or disabled.
This allows providing TLS service to clients when "tls-cluster" is disabled: when displaying the cluster topology or giving a `MOVED` error, the server can provide the TLS or TCP port according to the client's connection type, no matter what type of connection the cluster bus is using.
For backwards compatibility, `port` and `pport` in `clusterMsg` are preserved: when "tls-cluster" is enabled, `port` is set to the TLS port and `pport` is set to the TCP port; when "tls-cluster" is disabled, `port` is set to the TCP port and `pport` is set to the TLS port (instead of 0).
Also, in the nodes.conf file, a new aux field displaying an extra port is added to complete the persisted info. We may have `tls_port=xxxxx` or `tcp_port=xxxxx` in the aux field, to complete the cluster topology, while the other port is stored in the normal `<ip>:<port>` field. The format is shown below.
```
<node-id> <ip>:<tcp_port>@<cport>,<hostname>,shard-id=...,tls-port=6379 myself,master - 0 0 0 connected 0-1000
```
Or we can switch the position of two ports, both can be correctly resolved.
```
<node-id> <ip>:<tls_port>@<cport>,<hostname>,shard-id=...,tcp-port=6379 myself,master - 0 0 0 connected 0-1000
```
2023-06-26 10:43:38 -04:00
|
|
|
set pport [find_available_port $::baseport $::portcount]
|
|
|
|
dict set config port $pport
|
2020-05-26 04:00:48 -04:00
|
|
|
dict set config "tls-port" $port
|
2020-02-24 04:46:23 -05:00
|
|
|
} else {
|
2020-05-26 04:00:48 -04:00
|
|
|
dict set config port $port
|
2020-02-24 04:46:23 -05:00
|
|
|
}
|
2022-07-12 13:41:29 -04:00
|
|
|
create_server_config_file $config_file $config $config_lines
|
2020-09-07 10:30:36 -04:00
|
|
|
|
|
|
|
# Truncate log so wait_server_started will not be looking at
|
|
|
|
# output of the failed server.
|
|
|
|
close [open $stdout "w"]
|
|
|
|
|
2020-02-24 04:46:23 -05:00
|
|
|
continue; # Try again
|
|
|
|
}
|
2010-12-10 10:13:21 -05:00
|
|
|
|
2020-07-10 01:26:52 -04:00
|
|
|
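# under valgrind the server starts much more slowly, so allow many more retries while waiting for it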
if {$::valgrind} {set retrynum 1000} else {set retrynum 100}
|
2023-10-02 01:20:53 -04:00
|
|
|
if {$code ne "undefined" && $wait_ready} {
|
2020-05-26 04:00:48 -04:00
|
|
|
set serverisup [server_is_up $::host $port $retrynum]
|
2020-02-24 04:46:23 -05:00
|
|
|
} else {
|
|
|
|
set serverisup 1
|
|
|
|
}
|
2010-08-31 05:17:06 -04:00
|
|
|
|
2020-02-24 04:46:23 -05:00
|
|
|
if {$::verbose} {
|
|
|
|
puts ""
|
|
|
|
}
|
2014-07-31 14:39:49 -04:00
|
|
|
|
2020-02-24 04:46:23 -05:00
|
|
|
if {!$serverisup} {
|
|
|
|
set err {}
|
|
|
|
append err [exec cat $stdout] "\n" [exec cat $stderr]
|
|
|
|
start_server_error $config_file $err
|
|
|
|
return
|
2020-02-21 12:55:56 -05:00
|
|
|
}
|
2020-02-24 04:46:23 -05:00
|
|
|
set server_started 1
|
2014-11-28 05:49:26 -05:00
|
|
|
}
|
|
|
|
|
2010-05-19 08:33:39 -04:00
|
|
|
# setup properties to be able to initialize a client object
|
2019-09-12 03:56:54 -04:00
|
|
|
set port_param [expr $::tls ? {"tls-port"} : {"port"}]
|
2010-05-14 11:31:11 -04:00
|
|
|
set host $::host
|
|
|
|
if {[dict exists $config bind]} { set host [dict get $config bind] }
|
2019-09-12 03:56:54 -04:00
|
|
|
if {[dict exists $config $port_param]} { set port [dict get $config $port_param] }
|
2010-05-14 11:31:11 -04:00
|
|
|
|
2010-05-14 11:45:27 -04:00
|
|
|
# setup config dict
|
2010-10-13 05:25:40 -04:00
|
|
|
dict set srv "config_file" $config_file
|
|
|
|
dict set srv "config" $config
|
2010-05-14 14:50:32 -04:00
|
|
|
dict set srv "pid" $pid
|
|
|
|
dict set srv "host" $host
|
|
|
|
dict set srv "port" $port
|
|
|
|
dict set srv "stdout" $stdout
|
|
|
|
dict set srv "stderr" $stderr
|
2019-09-12 03:56:54 -04:00
|
|
|
dict set srv "unixsocket" $unixsocket
|
2023-06-26 10:43:38 -04:00
|
|
|
if {$::tls} {
|
|
|
|
dict set srv "pport" $pport
|
|
|
|
}
|
2010-05-14 11:45:27 -04:00
|
|
|
|
2010-05-19 08:33:39 -04:00
|
|
|
# if a block of code is supplied, we wait for the server to become
|
|
|
|
# available, create a client object and kill the server afterwards
|
2010-05-14 11:31:11 -04:00
|
|
|
if {$code ne "undefined"} {
|
2010-05-19 08:33:39 -04:00
|
|
|
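# abort early if the first log line shows the listen address was already in use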
set line [exec head -n1 $stdout]
|
|
|
|
if {[string match {*already in use*} $line]} {
|
|
|
|
error_and_quit $config_file $line
|
|
|
|
}
|
|
|
|
|
2010-05-14 14:50:32 -04:00
|
|
|
# append the server to the stack
|
|
|
|
lappend ::servers $srv
|
2010-10-13 05:25:40 -04:00
|
|
|
|
2023-10-02 01:20:53 -04:00
|
|
|
if {$wait_ready} {
|
|
|
|
while 1 {
|
|
|
|
# check that the server actually started and is ready for connections
|
|
|
|
if {[count_message_lines $stdout "Ready to accept"] > $previous_ready_count} {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
after 10
|
|
|
|
}
|
|
|
|
|
|
|
|
# connect client (after server dict is put on the stack)
|
|
|
|
reconnect
|
|
|
|
}
|
2010-10-13 05:25:40 -04:00
|
|
|
|
2021-02-07 05:37:24 -05:00
|
|
|
# remember previous num_failed to catch new errors
|
|
|
|
set prev_num_failed $::num_failed
|
|
|
|
|
2010-05-14 11:31:11 -04:00
|
|
|
# execute provided block
|
2010-12-10 10:13:21 -05:00
|
|
|
set num_tests $::num_tests
|
|
|
|
if {[catch { uplevel 1 $code } error]} {
|
|
|
|
set backtrace $::errorInfo
|
2021-04-18 04:55:54 -04:00
|
|
|
set assertion [string match "assertion:*" $error]
|
2010-12-10 10:13:21 -05:00
|
|
|
|
2020-07-10 01:26:52 -04:00
|
|
|
# fetch srv back from the server list, in case it was restarted by restart_server (new PID)
|
|
|
|
set srv [lindex $::servers end]
|
|
|
|
|
2020-07-21 09:56:19 -04:00
|
|
|
# pop the server object
|
|
|
|
set ::servers [lrange $::servers 0 end-1]
|
|
|
|
|
2010-12-10 10:13:21 -05:00
|
|
|
# Kill the server without checking for leaks
|
|
|
|
dict set srv "skipleaks" 1
|
|
|
|
kill_server $srv
|
|
|
|
|
2021-04-18 04:55:54 -04:00
|
|
|
if {$::dump_logs && $assertion} {
|
|
|
|
# if we caught an assertion ($::num_failed isn't incremented yet)
|
|
|
|
# this happens when the test spawns a server and not the other way around
|
|
|
|
dump_server_log $srv
|
2010-12-10 10:13:21 -05:00
|
|
|
} else {
|
2021-04-18 04:55:54 -04:00
|
|
|
# Print crash report from log
|
|
|
|
set crashlog [crashlog_from_file [dict get $srv "stdout"]]
|
|
|
|
if {[string length $crashlog] > 0} {
|
|
|
|
puts [format "\nLogged crash report (pid %d):" [dict get $srv "pid"]]
|
|
|
|
puts "$crashlog"
|
|
|
|
puts ""
|
|
|
|
}
|
2021-11-11 06:51:33 -05:00
|
|
|
|
|
|
|
set sanitizerlog [sanitizer_errors_from_file [dict get $srv "stderr"]]
|
|
|
|
if {[string length $sanitizerlog] > 0} {
|
|
|
|
puts [format "\nLogged sanitizer errors (pid %d):" [dict get $srv "pid"]]
|
|
|
|
puts "$sanitizerlog"
|
|
|
|
puts ""
|
|
|
|
}
|
2010-12-10 10:13:21 -05:00
|
|
|
}
|
|
|
|
|
2021-04-18 04:55:54 -04:00
|
|
|
if {!$assertion && $::durable} {
|
|
|
|
# durable is meant to prevent the whole tcl test from exiting on
|
|
|
|
# an exception. an assertion will be caught by the test proc.
|
2020-08-31 04:24:17 -04:00
|
|
|
set msg [string range $error 10 end]
|
|
|
|
lappend details $msg
|
|
|
|
lappend details $backtrace
|
|
|
|
lappend ::tests_failed $details
|
|
|
|
|
|
|
|
incr ::num_failed
|
|
|
|
send_data_packet $::test_server_fd err [join $details "\n"]
|
|
|
|
} else {
|
|
|
|
# Re-raise, let handler up the stack take care of this.
|
|
|
|
error $error $backtrace
|
|
|
|
}
|
2021-02-07 05:37:24 -05:00
|
|
|
} else {
|
|
|
|
if {$::dump_logs && $prev_num_failed != $::num_failed} {
|
|
|
|
dump_server_log $srv
|
|
|
|
}
|
2010-08-21 04:54:31 -04:00
|
|
|
}
|
|
|
|
|
2020-07-10 01:26:52 -04:00
|
|
|
# fetch srv back from the server list, in case it was restarted by restart_server (new PID)
|
|
|
|
set srv [lindex $::servers end]
|
|
|
|
|
2010-12-10 10:13:21 -05:00
|
|
|
# Don't do the leak check when no tests were run
|
|
|
|
if {$num_tests == $::num_tests} {
|
2010-06-02 16:53:22 -04:00
|
|
|
dict set srv "skipleaks" 1
|
|
|
|
}
|
2010-05-14 11:31:11 -04:00
|
|
|
|
2010-05-14 14:50:32 -04:00
|
|
|
# pop the server object
|
|
|
|
set ::servers [lrange $::servers 0 end-1]
|
2010-06-02 15:20:29 -04:00
|
|
|
|
2010-06-02 18:25:32 -04:00
|
|
|
set ::tags [lrange $::tags 0 end-[llength $tags]]
|
2010-06-02 15:20:29 -04:00
|
|
|
kill_server $srv
|
2020-08-31 03:44:43 -04:00
|
|
|
if {!$keep_persistence} {
|
|
|
|
clean_persistence $srv
|
|
|
|
}
|
|
|
|
set _ ""
|
2010-05-14 11:31:11 -04:00
|
|
|
} else {
|
2010-06-02 18:25:32 -04:00
|
|
|
set ::tags [lrange $::tags 0 end-[llength $tags]]
|
2010-05-14 14:50:32 -04:00
|
|
|
set _ $srv
|
2010-05-14 11:31:11 -04:00
|
|
|
}
|
|
|
|
}
|
2020-07-10 01:26:52 -04:00
|
|
|
|
2022-03-16 13:11:38 -04:00
|
|
|
# Start multiple servers with the same options, run code, then stop them.
|
|
|
|
proc start_multiple_servers {num options code} {
|
|
|
|
for {set i 0} {$i < $num} {incr i} {
|
|
|
|
set code [list start_server $options $code]
|
|
|
|
}
|
|
|
|
uplevel 1 $code
|
|
|
|
}
|
|
|
|
|
2022-01-02 02:50:15 -05:00
|
|
|
proc restart_server {level wait_ready rotate_logs {reconnect 1} {shutdown sigterm}} {
|
2020-07-10 01:26:52 -04:00
|
|
|
set srv [lindex $::servers end+$level]
|
2022-01-02 02:50:15 -05:00
|
|
|
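# if a shutdown method other than SIGTERM was requested, ask the server to shut down through its client connection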
if {$shutdown ne {sigterm}} {
|
|
|
|
catch {[dict get $srv "client"] shutdown $shutdown}
|
|
|
|
}
|
|
|
|
# kill_server doesn't mind if the server is already dead
|
2020-07-10 01:26:52 -04:00
|
|
|
kill_server $srv
|
2021-09-13 11:16:47 -04:00
|
|
|
# Remove the default client from the server
|
|
|
|
dict unset srv "client"
|
2020-07-10 01:26:52 -04:00
|
|
|
|
2020-08-14 09:05:34 -04:00
|
|
|
set pid [dict get $srv "pid"]
|
2020-07-10 01:26:52 -04:00
|
|
|
set stdout [dict get $srv "stdout"]
|
|
|
|
set stderr [dict get $srv "stderr"]
|
2020-08-14 09:05:34 -04:00
|
|
|
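# optionally archive the previous stdout/stderr, tagging them with a timestamp and the old pid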
if {$rotate_logs} {
|
|
|
|
set ts [clock format [clock seconds] -format %y%m%d%H%M%S]
|
|
|
|
file rename $stdout $stdout.$ts.$pid
|
|
|
|
file rename $stderr $stderr.$ts.$pid
|
|
|
|
}
|
|
|
|
set prev_ready_count [count_message_lines $stdout "Ready to accept"]
|
2020-07-10 01:26:52 -04:00
|
|
|
|
2020-08-31 03:23:09 -04:00
|
|
|
# if we're inside a test, write the test name to the server log file
|
|
|
|
if {[info exists ::cur_test]} {
|
|
|
|
set fd [open $stdout "a+"]
|
|
|
|
puts $fd "### Restarting server for test $::cur_test"
|
|
|
|
close $fd
|
|
|
|
}
|
|
|
|
|
2020-08-14 09:05:34 -04:00
|
|
|
set config_file [dict get $srv "config_file"]
|
2020-07-10 01:26:52 -04:00
|
|
|
|
2022-05-09 06:37:49 -04:00
|
|
|
set pid [spawn_server $config_file $stdout $stderr {}]
|
2020-07-10 01:26:52 -04:00
|
|
|
|
|
|
|
# check that the server actually started
|
|
|
|
wait_server_started $config_file $stdout $pid
|
|
|
|
|
|
|
|
# update the pid in the servers list
|
|
|
|
dict set srv "pid" $pid
|
|
|
|
# re-set $srv in the servers list
|
|
|
|
lset ::servers end+$level $srv
|
|
|
|
|
|
|
|
if {$wait_ready} {
|
|
|
|
while 1 {
|
|
|
|
# check that the server actually started and is ready for connections
|
2020-08-14 09:05:34 -04:00
|
|
|
if {[count_message_lines $stdout "Ready to accept"] > $prev_ready_count} {
|
2020-07-10 01:26:52 -04:00
|
|
|
break
|
|
|
|
}
|
|
|
|
after 10
|
|
|
|
}
|
|
|
|
}
|
2021-06-22 05:50:17 -04:00
|
|
|
if {$reconnect} {
|
|
|
|
reconnect $level
|
|
|
|
}
|
2020-07-10 01:26:52 -04:00
|
|
|
}
|