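# Test server event hooks with the hooks.so test module, which counts each
# event it receives (hooks.event_count) and records the data attached to the
# most recent one (hooks.event_last).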
set testmodule [file normalize tests/modules/hooks.so]

tags "modules" {
    start_server [list overrides [list loadmodule "$testmodule" appendonly yes]] {
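
        # Starting from an empty data set with appendonly yes forces a
        # foreground rewrite that creates the base AOF file, firing the
        # synchronous REDISMODULE_SUBEVENT_PERSISTENCE_SYNC_AOF_START subevent
        # (REDISMODULE_SUBEVENT_PERSISTENCE_AOF_START is its async counterpart).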
        test {Test module aof save on server start from empty} {
            assert {[r hooks.event_count persistence-syncaof-start] == 1}
        }
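
        # Open and close a deferring client twice; each connection fires a
        # client-connected and a client-disconnected event, and the main test
        # client accounts for at least one more, so both counters exceed 1.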
        test {Test clients connection / disconnection hooks} {
            for {set j 0} {$j < 2} {incr j} {
                set rd1 [redis_deferring_client]
                $rd1 close
            }
            assert {[r hooks.event_count client-connected] > 1}
            assert {[r hooks.event_count client-disconnected] > 1}
        }
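
        # A cron-loop event fires on every serverCron run; its event data is
        # the configured hz, which defaults to 10.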
        test {Test module cron hook} {
            after 100
            assert {[r hooks.event_count cron-loop] > 0}
            set hz [r hooks.event_last cron-loop]
            assert_equal $hz 10
        }
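
        # Loading and unloading another module fires module-loaded and
        # module-unloaded events carrying the module's name.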
        test {Test module loaded / unloaded hooks} {
            set othermodule [file normalize tests/modules/infotest.so]
            r module load $othermodule
            r module unload infotest
            assert_equal [r hooks.event_last module-loaded] "infotest"
            assert_equal [r hooks.event_last module-unloaded] "infotest"
        }
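
        # BGREWRITEAOF runs in a child process, so the async aof-start subevent
        # is delivered; the module logs each event it gets, and the asserts
        # grep the tail of the server log for those lines.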
        test {Test module aofrw hook} {
            r debug populate 1000 foo 10000 ;# 10mb worth of data
            r config set rdbcompression no ;# rdb progress is only checked once in 2mb
            r BGREWRITEAOF
            waitForBgrewriteaof r
            assert_equal [string match {*module-event-persistence-aof-start*} [exec tail -20 < [srv 0 stdout]]] 1
            assert_equal [string match {*module-event-persistence-end*} [exec tail -20 < [srv 0 stdout]]] 1
        }
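
        # Reload the AOF with an artificial per-key delay (key-load-delay) and
        # a high fixed hz so that many loading-progress events fire; the rdb
        # progress events come from the AOF's rdb preamble.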
        test {Test module aof load and rdb/aof progress hooks} {
            # create some aof tail (progress is checked only once in 1000 commands)
            for {set j 0} {$j < 4000} {incr j} {
                r set "bar$j" x
            }
            # set some configs that will cause many loading progress events during aof loading
            r config set key-load-delay 500
            r config set dynamic-hz no
            r config set hz 500
            r DEBUG LOADAOF
            assert_equal [r hooks.event_last loading-aof-start] 0
            assert_equal [r hooks.event_last loading-end] 0
            assert {[r hooks.event_count loading-rdb-start] == 0}
            assert_lessthan 2 [r hooks.event_count loading-progress-rdb] ;# comes from the preamble section
            assert_lessthan 2 [r hooks.event_count loading-progress-aof]
            if {$::verbose} {
                puts "rdb progress events [r hooks.event_count loading-progress-rdb]"
                puts "aof progress events [r hooks.event_count loading-progress-aof]"
            }
        }

        # undo configs before next test
        r config set dynamic-hz yes
        r config set key-load-delay 0
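
        # DEBUG RELOAD saves and loads in the foreground, so the synchronous
        # rdb save subevent (persistence-syncrdb-start) fires rather than the
        # async one, and loading-rdb-start fires on the way back in.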
        test {Test module rdb save hook} {
            # debug reload does: save, flush, load:
            assert {[r hooks.event_count persistence-syncrdb-start] == 0}
            assert {[r hooks.event_count loading-rdb-start] == 0}
            r debug reload
            assert {[r hooks.event_count persistence-syncrdb-start] == 1}
            assert {[r hooks.event_count loading-rdb-start] == 1}
        }
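
        # flush-start/flush-end carry the flushed database id; the test suite
        # runs against db 9, and FLUSHALL reports -1.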
        test {Test flushdb hooks} {
            r flushdb
            assert_equal [r hooks.event_last flush-start] 9
            assert_equal [r hooks.event_last flush-end] 9
            r flushall
            assert_equal [r hooks.event_last flush-start] -1
            assert_equal [r hooks.event_last flush-end] -1
        }

        # replication related tests
        set master [srv 0 client]
        set master_host [srv 0 host]
        set master_port [srv 0 port]
        start_server {} {
            r module load $testmodule
            set replica [srv 0 client]
            set replica_host [srv 0 host]
            set replica_port [srv 0 port]
            $replica replicaof $master_host $master_port
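
            # Wait for the master (not just the replica) to consider the sync
            # complete: the master fires its replica-online module event only
            # when it puts the replica online.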
            wait_replica_online $master

            test {Test master link up hook} {
                assert_equal [r hooks.event_count masterlink-up] 1
                assert_equal [r hooks.event_count masterlink-down] 0
            }
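
            # The role-replica event data is the host of the master that is
            # now being followed.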
            test {Test role-replica hook} {
                assert_equal [r hooks.event_count role-replica] 1
                assert_equal [r hooks.event_count role-master] 0
                assert_equal [r hooks.event_last role-replica] [s 0 master_host]
            }
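
            # "r -1" addresses the master (the outer server): it saw the
            # replica come online once and never go offline.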
            test {Test replica-online hook} {
                assert_equal [r -1 hooks.event_count replica-online] 1
                assert_equal [r -1 hooks.event_count replica-offline] 0
            }
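
            # Killing the master link fires masterlink-down once; when the
            # replica reconnects, masterlink-up fires for the second time.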
            test {Test master link down hook} {
                r client kill type master
                assert_equal [r hooks.event_count masterlink-down] 1

                wait_for_condition 50 100 {
                    [string match {*master_link_status:up*} [r info replication]]
                } else {
                    fail "Replica didn't reconnect"
                }

                assert_equal [r hooks.event_count masterlink-down] 1
                assert_equal [r hooks.event_count masterlink-up] 2
            }

            wait_for_condition 50 10 {
                [string match {*master_link_status:up*} [r info replication]]
            } else {
                fail "Can't turn the instance into a replica"
            }

            $replica replicaof no one
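
            # Promotion back to master fires role-master; its event data (the
            # master host) is empty on a master.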
            test {Test role-master hook} {
                assert_equal [r hooks.event_count role-replica] 1
                assert_equal [r hooks.event_count role-master] 1
                assert_equal [r hooks.event_last role-master] {}
            }
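
            # From the master's viewpoint the replica connected twice (initial
            # sync plus the reconnect above) and dropped twice (the link kill
            # and the replicaof no one).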
            test {Test replica-offline hook} {
                assert_equal [r -1 hooks.event_count replica-online] 2
                assert_equal [r -1 hooks.event_count replica-offline] 2
            }

            # get the replica stdout, to be used by the next test
            set replica_stdout [srv 0 stdout]
        }
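
        # swapdb-first and swapdb-second report the two database indexes that
        # were swapped.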
        test {Test swapdb hooks} {
            r swapdb 0 10
            assert_equal [r hooks.event_last swapdb-first] 0
            assert_equal [r hooks.event_last swapdb-second] 10
        }

        # look into the log file of the server that just exited
        test {Test shutdown hook} {
            assert_equal [string match {*module-event-shutdown*} [exec tail -5 < $replica_stdout]] 1
        }
    }
}