Mirror of https://codeberg.org/redict/redict.git, synced 2025-01-23 00:28:26 -05:00
070453eef3
This PR adds a human-readable name for cluster nodes that is visible in error logs. This is useful so that admins and operators of Redis clusters have better visibility into failures without having to cross-reference the generated node ID with some logical identifier (such as a pod ID or EC2 instance ID). This is mentioned in #8948. Specific node names can be set via the cluster-announce-human-nodename configuration. The node name is gossiped using the cluster bus extension introduced in #9530. Co-authored-by: Madelyn Olson <madelyneolson@gmail.com>
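The test file below exercises the related cluster-announce-hostname config. As a minimal sketch (not part of the test file), setting such an announce-style config and reading it back through the test harness's R helper might look like the following; the node index and hostname value are illustrative:

# Sketch only: announce a hostname on node 0 and read it back via CONFIG GET.
R 0 config set cluster-announce-hostname "node-0.example.com"
assert_equal "node-0.example.com" [lindex [R 0 config get cluster-announce-hostname] 1]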
219 lines · 8.4 KiB · Tcl
# Check if cluster's view of hostnames is consistent
proc are_hostnames_propagated {match_string} {
    for {set j 0} {$j < [llength $::servers]} {incr j} {
        set cfg [R $j cluster slots]
        foreach node $cfg {
            for {set i 2} {$i < [llength $node]} {incr i} {
                if {! [string match $match_string [lindex [lindex [lindex $node $i] 3] 1]] } {
                    return 0
                }
            }
        }
    }
    return 1
}

proc get_slot_field {slot_output shard_id node_id attrib_id} {
    return [lindex [lindex [lindex $slot_output $shard_id] $node_id] $attrib_id]
}

# Start a cluster with 3 masters and 4 replicas.
# These tests rely on specific node ordering, so make sure no node fails over.
start_cluster 3 4 {tags {external:skip cluster} overrides {cluster-replica-no-failover yes}} {
test "Set cluster hostnames and verify they are propagated" {
|
|
for {set j 0} {$j < [llength $::servers]} {incr j} {
|
|
R $j config set cluster-announce-hostname "host-$j.com"
|
|
}
|
|
|
|
wait_for_condition 50 100 {
|
|
[are_hostnames_propagated "host-*.com"] eq 1
|
|
} else {
|
|
fail "cluster hostnames were not propagated"
|
|
}
|
|
|
|
# Now that everything is propagated, assert everyone agrees
|
|
wait_for_cluster_propagation
|
|
}
|
|
|
|
test "Update hostnames and make sure they are all eventually propagated" {
|
|
for {set j 0} {$j < [llength $::servers]} {incr j} {
|
|
R $j config set cluster-announce-hostname "host-updated-$j.com"
|
|
}
|
|
|
|
wait_for_condition 50 100 {
|
|
[are_hostnames_propagated "host-updated-*.com"] eq 1
|
|
} else {
|
|
fail "cluster hostnames were not propagated"
|
|
}
|
|
|
|
# Now that everything is propagated, assert everyone agrees
|
|
wait_for_cluster_propagation
|
|
}
|
|
|
|
test "Remove hostnames and make sure they are all eventually propagated" {
|
|
for {set j 0} {$j < [llength $::servers]} {incr j} {
|
|
R $j config set cluster-announce-hostname ""
|
|
}
|
|
|
|
wait_for_condition 50 100 {
|
|
[are_hostnames_propagated ""] eq 1
|
|
} else {
|
|
fail "cluster hostnames were not propagated"
|
|
}
|
|
|
|
# Now that everything is propagated, assert everyone agrees
|
|
wait_for_cluster_propagation
|
|
}
|
|
|
|
test "Verify cluster-preferred-endpoint-type behavior for redirects and info" {
|
|
R 0 config set cluster-announce-hostname "me.com"
|
|
R 1 config set cluster-announce-hostname ""
|
|
R 2 config set cluster-announce-hostname "them.com"
|
|
|
|
wait_for_cluster_propagation
|
|
|
|
# Verify default behavior
|
|
set slot_result [R 0 cluster slots]
|
|
assert_equal "" [lindex [get_slot_field $slot_result 0 2 0] 1]
|
|
assert_equal "" [lindex [get_slot_field $slot_result 2 2 0] 1]
|
|
assert_equal "hostname" [lindex [get_slot_field $slot_result 0 2 3] 0]
|
|
assert_equal "me.com" [lindex [get_slot_field $slot_result 0 2 3] 1]
|
|
assert_equal "hostname" [lindex [get_slot_field $slot_result 2 2 3] 0]
|
|
assert_equal "them.com" [lindex [get_slot_field $slot_result 2 2 3] 1]
|
|
|
|
# Redirect will use the IP address
|
|
catch {R 0 set foo foo} redir_err
|
|
assert_match "MOVED * 127.0.0.1:*" $redir_err
|
|
|
|
# Verify prefer hostname behavior
|
|
R 0 config set cluster-preferred-endpoint-type hostname
|
|
|
|
set slot_result [R 0 cluster slots]
|
|
assert_equal "me.com" [get_slot_field $slot_result 0 2 0]
|
|
assert_equal "them.com" [get_slot_field $slot_result 2 2 0]
|
|
|
|
# Redirect should use hostname
|
|
catch {R 0 set foo foo} redir_err
|
|
assert_match "MOVED * them.com:*" $redir_err
|
|
|
|
# Redirect to an unknown hostname returns ?
|
|
catch {R 0 set barfoo bar} redir_err
|
|
assert_match "MOVED * ?:*" $redir_err
|
|
|
|
# Verify unknown hostname behavior
|
|
R 0 config set cluster-preferred-endpoint-type unknown-endpoint
|
|
|
|
# Verify default behavior
|
|
set slot_result [R 0 cluster slots]
|
|
assert_equal "ip" [lindex [get_slot_field $slot_result 0 2 3] 0]
|
|
assert_equal "127.0.0.1" [lindex [get_slot_field $slot_result 0 2 3] 1]
|
|
assert_equal "ip" [lindex [get_slot_field $slot_result 2 2 3] 0]
|
|
assert_equal "127.0.0.1" [lindex [get_slot_field $slot_result 2 2 3] 1]
|
|
assert_equal "ip" [lindex [get_slot_field $slot_result 1 2 3] 0]
|
|
assert_equal "127.0.0.1" [lindex [get_slot_field $slot_result 1 2 3] 1]
|
|
# Not required by the protocol, but IP comes before hostname
|
|
assert_equal "hostname" [lindex [get_slot_field $slot_result 0 2 3] 2]
|
|
assert_equal "me.com" [lindex [get_slot_field $slot_result 0 2 3] 3]
|
|
assert_equal "hostname" [lindex [get_slot_field $slot_result 2 2 3] 2]
|
|
assert_equal "them.com" [lindex [get_slot_field $slot_result 2 2 3] 3]
|
|
|
|
# This node doesn't have a hostname
|
|
assert_equal 2 [llength [get_slot_field $slot_result 1 2 3]]
|
|
|
|
# Redirect should use empty string
|
|
catch {R 0 set foo foo} redir_err
|
|
assert_match "MOVED * :*" $redir_err
|
|
|
|
R 0 config set cluster-preferred-endpoint-type ip
|
|
}
|
|
|
|
test "Verify the nodes configured with prefer hostname only show hostname for new nodes" {
|
|
# Have everyone forget node 6 and isolate it from the cluster.
|
|
isolate_node 6
|
|
|
|
# Set hostnames for the masters, now that the node is isolated
|
|
R 0 config set cluster-announce-hostname "shard-1.com"
|
|
R 1 config set cluster-announce-hostname "shard-2.com"
|
|
R 2 config set cluster-announce-hostname "shard-3.com"
|
|
|
|
    # Prevent Node 0 and Node 6 from properly meeting; they'll hang in the
    # handshake phase. This allows us to test the case where we "know" about
    # the node but haven't successfully retrieved information about it yet.
    R 0 DEBUG DROP-CLUSTER-PACKET-FILTER 0
    R 6 DEBUG DROP-CLUSTER-PACKET-FILTER 0

    # Have a replica meet the isolated node
    R 3 cluster meet 127.0.0.1 [srv -6 port]

    # Wait for the isolated node to learn about the rest of the cluster,
    # each of which corresponds to a single entry in cluster nodes. Note this
    # doesn't mean the isolated node has successfully contacted each node.
    wait_for_condition 50 100 {
        [llength [split [R 6 CLUSTER NODES] "\n"]] eq [expr [llength $::servers] + 1]
    } else {
        fail "Isolated node didn't learn about the rest of the cluster"
    }

    # Now wait for the two nodes that aren't filtering packets to accept
    # our isolated node's connections. At this point they will start
    # showing up in cluster slots.
    wait_for_condition 50 100 {
        [llength [R 6 CLUSTER SLOTS]] eq 2
    } else {
        fail "Node did not learn about the 2 shards it can talk to"
    }
    set slot_result [R 6 CLUSTER SLOTS]
    assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "shard-2.com"
    assert_equal [lindex [get_slot_field $slot_result 1 2 3] 1] "shard-3.com"

    # Also make sure we know about the isolated master; we just can't reach it.
    set master_id [R 0 CLUSTER MYID]
    assert_match "*$master_id*" [R 6 CLUSTER NODES]

    # Stop dropping cluster packets, and make sure everything stabilizes
    R 0 DEBUG DROP-CLUSTER-PACKET-FILTER -1
    R 6 DEBUG DROP-CLUSTER-PACKET-FILTER -1

    # This operation sometimes spikes to around 5 seconds to resolve the state,
    # so it has a higher timeout.
    wait_for_condition 50 500 {
        [llength [R 6 CLUSTER SLOTS]] eq 3
    } else {
        fail "Node did not learn about all 3 shards it can talk to"
    }
    set slot_result [R 6 CLUSTER SLOTS]
    assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "shard-1.com"
    assert_equal [lindex [get_slot_field $slot_result 1 2 3] 1] "shard-2.com"
    assert_equal [lindex [get_slot_field $slot_result 2 2 3] 1] "shard-3.com"
}

test "Test restart will keep hostname information" {
|
|
# Set a new hostname, reboot and make sure it sticks
|
|
R 0 config set cluster-announce-hostname "restart-1.com"
|
|
|
|
# Store the hostname in the config
|
|
R 0 config rewrite
|
|
|
|
restart_server 0 true false
|
|
set slot_result [R 0 CLUSTER SLOTS]
|
|
assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "restart-1.com"
|
|
|
|
# As a sanity check, make sure everyone eventually agrees
|
|
wait_for_cluster_propagation
|
|
}
|
|
|
|
test "Test hostname validation" {
|
|
catch {R 0 config set cluster-announce-hostname [string repeat x 256]} err
|
|
assert_match "*Hostnames must be less than 256 characters*" $err
|
|
catch {R 0 config set cluster-announce-hostname "?.com"} err
|
|
assert_match "*Hostnames may only contain alphanumeric characters, hyphens or dots*" $err
|
|
|
|
# Note this isn't a valid hostname, but it passes our internal validation
|
|
R 0 config set cluster-announce-hostname "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-."
|
|
}
|
|
}
|