#!/usr/bin/env ruby

# TODO (temporary here, we'll move this into the GitHub issues once
# redis-trib initial implementation is completed).
#
# - Make sure that if the rehashing fails in the middle redis-trib will try
#   to recover.
# - When redis-trib performs a cluster check, if it detects a slot move in
#   progress it should prompt the user to continue the move from where it
#   stopped.
# - Gracefully handle Ctrl+C in move_slot to prompt the user if really stop
#   while rehashing, and perform the best cleanup possible if the user
#   forces the quit.
# - When doing "fix" set a global Fix to true, and prompt the user to
#   fix the problem if automatically fixable every time there is something
#   to fix. For instance:
#   1) If there is a node that pretends to receive a slot, or to migrate a
#      slot, but has no entries in that slot, fix it.
#   2) If there is a node having keys in slots that are not owned by it,
#      fix this condition by moving the entries into the same node.
#   3) Perform more possibly slow tests about the state of the cluster.
#   4) When aborted slot migration is detected, fix it.
require 'rubygems'
|
|
|
|
require 'redis'
|
|
|
|
|
2013-02-14 06:55:34 -05:00
|
|
|
ClusterHashSlots = 16384
|
# Print 's' followed by a newline. On xterm terminals the line is
# colorized according to its prefix:
#   ">>>" bold default, "[ER" bold red, "[OK" green, "[FA"/"***" yellow.
def xputs(s)
    color = case s[0..2]
            when ">>>" then "29;1"
            when "[ER" then "31;1"
            when "[OK" then "32"
            when "[FA", "***" then "33"
            end
    color = nil if ENV['TERM'] != "xterm"
    print "\033[#{color}m" if color
    print s
    print "\033[0m" if color
    print "\n"
end
|
# Represents a single Redis instance participating in (or about to join)
# a Redis Cluster. Wraps a lazily-created Redis connection plus the node
# metadata parsed from CLUSTER NODES / CLUSTER INFO.
class ClusterNode
    # addr is a "host:port" string; exits the program for any other shape.
    def initialize(addr)
        s = addr.split(":")
        if s.length != 2
            puts "Invalid node name #{addr}"
            exit 1
        end
        @r = nil                  # Redis connection, created by #connect.
        @info = {}
        @info[:host] = s[0]
        @info[:port] = s[1]
        @info[:slots] = {}        # slot -> :new (pending) or true (flushed)
        @info[:migrating] = {}    # slot -> destination node id
        @info[:importing] = {}    # slot -> source node id
        @info[:replicate] = false # master node id when this is a slave
        @dirty = false # True if we need to flush slots info into node.
        @friends = []
    end

    # Other cluster members parsed from CLUSTER NODES (see #load_info).
    def friends
        @friends
    end

    # Hash of slots served by this node.
    def slots
        @info[:slots]
    end

    # Truthy (the index) when 'flag' is among the node flags, nil otherwise.
    def has_flag?(flag)
        @info[:flags].index(flag)
    end

    def to_s
        "#{@info[:host]}:#{@info[:port]}"
    end

    # Establish the connection if not already connected. With
    # :abort => true a failure terminates the program; otherwise the node
    # is left unconnected (@r stays nil).
    def connect(o={})
        return if @r
        print "Connecting to node #{self}: "
        STDOUT.flush
        begin
            @r = Redis.new(:host => @info[:host], :port => @info[:port], :timeout => 60)
            @r.ping
        rescue
            xputs "[ERR] Sorry, can't connect to node #{self}"
            exit 1 if o[:abort]
            @r = nil
        end
        xputs "OK"
    end

    # Exit unless the instance was started with cluster support enabled.
    def assert_cluster
        info = @r.info
        if !info["cluster_enabled"] || info["cluster_enabled"].to_i == 0
            xputs "[ERR] Node #{self} is not configured as a cluster node."
            exit 1
        end
    end

    # Exit unless the node knows no other node and holds no key in db 0.
    def assert_empty
        if !(@r.cluster("info").split("\r\n").index("cluster_known_nodes:1")) ||
            (@r.info['db0'])
            xputs "[ERR] Node #{self} is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0."
            exit 1
        end
    end

    # Populate @info (and @friends when :getfriends => true) by parsing
    # CLUSTER NODES and CLUSTER INFO output.
    def load_info(o={})
        self.connect
        nodes = @r.cluster("nodes").split("\n")
        nodes.each{|n|
            # name addr flags role ping_sent ping_recv link_status slots
            split = n.split
            # BUGFIX: eight variables are destructured here, so the slice
            # must take eight elements. The original used split[0..6],
            # which left link_status always nil.
            name,addr,flags,master_id,ping_sent,ping_recv,config_epoch,link_status = split[0..7]
            slots = split[8..-1]
            info = {
                :name => name,
                :addr => addr,
                :flags => flags.split(","),
                :replicate => master_id,
                :ping_sent => ping_sent.to_i,
                :ping_recv => ping_recv.to_i,
                :link_status => link_status
            }
            info[:replicate] = false if master_id == "-"

            if info[:flags].index("myself")
                @info = @info.merge(info)
                @info[:slots] = {}
                slots.each{|s|
                    if s[0..0] == '['
                        # Open slot: "[slot->-dst]" or "[slot-<-src]".
                        if s.index("->-") # Migrating
                            slot,dst = s[1..-1].split("->-")
                            @info[:migrating][slot] = dst
                        elsif s.index("-<-") # Importing
                            slot,src = s[1..-1].split("-<-")
                            @info[:importing][slot] = src
                        end
                    elsif s.index("-")
                        # Slot range "start-stop".
                        start,stop = s.split("-")
                        self.add_slots((start.to_i)..(stop.to_i))
                    else
                        # Single slot.
                        self.add_slots((s.to_i)..(s.to_i))
                    end
                } if slots
                @dirty = false
                # Merge CLUSTER INFO fields; everything but cluster_state
                # is numeric.
                @r.cluster("info").split("\n").each{|e|
                    k,v=e.split(":")
                    k = k.to_sym
                    v.chop!
                    if k != :cluster_state
                        @info[k] = v.to_i
                    else
                        @info[k] = v
                    end
                }
            elsif o[:getfriends]
                @friends << info
            end
        }
    end

    # Mark every slot in 'slots' (a Range or Array of integers) as newly
    # assigned to this node; sent to the instance by #flush_node_config.
    def add_slots(slots)
        slots.each{|s|
            @info[:slots][s] = :new
        }
        @dirty = true
    end

    # Turn this node into a replica of 'node_id' (applied on flush).
    def set_as_replica(node_id)
        @info[:replicate] = node_id
        @dirty = true
    end

    # Push pending configuration (replication target or newly added
    # slots) to the actual Redis instance. No-op when nothing changed.
    def flush_node_config
        return if !@dirty
        if @info[:replicate]
            begin
                @r.cluster("replicate",@info[:replicate])
            rescue
                # If the cluster did not already joined it is possible that
                # the slave does not know the master node yet. So on errors
                # we return ASAP leaving the dirty flag set, to flush the
                # config later.
                return
            end
        else
            new = []
            @info[:slots].each{|s,val|
                if val == :new
                    new << s
                    @info[:slots][s] = true
                end
            }
            @r.cluster("addslots",*new)
        end
        @dirty = false
    end

    # Human readable description of the node with its slot ranges
    # rendered compactly (e.g. "1-5,8-9,20-25,30").
    def info_string
        # We want to display the hash slots assigned to this node
        # as ranges, like in: "1-5,8-9,20-25,30"
        #
        # Note: this could be easily written without side effects,
        # we use 'slots' just to split the computation into steps.

        # First step: we want an increasing array of integers
        # for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30]
        slots = @info[:slots].keys.sort

        # As we want to aggregate adjacent slots we convert all the
        # slot integers into ranges (with just one element)
        # So we have something like [1..1,2..2, ... and so forth.
        slots.map!{|x| x..x}

        # Finally we group ranges with adjacent elements.
        slots = slots.reduce([]) {|a,b|
            if !a.empty? && b.first == (a[-1].last)+1
                a[0..-2] + [(a[-1].first)..(b.last)]
            else
                a + [b]
            end
        }

        # Now our task is easy, we just convert ranges with just one
        # element into a number, and a real range into a start-end format.
        # Finally we join the array using the comma as separator.
        slots = slots.map{|x|
            x.count == 1 ? x.first.to_s : "#{x.first}-#{x.last}"
        }.join(",")

        role = self.has_flag?("master") ? "M" : "S"

        if self.info[:replicate] and @dirty
            is = "S: #{self.info[:name]} #{self.to_s}"
        else
            is = "#{role}: #{self.info[:name]} #{self.to_s}\n"+
                 "   slots:#{slots} (#{self.slots.length} slots) "+
                 "#{(self.info[:flags]-["myself"]).join(",")}"
        end
        if self.info[:replicate]
            is += "\n   replicates #{info[:replicate]}"
        elsif self.has_flag?("master") && self.info[:replicas]
            is += "\n   #{info[:replicas].length} additional replica(s)"
        end
        is
    end

    # Return a single string representing nodes and associated slots.
    # TODO: remove slaves from config when slaves will be handled
    # by Redis Cluster.
    def get_config_signature
        config = []
        @r.cluster("nodes").each_line{|l|
            s = l.split
            slots = s[8..-1].select {|x| x[0..0] != "["}
            next if slots.length == 0
            config << s[0]+":"+(slots.sort.join(","))
        }
        config.sort.join("|")
    end

    def info
        @info
    end

    # True when there is local configuration not yet flushed to the node.
    def is_dirty?
        @dirty
    end

    # Raw Redis connection (nil until #connect succeeds).
    def r
        @r
    end
end
|
|
|
|
|
|
|
|
class RedisTrib
|
# Build an empty cluster-manipulation context.
def initialize
    @nodes  = []    # ClusterNode objects known to this session.
    @fix    = false # When true, check routines attempt repairs.
    @errors = []    # Error strings collected by cluster_error.
end
|
|
|
|
|
# Validate sub-command arity. A positive req_args demands exactly that
# many arguments; a negative value demands at least req_args.abs.
# Exits the program with an error message on a mismatch.
def check_arity(req_args, num_args)
    exact_mismatch   = req_args > 0 && num_args != req_args
    minimum_mismatch = req_args < 0 && num_args < req_args.abs
    if exact_mismatch || minimum_mismatch
        xputs "[ERR] Wrong number of arguments for specified sub command"
        exit 1
    end
end
|
|
|
|
|
# Register 'node' in the list of known cluster nodes.
def add_node(node)
    @nodes.push(node)
end
|
|
|
|
|
# Record 'msg' in the accumulated error list and echo it to the user.
def cluster_error(msg)
    @errors.push(msg)
    xputs msg
end
|
|
|
|
|
# Look up a known node by its node ID (compared case-insensitively by
# downcasing the query). Returns nil when no node matches.
def get_node_by_name(name)
    wanted = name.downcase
    @nodes.find{|n| n.info[:name] == wanted}
end
|
|
|
|
|
# This function returns the master that has the least number of replicas
# in the cluster. If there are multiple masters with the same smaller
# number of replicas, one at random is returned.
def get_master_with_least_replicas
    masters = @nodes.select{|n| n.has_flag? "master"}
    masters.sort_by{|n| n.info[:replicas].length}.first
end
|
|
|
|
|
# Run the full battery of sanity checks against the loaded nodes:
# node listing, config agreement, open slots, then slot coverage.
def check_cluster
    xputs ">>> Performing Cluster Check (using node #{@nodes[0]})"
    show_nodes
    check_config_consistency
    check_open_slots
    check_slots_coverage
end
|
|
|
|
|
|
|
|
# Merge slots of every known node. If the resulting slots are equal
# to ClusterHashSlots, then all slots are served.
def covered_slots
    @nodes.each_with_object({}) {|n, acc| acc.merge!(n.slots)}
end
|
|
|
|
|
|
|
|
# Verify that the union of all node slots covers the whole slot space,
# reporting any gap (and repairing it when fix mode is active).
def check_slots_coverage
    xputs ">>> Check slots coverage..."
    covered = covered_slots
    if covered.length == ClusterHashSlots
        xputs "[OK] All #{ClusterHashSlots} slots covered."
        return
    end
    cluster_error \
        "[ERR] Not all #{ClusterHashSlots} slots are covered by nodes."
    fix_slots_coverage if @fix
end
|
|
|
|
|
# Detect slots stuck in migrating/importing state on any node and, in
# fix mode, repair each open slot found.
def check_open_slots
    xputs ">>> Check for open slots..."
    open_slots = []
    @nodes.each{|node|
        migrating = node.info[:migrating]
        importing = node.info[:importing]
        if migrating.size > 0
            cluster_error \
                "[WARNING] Node #{node} has slots in migrating state (#{migrating.keys.join(",")})."
            open_slots += migrating.keys
        elsif importing.size > 0
            cluster_error \
                "[WARNING] Node #{node} has slots in importing state (#{importing.keys.join(",")})."
            open_slots += importing.keys
        end
    }
    open_slots.uniq!
    if open_slots.length > 0
        xputs "[WARNING] The following slots are open: #{open_slots.join(",")}"
    end
    if @fix
        open_slots.each{|slot| fix_open_slot slot}
    end
end
|
|
|
|
|
# Return every known node that reports at least one key stored in
# hash slot 'slot'.
def nodes_with_keys_in_slot(slot)
    @nodes.select{|n| n.r.cluster("getkeysinslot", slot, 1).length > 0}
end
|
|
|
|
|
# Repair slots not covered by any node. For each uncovered slot we look
# at which nodes actually hold keys for it and act accordingly:
# no holder -> random node, one holder -> that node, many holders ->
# merge (still TODO).
def fix_slots_coverage
    not_covered = (0...ClusterHashSlots).to_a - covered_slots.keys
    xputs ">>> Fixing slots coverage..."
    xputs "List of not covered slots: " + not_covered.join(",")

    # For every slot, take action depending on the actual condition:
    # 1) No node has keys for this slot.
    # 2) A single node has keys for this slot.
    # 3) Multiple nodes have keys for this slot.
    slots = {}
    not_covered.each{|slot|
        nodes = nodes_with_keys_in_slot(slot)
        slots[slot] = nodes
        xputs "Slot #{slot} has keys in #{nodes.length} nodes: #{nodes.join}"
    }

    none = slots.select {|k,v| v.length == 0}
    single = slots.select {|k,v| v.length == 1}
    multi = slots.select {|k,v| v.length > 1}

    # Handle case "1": keys in no node.
    if none.length > 0
        # FIX: typo "folowing" corrected in the user-facing messages.
        xputs "The following uncovered slots have no keys across the cluster:"
        xputs none.keys.join(",")
        yes_or_die "Fix these slots by covering with a random node?"
        none.each{|slot,nodes|
            node = @nodes.sample
            xputs ">>> Covering slot #{slot} with #{node}"
            node.r.cluster("addslots",slot)
        }
    end

    # Handle case "2": keys only in one node.
    if single.length > 0
        xputs "The following uncovered slots have keys in just one node:"
        # FIX: was plain `puts`; use xputs like every other message here.
        xputs single.keys.join(",")
        yes_or_die "Fix these slots by covering with those nodes?"
        single.each{|slot,nodes|
            xputs ">>> Covering slot #{slot} with #{nodes[0]}"
            nodes[0].r.cluster("addslots",slot)
        }
    end

    # Handle case "3": keys in multiple nodes.
    if multi.length > 0
        xputs "The following uncovered slots have keys in multiple nodes:"
        xputs multi.keys.join(",")
        yes_or_die "Fix these slots by moving keys into a single node?"
        multi.each{|slot,nodes|
            xputs ">>> Covering slot #{slot} moving keys to #{nodes[0]}"
            # TODO
            # 1) Set all nodes as "MIGRATING" for this slot, so that we
            #    can access keys in the hash slot using ASKING.
            # 2) Move everything to node[0]
            # 3) Clear MIGRATING from nodes, and ADDSLOTS the slot to
            #    node[0].
            raise "TODO: Work in progress"
        }
    end
end
|
|
|
|
|
# Slot 'slot' was found to be in importing or migrating state in one or
# more nodes. This function fixes this condition by migrating keys where
# it seems more sensible.
def fix_open_slot(slot)
    migrating = []
    importing = []
    @nodes.each{|n|
        next if n.has_flag? "slave"
        if n.info[:migrating][slot]
            migrating << n
        elsif n.info[:importing][slot]
            importing << n
        elsif n.r.cluster("countkeysinslot",slot) > 0
            xputs "*** Found keys about slot #{slot} in node #{n}!"
        end
    }
    # FIX: these status lines used plain `puts`; every other message in
    # this tool goes through xputs so prefixes are colorized consistently.
    xputs ">>> Fixing open slot #{slot}"
    xputs "Set as migrating in: #{migrating.join(",")}"
    xputs "Set as importing in: #{importing.join(",")}"

    # Case 1: The slot is in migrating state in one node, and in
    # importing state in one other node. That's trivial to address.
    if migrating.length == 1 && importing.length == 1
        move_slot(migrating[0],importing[0],slot,:verbose=>true)
    elsif migrating.length == 1 && importing.length == 0
        # Only half-open on the migrating side: just close it.
        xputs ">>> Setting #{slot} as STABLE"
        migrating[0].r.cluster("setslot",slot,"stable")
    elsif migrating.length == 0 && importing.length == 1
        # Only half-open on the importing side: just close it.
        xputs ">>> Setting #{slot} as STABLE"
        importing[0].r.cluster("setslot",slot,"stable")
    else
        xputs "[ERR] Sorry, Redis-trib can't fix this slot yet (work in progress)"
    end
end
|
|
|
|
|
# Check if all the nodes agree about the cluster configuration.
def check_config_consistency
    if is_config_consistent?
        xputs "[OK] All nodes agree about slots configuration."
    else
        cluster_error "[ERR] Nodes don't agree about configuration!"
    end
end
|
|
|
|
|
|
|
|
# True when every node reports an identical slots->owner signature.
def is_config_consistent?
    @nodes.map{|n| n.get_config_signature}.uniq.length == 1
end
|
|
|
|
|
|
|
|
# Block, printing a progress dot every second, until all nodes agree
# on the cluster configuration.
def wait_cluster_join
    print "Waiting for the cluster to join"
    until is_config_consistent?
        print "."
        STDOUT.flush
        sleep 1
    end
    print "\n"
end
|
|
|
|
|
# Plan the initial slot and replica layout for cluster creation: pick
# masters spread across distinct IPs, give each an even contiguous share
# of the slot space, then assign @replicas replicas per master, avoiding
# the master's own host when possible.
def alloc_slots
    nodes_count = @nodes.length
    masters_count = @nodes.length / (@replicas+1)
    slots_per_node = ClusterHashSlots / masters_count
    masters = []
    slaves = []

    # The first step is to split instances by IP. This is useful as
    # we'll try to allocate master nodes in different physical machines
    # (as much as possible) and to allocate slaves of a given master in
    # different physical machines as well.
    #
    # This code assumes just that if the IP is different, than it is more
    # likely that the instance is running in a different physical host
    # or at least a different virtual machine.
    ips = {}
    @nodes.each{|n|
        (ips[n.info[:host]] ||= []) << n
    }

    # Select master instances, round-robin across the distinct IPs.
    puts "Using #{masters_count} masters:"
    while masters.length < masters_count
        ips.each{|ip,nodes_list|
            next if nodes_list.length == 0
            masters << nodes_list.shift
            puts masters[-1]
            nodes_count -= 1
            break if masters.length == masters_count
        }
    end

    # Alloc slots on masters: equal contiguous ranges, with the last
    # master absorbing the remainder up to ClusterHashSlots-1.
    i = 0
    masters.each_with_index{|n,masternum|
        first = i*slots_per_node
        last = first+slots_per_node-1
        last = ClusterHashSlots-1 if masternum == masters.length-1
        n.add_slots first..last
        i += 1
    }

    # Select N replicas for every master.
    # We try to split the replicas among all the IPs with spare nodes
    # trying to avoid the host where the master is running, if possible.
    masters.each{|m|
        i = 0
        while i < @replicas
            ips.each{|ip,nodes_list|
                next if nodes_list.length == 0
                # Skip instances with the same IP as the master if we
                # have some more IPs available.
                next if ip == m.info[:host] && nodes_count > nodes_list.length
                slave = nodes_list.shift
                slave.set_as_replica(m.info[:name])
                nodes_count -= 1
                i += 1
                puts "#{m} replica ##{i} is #{slave}"
                # NOTE(review): this condition is always true at this point
                # (all masters were chosen above), so the scan restarts
                # from the first IP for every replica — confirm intent.
                break if masters.length == masters_count
            }
        end
    }
end
|
|
|
|
|
|
|
|
# Push any pending configuration change on every known node.
def flush_nodes_config
    @nodes.each(&:flush_node_config)
end
|
|
|
|
|
|
|
|
# Print a human readable description of every known node.
def show_nodes
    @nodes.each{|node| xputs node.info_string}
end
|
|
|
|
|
|
|
|
# We use a brute force approach to make sure the nodes will meet each
# other: every node except the first is told to CLUSTER MEET the first
# one; thanks to gossip, full membership then propagates across the
# cluster in a matter of seconds.
def join_cluster
    seed = nil
    @nodes.each{|n|
        if seed.nil?
            seed = n.info # Skip the seed node itself.
            next
        end
        n.r.cluster("meet", seed[:host], seed[:port])
    }
end
|
|
|
|
|
|
|
|
# Ask the user for confirmation, aborting the whole program unless the
# exact answer 'yes' (case insensitive) is given.
# FIX: EOF on stdin made STDIN.gets return nil, crashing on nil.chomp;
# EOF is now treated as a refusal and aborts cleanly.
def yes_or_die(msg)
    print "#{msg} (type 'yes' to accept): "
    STDOUT.flush
    answer = STDIN.gets
    if !(answer && answer.chomp.downcase == "yes")
        xputs "*** Aborting..."
        exit 1
    end
end
|
2011-04-14 03:41:22 -04:00
|
|
|
|
# Bootstrap the node list from a single "host:port" address: connect to
# it, verify it runs in cluster mode, then connect to every healthy
# friend it reports and finally compute per-master replica lists.
def load_cluster_info_from_node(nodeaddr)
    node = ClusterNode.new(nodeaddr)
    node.connect(:abort => true)
    node.assert_cluster
    node.load_info(:getfriends => true)
    add_node(node)
    node.friends.each{|f|
        # Skip friends that are unreachable or known to be failing.
        flags = f[:flags]
        next if flags.index("noaddr") ||
                flags.index("disconnected") ||
                flags.index("fail")
        fnode = ClusterNode.new(f[:addr])
        fnode.connect()
        fnode.load_info()
        add_node(fnode)
    }
    populate_nodes_replicas_info
end
|
|
|
|
|
|
|
|
# This function is called by load_cluster_info_from_node in order to
# add additional information to every node as a list of replicas.
def populate_nodes_replicas_info
    # Give every node a fresh, empty replicas list.
    @nodes.each{|n| n.info[:replicas] = []}

    # Attach each slave to its master via the slave's replicate field.
    @nodes.each{|n|
        master_id = n.info[:replicate]
        next unless master_id
        master = get_node_by_name(master_id)
        if master
            master.info[:replicas] << n
        else
            xputs "*** WARNING: #{n} claims to be slave of unknown node ID #{n.info[:replicate]}."
        end
    }
end
|
|
|
|
|
# Given a list of source nodes return a "resharding plan"
# with what slots to move in order to move "numslots" slots to another
# instance.
def compute_reshard_table(sources,numslots)
    moved = []
    # Sort from bigger to smaller instance, for two reasons:
    # 1) If we take less slots than instances it is better to start
    #    getting from the biggest instances.
    # 2) We take one slot more from the first instance in the case of not
    #    perfect divisibility. Like we have 3 nodes and need to get 10
    #    slots, we take 4 from the first, and 3 from the rest. So the
    #    biggest is always the first.
    sources = sources.sort{|a,b| b.slots.length <=> a.slots.length}
    source_tot_slots = sources.map{|s| s.slots.length}.reduce(0, :+)
    sources.each_with_index{|s,i|
        # Every node provides a number of slots proportional to the
        # slots it has assigned; the first (biggest) source rounds up,
        # the others round down.
        share = numslots.to_f / source_tot_slots * s.slots.length
        quota = (i == 0) ? share.ceil : share.floor
        s.slots.keys.sort.first(quota).each{|slot|
            moved << {:source => s, :slot => slot} if moved.length < numslots
        }
    }
    moved
end
|
|
|
|
|
|
|
|
# Print, one per line, every planned slot movement in 'table'.
def show_reshard_table(table)
    table.each{|entry|
        puts "    Moving slot #{entry[:slot]} from #{entry[:source].info[:name]}"
    }
end
|
|
|
|
|
# Move all keys of hash slot 'slot' from 'source' to 'target', then make
# every known node acknowledge the new owner.
# Options: :verbose => true prints a dot per migrated key.
def move_slot(source,target,slot,o={})
    # We start marking the slot as importing in the destination node,
    # and the slot as migrating in the source node (FIX: the comment
    # previously said "target host", but the code marks the source).
    # Note that the order of the operations is important, as otherwise a
    # client may be redirected to the target node that does not yet know
    # it is importing this slot.
    print "Moving slot #{slot} from #{source} to #{target}: "; STDOUT.flush
    target.r.cluster("setslot",slot,"importing",source.info[:name])
    source.r.cluster("setslot",slot,"migrating",target.info[:name])
    # Migrate all the keys from source to target using the MIGRATE
    # command, fetched in batches of 10 keys per GETKEYSINSLOT round trip.
    while true
        keys = source.r.cluster("getkeysinslot",slot,10)
        break if keys.length == 0
        keys.each{|key|
            source.r.migrate(target.info[:host],target.info[:port],key,0,1000)
            print "." if o[:verbose]
            STDOUT.flush
        }
    end
    puts
    # Set the new node as the owner of the slot in all the known nodes.
    @nodes.each{|n|
        n.r.cluster("setslot",slot,"node",target.info[:name])
    }
end
|
|
|
|
|
2011-09-29 09:45:10 -04:00
|
|
|
# redis-trib subcommands implementations
|
|
|
|
|
# "check" subcommand: load cluster state from argv[0] and run all checks.
def check_cluster_cmd(argv,opt)
    load_cluster_info_from_node(argv[0])
    check_cluster
end
|
|
|
|
|
# "fix" subcommand: like "check", but with automatic repairs enabled.
def fix_cluster_cmd(argv,opt)
    @fix = true
    load_cluster_info_from_node(argv[0])
    check_cluster
end
|
|
|
|
|
# "reshard" subcommand: interactively move a user-chosen number of hash
# slots from one or more source masters to a target master.
def reshard_cluster_cmd(argv,opt)
    load_cluster_info_from_node(argv[0])
    check_cluster
    if @errors.length != 0
        # FIX: was plain `puts`; all other "***" messages use xputs so the
        # prefix is colorized consistently.
        xputs "*** Please fix your cluster problems before resharding"
        exit 1
    end

    # Ask how many slots to move (must be within the slot space).
    numslots = 0
    while numslots <= 0 or numslots > ClusterHashSlots
        print "How many slots do you want to move (from 1 to #{ClusterHashSlots})? "
        numslots = STDIN.gets.to_i
    end

    # Ask for the receiving node: must be a known master.
    target = nil
    while not target
        print "What is the receiving node ID? "
        target = get_node_by_name(STDIN.gets.chop)
        if !target || target.has_flag?("slave")
            xputs "*** The specified node is not known or not a master, please retry."
            target = nil
        end
    end

    # Collect the source nodes ('all' selects every other master).
    sources = []
    xputs "Please enter all the source node IDs."
    xputs "  Type 'all' to use all the nodes as source nodes for the hash slots."
    xputs "  Type 'done' once you entered all the source nodes IDs."
    while true
        print "Source node ##{sources.length+1}:"
        line = STDIN.gets.chop
        src = get_node_by_name(line)
        if line == "done"
            if sources.length == 0
                puts "No source nodes given, operation aborted"
                exit 1
            else
                break
            end
        elsif line == "all"
            @nodes.each{|n|
                next if n.info[:name] == target.info[:name]
                next if n.has_flag?("slave")
                sources << n
            }
            break
        elsif !src || src.has_flag?("slave")
            xputs "*** The specified node is not known or is not a master, please retry."
        elsif src.info[:name] == target.info[:name]
            xputs "*** It is not possible to use the target node as source node."
        else
            sources << src
        end
    end

    # Show the plan and ask for a final confirmation before moving.
    puts "\nReady to move #{numslots} slots."
    puts "  Source nodes:"
    sources.each{|s| puts "    "+s.info_string}
    puts "  Destination node:"
    puts "    #{target.info_string}"
    reshard_table = compute_reshard_table(sources,numslots)
    puts "  Resharding plan:"
    show_reshard_table(reshard_table)
    print "Do you want to proceed with the proposed reshard plan (yes/no)? "
    yesno = STDIN.gets.chop
    exit(1) if (yesno != "yes")
    reshard_table.each{|e|
        move_slot(e[:source],target,e[:slot],:verbose=>true)
    }
end
|
|
|
|
|
2013-11-05 05:24:24 -05:00
|
|
|
# Helper for create_cluster_cmd: verifies that the requested topology
# (total node count split among masters and @replicas slaves per master)
# yields at least three master nodes, which is the minimum Redis Cluster
# configuration. On an invalid configuration it explains the problem and
# terminates the program.
def check_create_parameters
    masters = @nodes.length/(@replicas+1)
    # Nothing to do when the configuration is acceptable.
    return if masters >= 3

    puts "*** ERROR: Invalid configuration for cluster creation."
    puts "*** Redis Cluster requires at least 3 master nodes."
    puts "*** This is not possible with #{@nodes.length} nodes and #{@replicas} replicas per node."
    puts "*** At least #{3*(@replicas+1)} nodes are required."
    exit 1
end
|
|
|
|
|
2013-10-11 11:33:19 -04:00
|
|
|
# Implements "redis-trib create": builds a brand new cluster from the
# host:port addresses listed in argv.
#
# Every address must point to a reachable, cluster-enabled and empty
# node. Slots are allocated among the masters (--replicas controls how
# many slaves per master), the operator confirms the layout, the config
# is flushed to the nodes, and CLUSTER MEET joins them together.
def create_cluster_cmd(argv,opt)
    opt = {'replicas' => 0}.merge(opt)
    @replicas = opt['replicas'].to_i

    xputs ">>> Creating cluster"
    # FIX: argv[0..-1] allocated a pointless full copy of argv; iterate
    # the array directly.
    argv.each{|n|
        node = ClusterNode.new(n)
        node.connect(:abort => true)
        node.assert_cluster
        node.load_info
        node.assert_empty
        add_node(node)
    }
    # Abort early if the node/replica counts cannot give >= 3 masters.
    check_create_parameters
    xputs ">>> Performing hash slots allocation on #{@nodes.length} nodes..."
    alloc_slots
    show_nodes
    yes_or_die "Can I set the above configuration?"
    flush_nodes_config
    xputs ">>> Nodes configuration updated"
    xputs ">>> Sending CLUSTER MEET messages to join the cluster"
    join_cluster
    # Give one second for the join to start, in order to avoid that
    # wait_cluster_join will find all the nodes agree about the config as
    # they are still empty with unassigned slots.
    sleep 1
    wait_cluster_join
    flush_nodes_config # Useful for the replicas
    check_cluster
end
|
2013-02-27 09:58:41 -05:00
|
|
|
|
2013-10-11 11:33:19 -04:00
|
|
|
# Implements "redis-trib add-node": joins a new empty node (argv[0]) to
# an existing cluster reachable at argv[1].
#
# With --slave the new node is configured as a replica: of the node given
# by --master-id if present, otherwise of the master that currently has
# the fewest replicas.
def addnode_cluster_cmd(argv,opt)
    xputs ">>> Adding node #{argv[0]} to cluster #{argv[1]}"

    # Check the existing cluster
    load_cluster_info_from_node(argv[1])
    check_cluster

    # If --master-id was specified, try to resolve it now so that we
    # abort before starting with the node configuration.
    if opt['slave']
        if opt['master-id']
            master = get_node_by_name(opt['master-id'])
            if !master
                xputs "[ERR] No such master ID #{opt['master-id']}"
                # FIX: previously execution fell through with master == nil
                # and crashed later on master.info when sending REPLICATE.
                exit 1
            end
        else
            master = get_master_with_least_replicas
            xputs "Automatically selected master #{master}"
        end
    end

    # Add the new node: it must be reachable, in cluster mode, and empty.
    new = ClusterNode.new(argv[0])
    new.connect(:abort => true)
    new.assert_cluster
    new.load_info
    new.assert_empty
    first = @nodes.first.info
    add_node(new)

    # Send CLUSTER MEET command to the new node
    xputs ">>> Send CLUSTER MEET to node #{new} to make it join the cluster."
    new.r.cluster("meet",first[:host],first[:port])

    # Additional configuration is needed if the node is added as
    # a slave: wait for the handshake to settle, then REPLICATE.
    if opt['slave']
        wait_cluster_join
        xputs ">>> Configure node as replica of #{master}."
        new.r.cluster("replicate",master.info[:name])
    end
    xputs "[OK] New node added correctly."
end
|
2013-02-27 12:02:22 -05:00
|
|
|
|
2014-01-16 12:02:16 -05:00
|
|
|
# Implements "redis-trib del-node": removes the node with ID argv[1]
# from the cluster reachable at argv[0].
#
# The node must be empty (no assigned slots). Every other node is told
# to FORGET it; slaves that were replicating the removed node are first
# reattached to the least-loaded master. Finally the node is shut down.
def delnode_cluster_cmd(argv,opt)
    id = argv[1].downcase
    xputs ">>> Removing node #{id} from cluster #{argv[0]}"

    # Load cluster information
    load_cluster_info_from_node(argv[0])

    # Check if the node exists and is not empty
    node = get_node_by_name(id)

    if !node
        xputs "[ERR] No such node ID #{id}"
        exit 1
    end

    # Refuse to delete a node that still serves slots: its keys would be lost.
    if node.slots.length != 0
        xputs "[ERR] Node #{node} is not empty! Reshard data away and try again."
        exit 1
    end

    # Send CLUSTER FORGET to all the nodes but the node to remove
    xputs ">>> Sending CLUSTER FORGET messages to the cluster..."
    @nodes.each{|n|
        next if n == node
        if n.info[:replicate] && n.info[:replicate].downcase == id
            # Reconfigure the slave to replicate with some other node
            # before it forgets its (removed) master.
            master = get_master_with_least_replicas
            xputs ">>> #{n} as replica of #{master}"
            n.r.cluster("replicate",master.info[:name])
        end
        n.r.cluster("forget",argv[1])
    }

    # Finally shutdown the node
    xputs ">>> SHUTDOWN the node."
    node.r.shutdown
end
|
|
|
|
|
2014-01-24 09:06:01 -05:00
|
|
|
# Implements "redis-trib set-timeout": sets cluster-node-timeout (in
# milliseconds, argv[1]) on every node of the cluster reachable at
# argv[0], persisting it with CONFIG REWRITE.
#
# Nodes that fail to apply the setting are reported but do not abort the
# loop; a final summary prints the OK/ERR counts.
def set_timeout_cluster_cmd(argv,opt)
    timeout = argv[1].to_i
    # Values below 100ms would make the cluster flap; refuse them.
    if timeout < 100
        puts "Setting a node timeout of less than 100 milliseconds is a bad idea."
        exit 1
    end

    # Load cluster information
    load_cluster_info_from_node(argv[0])
    ok_count = 0
    err_count = 0

    # Send CONFIG SET cluster-node-timeout + CONFIG REWRITE to every node.
    # (FIX: the old comment here was copy-pasted from del-node.)
    xputs ">>> Reconfiguring node timeout in every cluster node..."
    @nodes.each{|n|
        begin
            n.r.config("set","cluster-node-timeout",timeout)
            n.r.config("rewrite")
            ok_count += 1
            xputs "*** New timeout set for #{n}"
        rescue => e
            # FIX: message used to read "node-timeot".
            puts "ERR setting node-timeout for #{n}: #{e}"
            err_count += 1
        end
    }
    xputs ">>> New node timeout set. #{ok_count} OK, #{err_count} ERR."
end
|
|
|
|
|
2014-01-17 04:29:40 -05:00
|
|
|
# Implements "redis-trib help": print the usage screen and terminate
# with a success status. argv/opt are accepted only for uniformity with
# the other *_cluster_cmd handlers and are unused.
def help_cluster_cmd(argv,opt)
    show_help
    exit 0
end
|
2013-10-11 11:33:19 -04:00
|
|
|
|
|
|
|
# Parse the options for the specific command "cmd".
# Returns an hash populated with option => value pairs, and the index of
# the first non-option argument in ARGV.
#
# Options must come right after the subcommand ("--name" or
# "--name value" when ALLOWED_OPTIONS marks the option as taking an
# argument). An option not listed in ALLOWED_OPTIONS[cmd] aborts the
# program with an explanation.
def parse_options(cmd)
    idx = 1 # Current index into ARGV, skipping the subcommand itself.
    options = {}
    # The loop condition already guarantees ARGV[idx] starts with "--";
    # the old body re-tested this, leaving a dead else/break branch.
    while idx < ARGV.length && ARGV[idx][0..1] == '--'
        option = ARGV[idx][2..-1]
        idx += 1
        # Reject options that are unknown for this subcommand.
        if ALLOWED_OPTIONS[cmd] == nil || ALLOWED_OPTIONS[cmd][option] == nil
            puts "Unknown option '#{option}' for command '#{cmd}'"
            exit 1
        end
        # A truthy entry means the option consumes the next ARGV element
        # as its value; otherwise it is a boolean flag.
        if ALLOWED_OPTIONS[cmd][option]
            value = ARGV[idx]
            idx += 1
        else
            value = true
        end
        options[option] = value
    end
    return options,idx
end
|
2011-04-11 10:41:06 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
# Dispatch table: subcommand name -> [RedisTrib method name, arity,
# usage string shown by show_help].
# NOTE(review): arity is enforced by check_arity (not visible in this
# chunk); "create" uses -2, presumably meaning "two or more arguments"
# — confirm against check_arity.
COMMANDS={
    "create"  => ["create_cluster_cmd", -2, "host1:port1 ... hostN:portN"],
    "check"   => ["check_cluster_cmd", 2, "host:port"],
    "fix"     => ["fix_cluster_cmd", 2, "host:port"],
    "reshard" => ["reshard_cluster_cmd", 2, "host:port"],
    "add-node" => ["addnode_cluster_cmd", 3, "new_host:new_port existing_host:existing_port"],
    "del-node" => ["delnode_cluster_cmd", 3, "host:port node_id"],
    "set-timeout" => ["set_timeout_cluster_cmd", 3, "host:port milliseconds"],
    "help"    => ["help_cluster_cmd", 1, "(show this help)"]
}
|
|
|
|
|
2013-10-11 11:33:19 -04:00
|
|
|
# Per-subcommand set of accepted --options. The boolean value tells
# parse_options whether the option consumes the next ARGV element as its
# argument (true) or is a plain flag (false).
ALLOWED_OPTIONS={
    "create" => {"replicas" => true},
    "add-node" => {"slave" => false, "master-id" => true}
}
|
|
|
|
|
2013-02-27 12:02:22 -05:00
|
|
|
# Print the redis-trib usage screen: one line per subcommand (from
# COMMANDS) with its arguments, followed by the options that subcommand
# accepts (from ALLOWED_OPTIONS), with " <arg>" appended to options that
# take a value.
def show_help
    puts "Usage: redis-trib <command> <options> <arguments ...>\n\n"
    COMMANDS.each{|k,v|
        # FIX: removed the unused local "o" that was assigned here.
        puts "  #{k.ljust(15)} #{v[2]}"
        if ALLOWED_OPTIONS[k]
            ALLOWED_OPTIONS[k].each{|optname,has_arg|
                puts "                  --#{optname}" + (has_arg ? " <arg>" : "")
            }
        end
    }
    puts "\nFor check, fix, reshard, del-node, set-timeout you can specify the host and port of any working node in the cluster.\n"
end
|
|
|
|
|
|
|
|
# Sanity check: with no arguments at all, show usage and fail.
if ARGV.length == 0
    show_help
    exit 1
end

# Look up the requested subcommand in the dispatch table.
rt = RedisTrib.new
cmd_spec = COMMANDS[ARGV[0].downcase]
if !cmd_spec
    puts "Unknown redis-trib subcommand '#{ARGV[0]}'"
    exit 1
end

# Parse options; first_non_option is the ARGV index where the
# positional arguments of the subcommand begin.
cmd_options,first_non_option = rt.parse_options(ARGV[0].downcase)
rt.check_arity(cmd_spec[1],ARGV.length-(first_non_option-1))

# Dispatch: invoke the handler named in the table with the positional
# arguments and the parsed option hash.
rt.send(cmd_spec[0],ARGV[first_non_option..-1],cmd_options)
|