#!/usr/bin/env ruby

# TODO (temporary here; we'll move this into the GitHub issues once
# the redis-trib initial implementation is completed).
#
# - Make sure that if the rehashing fails in the middle redis-trib will try
#   to recover.
# - When redis-trib performs a cluster check, if it detects a slot move in
#   progress it should prompt the user to continue the move from where it
#   stopped.
# - Gracefully handle Ctrl+C in move_slot, prompting the user to confirm
#   they really want to stop while rehashing, and performing the best
#   cleanup possible if the user forces the quit.
# - When doing "fix" set a global Fix to true, and prompt the user to
#   fix the problem if automatically fixable every time there is something
#   to fix. For instance:
#   1) If there is a node that pretends to receive a slot, or to migrate a
#      slot, but has no entries in that slot, fix it.
#   2) If there is a node having keys in slots that are not owned by it,
#      fix this condition by moving the entries to the node that actually
#      owns the slot.
#   3) Perform more, possibly slow, tests about the state of the cluster.
#   4) When an aborted slot migration is detected, fix it.

require 'rubygems'
require 'redis'

ClusterHashSlots = 16384
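# Redis Cluster assigns every key to one of these slots by hashing it
# (CRC16 of the key modulo 16384), so the code below reasons in terms of
# slot ranges 0..16383 rather than individual keys.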

def xputs(s)
    printf s
    STDOUT.flush
end

class ClusterNode
    def initialize(addr)
        s = addr.split(":")
        if s.length != 2
            puts "Invalid node name #{addr}"
            exit 1
        end
        @r = nil
        @info = {}
        @info[:host] = s[0]
        @info[:port] = s[1]
        @info[:slots] = {}
        @info[:migrating] = {}
        @info[:importing] = {}
        @dirty = false # True if we need to flush slots info into node.
        @friends = []
    end

    def friends
        @friends
    end

    def slots
        @info[:slots]
    end
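
    # Returns a truthy value (the index of the flag in the flags array)
    # when the node has 'flag' set, nil otherwise.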
    def has_flag?(flag)
        @info[:flags].index(flag)
    end

    def to_s
        "#{@info[:host]}:#{@info[:port]}"
    end

    def connect(o={})
        return if @r
        xputs "Connecting to node #{self}: "
        begin
            @r = Redis.new(:host => @info[:host], :port => @info[:port])
            @r.ping
        rescue
            puts "ERROR"
            puts "Sorry, can't connect to node #{self}"
            exit 1 if o[:abort]
            @r = nil
            return
        end
        puts "OK"
    end

    def assert_cluster
        info = @r.info
        if !info["cluster_enabled"] || info["cluster_enabled"].to_i == 0
            puts "Error: Node #{self} is not configured as a cluster node."
            exit 1
        end
    end

    def assert_empty
        if !(@r.cluster("info").split("\r\n").index("cluster_known_nodes:1")) ||
           (@r.info['db0'])
            puts "Error: Node #{self} is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0."
            exit 1
        end
    end

    def load_info(o={})
        self.connect
        nodes = @r.cluster("nodes").split("\n")
        nodes.each{|n|
            # name addr flags role ping_sent ping_recv link_status slots
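            # An illustrative line (fields as listed above):
            #   <id> 127.0.0.1:7001 master - 0 0 connected 0-5460 5500 [93->-<id>]
            # A bare number is a single slot, "a-b" an inclusive slot
            # range, and a bracketed entry an open slot being migrated
            # out ("->-") or imported ("-<-").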
            split = n.split
            name,addr,flags,role,ping_sent,ping_recv,link_status = split[0..6]
            slots = split[7..-1]
            info = {
                :name => name,
                :addr => addr,
                :flags => flags.split(","),
                :role => role,
                :ping_sent => ping_sent.to_i,
                :ping_recv => ping_recv.to_i,
                :link_status => link_status
            }
            if info[:flags].index("myself")
                @info = @info.merge(info)
                @info[:slots] = {}
                slots.each{|s|
                    if s[0..0] == '['
                        if s.index("->-") # Migrating
                            slot,dst = s[1..-1].split("->-")
                            @info[:migrating][slot] = dst
                        elsif s.index("-<-") # Importing
                            slot,src = s[1..-1].split("-<-")
                            @info[:importing][slot] = src
                        end
                    elsif s.index("-")
                        start,stop = s.split("-")
                        self.add_slots((start.to_i)..(stop.to_i))
                    else
                        self.add_slots((s.to_i)..(s.to_i))
                    end
                } if slots
                @dirty = false
                @r.cluster("info").split("\n").each{|e|
                    k,v=e.split(":")
                    k = k.to_sym
                    v.chop!
                    if k != :cluster_state
                        @info[k] = v.to_i
                    else
                        @info[k] = v
                    end
                }
            elsif o[:getfriends]
                @friends << info
            end
        }
    end

    def add_slots(slots)
        slots.each{|s|
            @info[:slots][s] = :new
        }
        @dirty = true
    end

    def flush_node_config
        return if !@dirty
        new = []
        @info[:slots].each{|s,val|
            if val == :new
                new << s
                @info[:slots][s] = true
            end
        }
        @r.cluster("addslots",*new)
        @dirty = false
    end

    def info_string
        # We want to display the hash slots assigned to this node
        # as ranges, like in: "1-5,8-9,20-25,30"
        #
        # Note: this could be easily written without side effects,
        # we use 'slots' just to split the computation into steps.

        # First step: we want an increasing array of integers
        # for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30]
        slots = @info[:slots].keys.sort

        # As we want to aggregate adjacent slots we convert all the
        # slot integers into ranges (with just one element)
        # So we have something like [1..1,2..2, ... and so forth.
        slots.map!{|x| x..x}

        # Finally we group ranges with adjacent elements.
        slots = slots.reduce([]) {|a,b|
            if !a.empty? && b.first == (a[-1].last)+1
                a[0..-2] + [(a[-1].first)..(b.last)]
            else
                a + [b]
            end
        }

        # Now our task is easy, we just convert ranges with just one
        # element into a number, and a real range into a start-end format.
        # Finally we join the array using the comma as separator.
        slots = slots.map{|x|
            x.count == 1 ? x.first.to_s : "#{x.first}-#{x.last}"
        }.join(",")

        "[#{@info[:cluster_state].upcase} #{(self.info[:flags]-["myself"]).join(",")}] #{self.info[:name]} #{self.to_s} slots:#{slots} (#{self.slots.length} slots)"
    end

    # Return a single string representing nodes and associated slots.
    # TODO: remove slaves from config once slaves are handled
    # by Redis Cluster.
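    #
    # The resulting signature looks like (illustrative):
    #   <id1>:0-5460|<id2>:5461-10922|<id3>:10923-16383
    # with entries sorted so that two nodes reporting the same layout
    # produce an identical string.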
    def get_config_signature
        config = []
        @r.cluster("nodes").each_line{|l|
            s = l.split
            slots = s[7..-1].select {|x| x[0..0] != "["}
            next if slots.length == 0
            config << s[0]+":"+(slots.sort.join(","))
        }
        config.sort.join("|")
    end

    def info
        @info
    end

    def is_dirty?
        @dirty
    end

    def r
        @r
    end
end

class RedisTrib
    def initialize
        @nodes = []
        @fix = false
        @errors = []
    end
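
    # A positive req_args requires exactly that many arguments, while a
    # negative req_args requires at least req_args.abs arguments (see the
    # COMMANDS table at the bottom of the file).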
    def check_arity(req_args, num_args)
        if ((req_args > 0 and num_args != req_args) ||
           (req_args < 0 and num_args < req_args.abs))
            puts "Wrong number of arguments for specified sub command"
            exit 1
        end
    end

    def add_node(node)
        @nodes << node
    end

    def cluster_error(msg)
        @errors << msg
        puts msg
    end

    def get_node_by_name(name)
        @nodes.each{|n|
            return n if n.info[:name] == name.downcase
        }
        return nil
    end

    def check_cluster
        puts ">>> Performing Cluster Check (using node #{@nodes[0]})"
        show_nodes
        check_config_consistency
        check_open_slots
        check_slots_coverage
    end

    # Merge slots of every known node. If the resulting slots are equal
    # to ClusterHashSlots, then all slots are served.
    def covered_slots
        slots = {}
        @nodes.each{|n|
            slots = slots.merge(n.slots)
        }
        slots
    end

    def check_slots_coverage
        puts ">>> Check slots coverage..."
        slots = covered_slots
        if slots.length == ClusterHashSlots
            puts "[OK] All #{ClusterHashSlots} slots covered."
        else
            cluster_error \
                "[ERR] Not all #{ClusterHashSlots} slots are covered by nodes."
            fix_slots_coverage if @fix
        end
    end

    def check_open_slots
        puts ">>> Check for open slots..."
        open_slots = []
        @nodes.each{|n|
            if n.info[:migrating].size > 0
                cluster_error \
                    "[WARNING] Node #{n} has slots in migrating state."
                open_slots += n.info[:migrating].keys
            elsif n.info[:importing].size > 0
                cluster_error \
                    "[WARNING] Node #{n} has slots in importing state."
                open_slots += n.info[:importing].keys
            end
        }
        open_slots.uniq!
        if open_slots.length > 0
            puts "[WARNING] The following slots are open: #{open_slots.join(",")}"
        end
        if @fix
            open_slots.each{|slot| fix_open_slot slot}
        end
    end
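
    # CLUSTER GETKEYSINSLOT returns up to the requested number of keys
    # stored in the given slot, so asking for a single key is enough to
    # test whether a node holds anything in that slot.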
    def nodes_with_keys_in_slot(slot)
        nodes = []
        @nodes.each{|n|
            nodes << n if n.r.cluster("getkeysinslot",slot,1).length > 0
        }
        nodes
    end

    def fix_slots_coverage
        not_covered = (0...ClusterHashSlots).to_a - covered_slots.keys
        puts "\nFixing slots coverage..."
        puts "List of not covered slots: " + not_covered.join(",")

        # For every slot, take action depending on the actual condition:
        # 1) No node has keys for this slot.
        # 2) A single node has keys for this slot.
        # 3) Multiple nodes have keys for this slot.
        slots = {}
        not_covered.each{|slot|
            nodes = nodes_with_keys_in_slot(slot)
            slots[slot] = nodes
            puts "Slot #{slot} has keys in #{nodes.length} nodes: #{nodes.join(", ")}"
        }

        none = slots.select {|k,v| v.length == 0}
        single = slots.select {|k,v| v.length == 1}
        multi = slots.select {|k,v| v.length > 1}

        # Handle case "1": keys in no node.
        if none.length > 0
            puts "The following uncovered slots have no keys across the cluster:"
            puts none.keys.join(",")
            yes_or_die "Fix these slots by covering with a random node?"
            none.each{|slot,nodes|
                node = @nodes.sample
                puts "Covering slot #{slot} with #{node}"
                node.r.cluster("addslots",slot)
            }
        end

        # Handle case "2": keys only in one node.
        if single.length > 0
            puts "The following uncovered slots have keys in just one node:"
            puts single.keys.join(",")
            yes_or_die "Fix these slots by covering with those nodes?"
            single.each{|slot,nodes|
                puts "Covering slot #{slot} with #{nodes[0]}"
                nodes[0].r.cluster("addslots",slot)
            }
        end

        # Handle case "3": keys in multiple nodes.
        if multi.length > 0
            puts "The following uncovered slots have keys in multiple nodes:"
            puts multi.keys.join(",")
            yes_or_die "Fix these slots by moving keys into a single node?"
            multi.each{|slot,nodes|
                puts "Covering slot #{slot} moving keys to #{nodes[0]}"
                # TODO
                # 1) Set all nodes as "MIGRATING" for this slot, so that we
                #    can access keys in the hash slot using ASKING.
                # 2) Move everything to node[0]
                # 3) Clear MIGRATING from nodes, and ADDSLOTS the slot to
                #    node[0].
                raise "TODO: Work in progress"
            }
        end
    end

    # Slot 'slot' was found to be in importing or migrating state in one or
    # more nodes. This function fixes this condition by migrating keys where
    # it seems more sensible.
    def fix_open_slot(slot)
        migrating = []
        importing = []
        @nodes.each{|n|
            next if n.has_flag? "slave"
            if n.info[:migrating][slot]
                migrating << n
            elsif n.info[:importing][slot]
                importing << n
            elsif n.r.cluster("countkeysinslot",slot) > 0
                puts "Found keys belonging to slot #{slot} in node #{n}!"
            end
        }
        puts "Fixing open slot #{slot}:"
        puts "Set as migrating in: #{migrating.join(",")}"
        puts "Set as importing in: #{importing.join(",")}"

        # Case 1: The slot is in migrating state in one node, and in
        # importing state in another node. That's trivial to address.
        if migrating.length == 1 && importing.length == 1
            move_slot(migrating[0],importing[0],slot,:verbose=>true)
        else
            puts "Sorry, redis-trib can't fix this slot yet (work in progress)"
        end
    end

    # Check if all the nodes agree about the cluster configuration.
    def check_config_consistency
        signatures=[]
        @nodes.each{|n|
            signatures << n.get_config_signature
        }
        if signatures.uniq.length != 1
            puts "[ERR] Nodes don't agree about configuration!"
        else
            puts "[OK] All nodes agree about slots configuration."
        end
    end
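
    # For example, with 3 nodes slots_per_node is 16384/3 = 5461, so the
    # nodes are assigned 0-5460, 5461-10921 and 10922-16383: the last node
    # also absorbs the remainder of the division so that every slot ends
    # up covered.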
    def alloc_slots
        slots_per_node = ClusterHashSlots/@nodes.length
        i = 0
        @nodes.each{|n|
            first = i*slots_per_node
            last = first+slots_per_node-1
            last = ClusterHashSlots-1 if i == @nodes.length-1
            n.add_slots first..last
            i += 1
        }
    end

    def flush_nodes_config
        @nodes.each{|n|
            n.flush_node_config
        }
    end

    def show_nodes
        @nodes.each{|n|
            puts n.info_string
        }
    end

    def join_cluster
        # We use a brute force approach to make sure the nodes will meet
        # each other, that is, sending CLUSTER MEET messages to all the
        # nodes about the very same node.
        # Thanks to gossip this information should propagate across the
        # whole cluster in a matter of seconds.
        first = false
        @nodes.each{|n|
            if !first then first = n.info; next; end # Skip the first node
            n.r.cluster("meet",first[:host],first[:port])
        }
    end

    def yes_or_die(msg)
        print "#{msg} (type 'yes' to accept): "
        STDOUT.flush
        if !(STDIN.gets.chomp.downcase == "yes")
            puts "Aborting..."
            exit 1
        end
    end

    def load_cluster_info_from_node(nodeaddr)
        node = ClusterNode.new(nodeaddr)
        node.connect(:abort => true)
        node.assert_cluster
        node.load_info(:getfriends => true)
        add_node(node)
        node.friends.each{|f|
            next if f[:flags].index("noaddr") ||
                    f[:flags].index("disconnected") ||
                    f[:flags].index("fail")
            fnode = ClusterNode.new(f[:addr])
            fnode.connect()
            fnode.load_info()
            add_node(fnode)
        }
    end

    # Given a list of source nodes return a "resharding plan"
    # with what slots to move in order to move "numslots" slots to another
    # instance.
    def compute_reshard_table(sources,numslots)
        moved = []
        # Sort from bigger to smaller instance, for two reasons:
        # 1) If we take fewer slots than instances it is better to start
        #    getting from the biggest instances.
        # 2) We take one slot more from the first instance in the case of
        #    not perfect divisibility. For example, if we have 3 nodes and
        #    need to get 10 slots, we take 4 from the first, and 3 from the
        #    rest. So the biggest is always the first.
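        #
        # Concretely (numbers illustrative): with three sources owning
        # 5462, 5461 and 5461 slots and numslots = 10, each contributes
        # 10.0/16384*owned ~= 3.33 slots, rounded to 4 for the first
        # source (ceil) and 3 for the others (floor): exactly 10 moves.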
        sources = sources.sort{|a,b| b.slots.length <=> a.slots.length}
        source_tot_slots = sources.inject(0) {|sum,source|
            sum+source.slots.length
        }
        sources.each_with_index{|s,i|
            # Every node will provide a number of slots proportional to the
            # slots it has assigned.
            n = (numslots.to_f/source_tot_slots*s.slots.length)
            if i == 0
                n = n.ceil
            else
                n = n.floor
            end
            s.slots.keys.sort[(0...n)].each{|slot|
                if moved.length < numslots
                    moved << {:source => s, :slot => slot}
                end
            }
        }
        return moved
    end

    def show_reshard_table(table)
        table.each{|e|
            puts "    Moving slot #{e[:slot]} from #{e[:source].info[:name]}"
        }
    end

    def move_slot(source,target,slot,o={})
        # We start marking the slot as importing in the destination node,
        # and the slot as migrating in the source node. Note that the order
        # of the operations is important, as otherwise a client may be
        # redirected to the target node that does not yet know it is
        # importing this slot.
        print "Moving slot #{slot} from #{source} to #{target}: "; STDOUT.flush
        target.r.cluster("setslot",slot,"importing",source.info[:name])
        source.r.cluster("setslot",slot,"migrating",target.info[:name])
        # Migrate all the keys from source to target, in batches of 10,
        # using the MIGRATE command.
        while true
            keys = source.r.cluster("getkeysinslot",slot,10)
            break if keys.length == 0
            keys.each{|key|
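                # MIGRATE host port key destination-db timeout: move the
                # key to database 0 of the target with a 1000 ms timeout.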
                source.r.migrate(target.info[:host],target.info[:port],key,0,1000)
                print "." if o[:verbose]
                STDOUT.flush
            }
        end
        puts
        # Set the new node as the owner of the slot in all the known nodes.
        @nodes.each{|n|
            n.r.cluster("setslot",slot,"node",target.info[:name])
        }
    end

    # redis-trib subcommands implementations

    def check_cluster_cmd
        load_cluster_info_from_node(ARGV[1])
        check_cluster
    end

    def fix_cluster_cmd
        @fix = true
        load_cluster_info_from_node(ARGV[1])
        check_cluster
    end

    def reshard_cluster_cmd
        load_cluster_info_from_node(ARGV[1])
        check_cluster
        if @errors.length != 0
            puts "\n--- Please fix your cluster problems before resharding ---"
            exit 1
        end
        numslots = 0
        while numslots <= 0 or numslots > ClusterHashSlots
            print "How many slots do you want to move (from 1 to #{ClusterHashSlots})? "
            numslots = STDIN.gets.to_i
        end
        target = nil
        while not target
            print "What is the receiving node ID? "
            target = get_node_by_name(STDIN.gets.chop)
            if !target || target.has_flag?("slave")
                puts "The specified node is not known or is not a master, please retry."
                target = nil
            end
        end
        sources = []
        puts "Please enter all the source node IDs."
        puts "  Type 'all' to use all the nodes as source nodes for the hash slots."
        puts "  Type 'done' once you entered all the source node IDs."
        while true
            print "Source node ##{sources.length+1}: "
            line = STDIN.gets.chop
            src = get_node_by_name(line)
            if line == "done"
                if sources.length == 0
                    puts "No source nodes given, operation aborted"
                    exit 1
                else
                    break
                end
            elsif line == "all"
                @nodes.each{|n|
                    next if n.info[:name] == target.info[:name]
                    next if n.has_flag?("slave")
                    sources << n
                }
                break
            elsif !src || src.has_flag?("slave")
                puts "The specified node is not known or is not a master, please retry."
            elsif src.info[:name] == target.info[:name]
                puts "It is not possible to use the target node as source node."
            else
                sources << src
            end
        end
        puts "\nReady to move #{numslots} slots."
        puts "  Source nodes:"
        sources.each{|s| puts "    "+s.info_string}
        puts "  Destination node:"
        puts "    #{target.info_string}"
        reshard_table = compute_reshard_table(sources,numslots)
        puts "  Resharding plan:"
        show_reshard_table(reshard_table)
        print "Do you want to proceed with the proposed reshard plan (yes/no)? "
        yesno = STDIN.gets.chop
        exit(1) if (yesno != "yes")
        reshard_table.each{|e|
            move_slot(e[:source],target,e[:slot],:verbose=>true)
        }
    end

    def create_cluster_cmd
        puts "Creating cluster"
        ARGV[1..-1].each{|n|
            node = ClusterNode.new(n)
            node.connect(:abort => true)
            node.assert_cluster
            node.load_info
            node.assert_empty
            add_node(node)
        }
        puts "Performing hash slots allocation on #{@nodes.length} nodes..."
        alloc_slots
        show_nodes
        yes_or_die "Can I set the above configuration?"
        flush_nodes_config
        puts "** Nodes configuration updated"
        puts "** Sending CLUSTER MEET messages to join the cluster"
        join_cluster
        check_cluster
    end

    def addnode_cluster_cmd
        puts "Adding node #{ARGV[1]} to cluster #{ARGV[2]}"

        # Check the existing cluster
        load_cluster_info_from_node(ARGV[2])
        check_cluster

        # Add the new node
        new = ClusterNode.new(ARGV[1])
        new.connect(:abort => true)
        new.assert_cluster
        new.load_info
        new.assert_empty
        first = @nodes.first.info

        # Send CLUSTER MEET command to the new node
        puts "Sending CLUSTER MEET to node #{new} to make it join the cluster."
        new.r.cluster("meet",first[:host],first[:port])
    end

    def help_cluster_cmd
        show_help
        exit 0
    end
end

COMMANDS={
    "create"  => ["create_cluster_cmd", -2, "host1:port1 ... hostN:portN"],
    "check"   => ["check_cluster_cmd", 2, "host:port"],
    "fix"     => ["fix_cluster_cmd", 2, "host:port"],
    "reshard" => ["reshard_cluster_cmd", 2, "host:port"],
    "addnode" => ["addnode_cluster_cmd", 3, "new_host:new_port existing_host:existing_port"],
    "help"    => ["help_cluster_cmd", 1, "(show this help)"]
}

def show_help
    puts "Usage: redis-trib <command> <arguments ...>"
    puts
    COMMANDS.each{|k,v|
        puts "  #{k.ljust(10)} #{v[2]}"
    }
    puts
end

# Sanity check
if ARGV.length == 0
    show_help
    exit 1
end

rt = RedisTrib.new

cmd_spec = COMMANDS[ARGV[0].downcase]
if !cmd_spec
    puts "Unknown redis-trib subcommand '#{ARGV[0]}'"
    exit 1
end
rt.check_arity(cmd_spec[1],ARGV.length)

# Dispatch
rt.send(cmd_spec[0])