/*
 * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of Redis nor the names of its contributors may be used
 *     to endorse or promote products derived from this software without
 *     specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "redis.h"
#include "cluster.h"
#include "slowlog.h"
#include "bio.h"

#include <time.h>
#include <signal.h>
#include <sys/wait.h>
#include <errno.h>
#include <assert.h>
#include <ctype.h>
#include <stdarg.h>
#include <arpa/inet.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/uio.h>
#include <limits.h>
#include <float.h>
#include <math.h>
#include <sys/utsname.h>
#include <locale.h>
/* Our shared "common" objects */

struct sharedObjectsStruct shared;

/* Global vars that are actually used as constants. The following double
 * values are used for double on-disk serialization, and are initialized
 * at runtime to avoid strange compiler optimizations. */

double R_Zero, R_PosInf, R_NegInf, R_Nan;

/*================================= Globals ================================= */

/* Global vars */
struct redisServer server; /* server global state */
struct redisCommand *commandTable;
/* Our command table.
 *
 * Every entry is composed of the following fields:
 *
 * name: a string representing the command name.
 * function: pointer to the C function implementing the command.
 * arity: number of arguments, it is possible to use -N to say >= N
 * sflags: command flags as string. See below for a table of flags.
 * flags: flags as bitmask. Computed by Redis using the 'sflags' field.
 * get_keys_proc: an optional function to get key arguments from a command.
 *                This is only used when the following three fields are not
 *                enough to specify what arguments are keys.
 * first_key_index: first argument that is a key
 * last_key_index: last argument that is a key
 * key_step: step to get all the keys from first to last argument. For instance
 *           in MSET the step is two since arguments are key,val,key,val,...
 * microseconds: microseconds of total execution time for this command.
 * calls: total number of calls of this command.
 *
 * The flags, microseconds and calls fields are computed by Redis and should
 * always be set to zero.
 *
 * Command flags are expressed using strings where every character represents
 * a flag. Later the populateCommandTable() function will take care of
 * populating the real 'flags' field using these characters.
 *
 * This is the meaning of the flags:
 *
 * w: write command (may modify the key space).
 * r: read command (will never modify the key space).
 * m: may increase memory usage once called. Don't allow if out of memory.
 * a: admin command, like SAVE or SHUTDOWN.
 * p: Pub/Sub related command.
 * f: force replication of this command, regardless of server.dirty.
 * s: command not allowed in scripts.
 * R: random command. Command is not deterministic, that is, the same command
 *    with the same arguments, with the same key space, may have different
 *    results. For instance SPOP and RANDOMKEY are two random commands.
 * S: Sort command output array if called from script, so that the output
 *    is deterministic.
 * l: Allow command while loading the database.
 * t: Allow command while a slave has stale data but is not allowed to
 *    serve this data. Normally no command is accepted in this condition
 *    but just a few.
 * M: Do not automatically propagate the command on MONITOR.
 * k: Perform an implicit ASKING for this command, so the command will be
 *    accepted in cluster mode if the slot is marked as 'importing'.
 */
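
/* For example (an illustrative reading of the first entry below, not an
 * additional command): the "get" line declares a command named "get",
 * implemented by getCommand(), with arity 2 (the command name plus one key),
 * sflags "r" (read-only), a computed flags bitmask of 0, no special
 * key-extraction function (NULL), keys at argument positions 1..1 with step 1,
 * and the microseconds/calls statistics initialized to 0. */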
struct redisCommand redisCommandTable[] = {
    {"get",getCommand,2,"r",0,NULL,1,1,1,0,0},
    {"set",setCommand,-3,"wm",0,NULL,1,1,1,0,0},
    {"setnx",setnxCommand,3,"wm",0,NULL,1,1,1,0,0},
    {"setex",setexCommand,4,"wm",0,NULL,1,1,1,0,0},
    {"psetex",psetexCommand,4,"wm",0,NULL,1,1,1,0,0},
    {"append",appendCommand,3,"wm",0,NULL,1,1,1,0,0},
    {"strlen",strlenCommand,2,"r",0,NULL,1,1,1,0,0},
    {"del",delCommand,-2,"w",0,NULL,1,-1,1,0,0},
    {"exists",existsCommand,2,"r",0,NULL,1,1,1,0,0},
    {"setbit",setbitCommand,4,"wm",0,NULL,1,1,1,0,0},
    {"getbit",getbitCommand,3,"r",0,NULL,1,1,1,0,0},
    {"setrange",setrangeCommand,4,"wm",0,NULL,1,1,1,0,0},
    {"getrange",getrangeCommand,4,"r",0,NULL,1,1,1,0,0},
    {"substr",getrangeCommand,4,"r",0,NULL,1,1,1,0,0},
    {"incr",incrCommand,2,"wm",0,NULL,1,1,1,0,0},
    {"decr",decrCommand,2,"wm",0,NULL,1,1,1,0,0},
    {"mget",mgetCommand,-2,"r",0,NULL,1,-1,1,0,0},
    {"rpush",rpushCommand,-3,"wm",0,NULL,1,1,1,0,0},
    {"lpush",lpushCommand,-3,"wm",0,NULL,1,1,1,0,0},
    {"rpushx",rpushxCommand,3,"wm",0,NULL,1,1,1,0,0},
    {"lpushx",lpushxCommand,3,"wm",0,NULL,1,1,1,0,0},
    {"linsert",linsertCommand,5,"wm",0,NULL,1,1,1,0,0},
    {"rpop",rpopCommand,2,"w",0,NULL,1,1,1,0,0},
    {"lpop",lpopCommand,2,"w",0,NULL,1,1,1,0,0},
    {"brpop",brpopCommand,-3,"ws",0,NULL,1,1,1,0,0},
    {"brpoplpush",brpoplpushCommand,4,"wms",0,NULL,1,2,1,0,0},
    {"blpop",blpopCommand,-3,"ws",0,NULL,1,-2,1,0,0},
    {"llen",llenCommand,2,"r",0,NULL,1,1,1,0,0},
    {"lindex",lindexCommand,3,"r",0,NULL,1,1,1,0,0},
    {"lset",lsetCommand,4,"wm",0,NULL,1,1,1,0,0},
    {"lrange",lrangeCommand,4,"r",0,NULL,1,1,1,0,0},
    {"ltrim",ltrimCommand,4,"w",0,NULL,1,1,1,0,0},
    {"lrem",lremCommand,4,"w",0,NULL,1,1,1,0,0},
    {"rpoplpush",rpoplpushCommand,3,"wm",0,NULL,1,2,1,0,0},
    {"sadd",saddCommand,-3,"wm",0,NULL,1,1,1,0,0},
    {"srem",sremCommand,-3,"w",0,NULL,1,1,1,0,0},
    {"smove",smoveCommand,4,"w",0,NULL,1,2,1,0,0},
    {"sismember",sismemberCommand,3,"r",0,NULL,1,1,1,0,0},
    {"scard",scardCommand,2,"r",0,NULL,1,1,1,0,0},
    {"spop",spopCommand,2,"wRs",0,NULL,1,1,1,0,0},
    {"srandmember",srandmemberCommand,-2,"rR",0,NULL,1,1,1,0,0},
    {"sinter",sinterCommand,-2,"rS",0,NULL,1,-1,1,0,0},
    {"sinterstore",sinterstoreCommand,-3,"wm",0,NULL,1,-1,1,0,0},
    {"sunion",sunionCommand,-2,"rS",0,NULL,1,-1,1,0,0},
    {"sunionstore",sunionstoreCommand,-3,"wm",0,NULL,1,-1,1,0,0},
    {"sdiff",sdiffCommand,-2,"rS",0,NULL,1,-1,1,0,0},
    {"sdiffstore",sdiffstoreCommand,-3,"wm",0,NULL,1,-1,1,0,0},
    {"smembers",sinterCommand,2,"rS",0,NULL,1,1,1,0,0},
    {"sscan",sscanCommand,-3,"rR",0,NULL,1,1,1,0,0},
    {"zadd",zaddCommand,-4,"wm",0,NULL,1,1,1,0,0},
    {"zincrby",zincrbyCommand,4,"wm",0,NULL,1,1,1,0,0},
    {"zrem",zremCommand,-3,"w",0,NULL,1,1,1,0,0},
    {"zremrangebyscore",zremrangebyscoreCommand,4,"w",0,NULL,1,1,1,0,0},
    {"zremrangebyrank",zremrangebyrankCommand,4,"w",0,NULL,1,1,1,0,0},
    {"zunionstore",zunionstoreCommand,-4,"wm",0,zunionInterGetKeys,0,0,0,0,0},
    {"zinterstore",zinterstoreCommand,-4,"wm",0,zunionInterGetKeys,0,0,0,0,0},
    {"zrange",zrangeCommand,-4,"r",0,NULL,1,1,1,0,0},
    {"zrangebyscore",zrangebyscoreCommand,-4,"r",0,NULL,1,1,1,0,0},
    {"zrevrangebyscore",zrevrangebyscoreCommand,-4,"r",0,NULL,1,1,1,0,0},
    {"zcount",zcountCommand,4,"r",0,NULL,1,1,1,0,0},
    {"zrevrange",zrevrangeCommand,-4,"r",0,NULL,1,1,1,0,0},
    {"zcard",zcardCommand,2,"r",0,NULL,1,1,1,0,0},
    {"zscore",zscoreCommand,3,"r",0,NULL,1,1,1,0,0},
    {"zrank",zrankCommand,3,"r",0,NULL,1,1,1,0,0},
    {"zrevrank",zrevrankCommand,3,"r",0,NULL,1,1,1,0,0},
    {"zscan",zscanCommand,-3,"rR",0,NULL,1,1,1,0,0},
    {"hset",hsetCommand,4,"wm",0,NULL,1,1,1,0,0},
    {"hsetnx",hsetnxCommand,4,"wm",0,NULL,1,1,1,0,0},
    {"hget",hgetCommand,3,"r",0,NULL,1,1,1,0,0},
    {"hmset",hmsetCommand,-4,"wm",0,NULL,1,1,1,0,0},
    {"hmget",hmgetCommand,-3,"r",0,NULL,1,1,1,0,0},
    {"hincrby",hincrbyCommand,4,"wm",0,NULL,1,1,1,0,0},
    {"hincrbyfloat",hincrbyfloatCommand,4,"wm",0,NULL,1,1,1,0,0},
    {"hdel",hdelCommand,-3,"w",0,NULL,1,1,1,0,0},
    {"hlen",hlenCommand,2,"r",0,NULL,1,1,1,0,0},
    {"hkeys",hkeysCommand,2,"rS",0,NULL,1,1,1,0,0},
    {"hvals",hvalsCommand,2,"rS",0,NULL,1,1,1,0,0},
    {"hgetall",hgetallCommand,2,"r",0,NULL,1,1,1,0,0},
    {"hexists",hexistsCommand,3,"r",0,NULL,1,1,1,0,0},
    {"hscan",hscanCommand,-3,"rR",0,NULL,1,1,1,0,0},
    {"incrby",incrbyCommand,3,"wm",0,NULL,1,1,1,0,0},
    {"decrby",decrbyCommand,3,"wm",0,NULL,1,1,1,0,0},
    {"incrbyfloat",incrbyfloatCommand,3,"wm",0,NULL,1,1,1,0,0},
    {"getset",getsetCommand,3,"wm",0,NULL,1,1,1,0,0},
    {"mset",msetCommand,-3,"wm",0,NULL,1,-1,2,0,0},
    {"msetnx",msetnxCommand,-3,"wm",0,NULL,1,-1,2,0,0},
    {"randomkey",randomkeyCommand,1,"rR",0,NULL,0,0,0,0,0},
    {"select",selectCommand,2,"rl",0,NULL,0,0,0,0,0},
    {"move",moveCommand,3,"w",0,NULL,1,1,1,0,0},
    {"rename",renameCommand,3,"w",0,NULL,1,2,1,0,0},
    {"renamenx",renamenxCommand,3,"w",0,NULL,1,2,1,0,0},
    {"expire",expireCommand,3,"w",0,NULL,1,1,1,0,0},
    {"expireat",expireatCommand,3,"w",0,NULL,1,1,1,0,0},
    {"pexpire",pexpireCommand,3,"w",0,NULL,1,1,1,0,0},
    {"pexpireat",pexpireatCommand,3,"w",0,NULL,1,1,1,0,0},
    {"keys",keysCommand,2,"rS",0,NULL,0,0,0,0,0},
    {"scan",scanCommand,-2,"rR",0,NULL,0,0,0,0,0},
    {"dbsize",dbsizeCommand,1,"r",0,NULL,0,0,0,0,0},
    {"auth",authCommand,2,"rslt",0,NULL,0,0,0,0,0},
    {"ping",pingCommand,1,"rt",0,NULL,0,0,0,0,0},
    {"echo",echoCommand,2,"r",0,NULL,0,0,0,0,0},
    {"save",saveCommand,1,"ars",0,NULL,0,0,0,0,0},
    {"bgsave",bgsaveCommand,1,"ar",0,NULL,0,0,0,0,0},
    {"bgrewriteaof",bgrewriteaofCommand,1,"ar",0,NULL,0,0,0,0,0},
    {"shutdown",shutdownCommand,-1,"arlt",0,NULL,0,0,0,0,0},
    {"lastsave",lastsaveCommand,1,"rR",0,NULL,0,0,0,0,0},
    {"type",typeCommand,2,"r",0,NULL,1,1,1,0,0},
    {"multi",multiCommand,1,"rs",0,NULL,0,0,0,0,0},
    {"exec",execCommand,1,"sM",0,NULL,0,0,0,0,0},
    {"discard",discardCommand,1,"rs",0,NULL,0,0,0,0,0},
    {"sync",syncCommand,1,"ars",0,NULL,0,0,0,0,0},
    {"psync",syncCommand,3,"ars",0,NULL,0,0,0,0,0},
    {"replconf",replconfCommand,-1,"arslt",0,NULL,0,0,0,0,0},
    {"flushdb",flushdbCommand,1,"w",0,NULL,0,0,0,0,0},
    {"flushall",flushallCommand,1,"w",0,NULL,0,0,0,0,0},
    {"sort",sortCommand,-2,"wm",0,sortGetKeys,1,1,1,0,0},
    {"info",infoCommand,-1,"rlt",0,NULL,0,0,0,0,0},
    {"monitor",monitorCommand,1,"ars",0,NULL,0,0,0,0,0},
    {"ttl",ttlCommand,2,"r",0,NULL,1,1,1,0,0},
    {"pttl",pttlCommand,2,"r",0,NULL,1,1,1,0,0},
    {"persist",persistCommand,2,"w",0,NULL,1,1,1,0,0},
    {"slaveof",slaveofCommand,3,"ast",0,NULL,0,0,0,0,0},
    {"debug",debugCommand,-2,"as",0,NULL,0,0,0,0,0},
    {"config",configCommand,-2,"art",0,NULL,0,0,0,0,0},
    {"subscribe",subscribeCommand,-2,"rpslt",0,NULL,0,0,0,0,0},
    {"unsubscribe",unsubscribeCommand,-1,"rpslt",0,NULL,0,0,0,0,0},
    {"psubscribe",psubscribeCommand,-2,"rpslt",0,NULL,0,0,0,0,0},
    {"punsubscribe",punsubscribeCommand,-1,"rpslt",0,NULL,0,0,0,0,0},
    {"publish",publishCommand,3,"pltr",0,NULL,0,0,0,0,0},
    {"pubsub",pubsubCommand,-2,"pltrR",0,NULL,0,0,0,0,0},
    {"watch",watchCommand,-2,"rs",0,NULL,1,-1,1,0,0},
    {"unwatch",unwatchCommand,1,"rs",0,NULL,0,0,0,0,0},
    {"cluster",clusterCommand,-2,"ar",0,NULL,0,0,0,0,0},
    {"restore",restoreCommand,-4,"awm",0,NULL,1,1,1,0,0},
    {"restore-asking",restoreCommand,-4,"awmk",0,NULL,1,1,1,0,0},
    {"migrate",migrateCommand,-6,"aw",0,NULL,0,0,0,0,0},
    {"asking",askingCommand,1,"r",0,NULL,0,0,0,0,0},
    {"readonly",readonlyCommand,1,"r",0,NULL,0,0,0,0,0},
    {"readwrite",readwriteCommand,1,"r",0,NULL,0,0,0,0,0},
    {"dump",dumpCommand,2,"ar",0,NULL,1,1,1,0,0},
    {"object",objectCommand,-2,"r",0,NULL,2,2,2,0,0},
    {"client",clientCommand,-2,"ar",0,NULL,0,0,0,0,0},
    {"eval",evalCommand,-3,"s",0,evalGetKeys,0,0,0,0,0},
    {"evalsha",evalShaCommand,-3,"s",0,evalGetKeys,0,0,0,0,0},
    {"slowlog",slowlogCommand,-2,"r",0,NULL,0,0,0,0,0},
    {"script",scriptCommand,-2,"ras",0,NULL,0,0,0,0,0},
    /* BITOP and BITCOUNT.
     *
     * The motivation for these commands is the use of Redis for real-time
     * statistics. See the article "Fast real time metrics using Redis":
     * http://blog.getspool.com/2011/11/29/fast-easy-realtime-metrics-using-redis-bitmaps/
     *
     * In general, Redis strings used as bitmaps via the SETBIT/GETBIT commands
     * provide a very space-efficient and fast way to store statistics. For
     * instance, in a web application with users, every user can be associated
     * with a key recording the days on which that user visited the web
     * service: day 0 is the day the service was put online, and the following
     * days are 1, 2, 3, and so forth. With SETBIT it is possible to set the
     * bit corresponding to the current day every time the user visits the
     * site. This information can be really valuable to extract user behaviour
     * information.
     *
     * Counting the set bits can be done on the fly with a Lua script, but a
     * fast native bit-count operation is useful, especially if it can operate
     * on ranges, and the strings involved are small as in the case of days
     * (even many years worth is still extremely little data). For this reason
     * BITCOUNT was introduced. It counts the number of bits set to 1 in a
     * string, with an optional range:
     *
     *     BITCOUNT key [start end]
     *
     * The start/end parameters are similar to GETRANGE. If omitted, the whole
     * string is tested.
     *
     * Population counting is more useful when bit-level operations like AND,
     * OR and XOR are available. For instance, to count the days on which three
     * users visited the site at the same time, we can take the AND of all the
     * bitmaps and then count the set bits. For this reason the BITOP command
     * was introduced:
     *
     *     BITOP [AND|OR|XOR|NOT] dest_key src_key1 src_key2 src_key3 ... src_keyN
     *
     * In the special case of NOT (which inverts the bits) only one source key
     * can be passed. The judicious use of BITCOUNT and BITOP combined can lead
     * to interesting use cases with a very space-efficient representation of
     * data. */
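
    /* A minimal usage sketch of the above (the key names are hypothetical,
     * chosen only for illustration):
     *
     *     SETBIT user:1:visits 120 1        (user 1 visited on day 120)
     *     BITOP AND both user:1:visits user:2:visits
     *     BITCOUNT both                     (days both users visited)
     */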
    {"time",timeCommand,1,"rR",0,NULL,0,0,0,0,0},
    {"bitop",bitopCommand,-4,"wm",0,NULL,2,-1,1,0,0},
    {"bitcount",bitcountCommand,-2,"r",0,NULL,1,1,1,0,0},
    {"bitpos",bitposCommand,-3,"r",0,NULL,1,1,1,0,0},
    {"wait",waitCommand,3,"rs",0,NULL,0,0,0,0,0}
};
struct evictionPoolEntry *evictionPoolAlloc(void);

/*============================ Utility functions ============================ */
/* Low level logging. To use only for very big messages, otherwise
 * redisLog() is preferable. */
void redisLogRaw(int level, const char *msg) {
    const int syslogLevelMap[] = { LOG_DEBUG, LOG_INFO, LOG_NOTICE, LOG_WARNING };
    const char *c = ".-*#";
    FILE *fp;
    char buf[64];
    int rawmode = (level & REDIS_LOG_RAW);
    int log_to_stdout = server.logfile[0] == '\0';

    level &= 0xff; /* clear flags */
    if (level < server.verbosity) return;

    fp = log_to_stdout ? stdout : fopen(server.logfile,"a");
    if (!fp) return;

    if (rawmode) {
        fprintf(fp,"%s",msg);
    } else {
        int off;
        struct timeval tv;

        gettimeofday(&tv,NULL);
        off = strftime(buf,sizeof(buf),"%d %b %H:%M:%S.",localtime(&tv.tv_sec));
        snprintf(buf+off,sizeof(buf)-off,"%03d",(int)tv.tv_usec/1000);
        fprintf(fp,"[%d] %s %c %s\n",(int)getpid(),buf,c[level],msg);
    }
    fflush(fp);

    if (!log_to_stdout) fclose(fp);
    if (server.syslog_enabled) syslog(syslogLevelMap[level], "%s", msg);
}
/* Like redisLogRaw() but with printf-alike support. This is the function that
 * is used across the code. The raw version is only used in order to dump
 * the INFO output on crash. */
void redisLog(int level, const char *fmt, ...) {
    va_list ap;
    char msg[REDIS_MAX_LOGMSG_LEN];

    if ((level&0xff) < server.verbosity) return;

    va_start(ap, fmt);
    vsnprintf(msg, sizeof(msg), fmt, ap);
    va_end(ap);

    redisLogRaw(level,msg);
}
/* Log a fixed message without printf-alike capabilities, in a way that is
 * safe to call from a signal handler.
 *
 * We actually use this only for signals that are not fatal from the point
 * of view of Redis. Signals that are going to kill the server anyway and
 * where we need printf-alike features are served by redisLog(). */
void redisLogFromHandler(int level, const char *msg) {
    int fd;
    int log_to_stdout = server.logfile[0] == '\0';
    char buf[64];

    if ((level&0xff) < server.verbosity || (log_to_stdout && server.daemonize))
        return;
    fd = log_to_stdout ? STDOUT_FILENO :
                         open(server.logfile, O_APPEND|O_CREAT|O_WRONLY, 0644);
    if (fd == -1) return;
    ll2string(buf,sizeof(buf),getpid());
    if (write(fd,"[",1) == -1) goto err;
    if (write(fd,buf,strlen(buf)) == -1) goto err;
    if (write(fd," | signal handler] (",20) == -1) goto err;
    ll2string(buf,sizeof(buf),time(NULL));
    if (write(fd,buf,strlen(buf)) == -1) goto err;
    if (write(fd,") ",2) == -1) goto err;
    if (write(fd,msg,strlen(msg)) == -1) goto err;
    if (write(fd,"\n",1) == -1) goto err;
err:
    if (!log_to_stdout) close(fd);
}
/* Return the UNIX time in microseconds */
long long ustime(void) {
    struct timeval tv;
    long long ust;

    gettimeofday(&tv, NULL);
    ust = ((long long)tv.tv_sec)*1000000;
    ust += tv.tv_usec;
    return ust;
}

/* Return the UNIX time in milliseconds */
long long mstime(void) {
    return ustime()/1000;
}
/* After an RDB dump or AOF rewrite we exit from children using _exit() instead
 * of exit(), because the latter may interact with the same file objects used
 * by the parent process. However if we are testing the coverage normal exit()
 * is used in order to obtain the right coverage information. */
void exitFromChild(int retcode) {
#ifdef COVERAGE_TEST
    exit(retcode);
#else
    _exit(retcode);
#endif
}
/*====================== Hash table type implementation  ==================== */

/* This is a hash table type that uses the SDS dynamic strings library as
 * keys and Redis objects as values (objects can hold SDS strings,
 * lists, sets). */

void dictVanillaFree(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);
    zfree(val);
}

void dictListDestructor(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);
    listRelease((list*)val);
}

int dictSdsKeyCompare(void *privdata, const void *key1,
        const void *key2)
{
    int l1,l2;
    DICT_NOTUSED(privdata);

    l1 = sdslen((sds)key1);
    l2 = sdslen((sds)key2);
    if (l1 != l2) return 0;
    return memcmp(key1, key2, l1) == 0;
}
/* A case insensitive version used for the command lookup table and other
 * places where case insensitive non binary-safe comparison is needed. */
int dictSdsKeyCaseCompare(void *privdata, const void *key1,
        const void *key2)
{
    DICT_NOTUSED(privdata);

    return strcasecmp(key1, key2) == 0;
}

void dictRedisObjectDestructor(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);

    if (val == NULL) return; /* Values of swapped out keys are set to NULL */
    decrRefCount(val);
}

void dictSdsDestructor(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);

    sdsfree(val);
}

int dictObjKeyCompare(void *privdata, const void *key1,
        const void *key2)
{
    const robj *o1 = key1, *o2 = key2;
    return dictSdsKeyCompare(privdata,o1->ptr,o2->ptr);
}

unsigned int dictObjHash(const void *key) {
    const robj *o = key;
    return dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
}

unsigned int dictSdsHash(const void *key) {
    return dictGenHashFunction((unsigned char*)key, sdslen((char*)key));
}

unsigned int dictSdsCaseHash(const void *key) {
    return dictGenCaseHashFunction((unsigned char*)key, sdslen((char*)key));
}
int dictEncObjKeyCompare(void *privdata, const void *key1,
        const void *key2)
{
    robj *o1 = (robj*) key1, *o2 = (robj*) key2;
    int cmp;

    if (o1->encoding == REDIS_ENCODING_INT &&
        o2->encoding == REDIS_ENCODING_INT)
            return o1->ptr == o2->ptr;

    o1 = getDecodedObject(o1);
    o2 = getDecodedObject(o2);
    cmp = dictSdsKeyCompare(privdata,o1->ptr,o2->ptr);
    decrRefCount(o1);
    decrRefCount(o2);
    return cmp;
}

unsigned int dictEncObjHash(const void *key) {
    robj *o = (robj*) key;

    if (sdsEncodedObject(o)) {
        return dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
    } else {
        if (o->encoding == REDIS_ENCODING_INT) {
            char buf[32];
            int len;

            len = ll2string(buf,32,(long)o->ptr);
            return dictGenHashFunction((unsigned char*)buf, len);
        } else {
            unsigned int hash;

            o = getDecodedObject(o);
            hash = dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
            decrRefCount(o);
            return hash;
        }
    }
}
/* Sets type hash table */
dictType setDictType = {
    dictEncObjHash,            /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictEncObjKeyCompare,      /* key compare */
    dictRedisObjectDestructor, /* key destructor */
    NULL                       /* val destructor */
};

/* Sorted sets hash (note: a skiplist is used in addition to the hash table) */
dictType zsetDictType = {
    dictEncObjHash,            /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictEncObjKeyCompare,      /* key compare */
    dictRedisObjectDestructor, /* key destructor */
    NULL                       /* val destructor */
};

/* Db->dict, keys are sds strings, vals are Redis objects. */
dictType dbDictType = {
    dictSdsHash,                /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictSdsKeyCompare,          /* key compare */
    dictSdsDestructor,          /* key destructor */
    dictRedisObjectDestructor   /* val destructor */
};

/* server.lua_scripts sha (as sds string) -> scripts (as robj) cache. */
dictType shaScriptObjectDictType = {
    dictSdsCaseHash,            /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictSdsKeyCaseCompare,      /* key compare */
    dictSdsDestructor,          /* key destructor */
    dictRedisObjectDestructor   /* val destructor */
};

/* Db->expires */
dictType keyptrDictType = {
    dictSdsHash,               /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictSdsKeyCompare,         /* key compare */
    NULL,                      /* key destructor */
    NULL                       /* val destructor */
};

/* Command table. sds string -> command struct pointer. */
dictType commandTableDictType = {
    dictSdsCaseHash,           /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictSdsKeyCaseCompare,     /* key compare */
    dictSdsDestructor,         /* key destructor */
    NULL                       /* val destructor */
};

/* Hash type hash table (note that small hashes are represented with ziplists) */
dictType hashDictType = {
    dictEncObjHash,             /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictEncObjKeyCompare,       /* key compare */
    dictRedisObjectDestructor,  /* key destructor */
    dictRedisObjectDestructor   /* val destructor */
};

/* Keylist hash table type has unencoded redis objects as keys and
 * lists as values. It's used for blocking operations (BLPOP) and to
 * map swapped keys to a list of clients waiting for these keys to be loaded. */
dictType keylistDictType = {
    dictObjHash,                /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictObjKeyCompare,          /* key compare */
    dictRedisObjectDestructor,  /* key destructor */
    dictListDestructor          /* val destructor */
};

/* Cluster nodes hash table, mapping nodes addresses 1.2.3.4:6379 to
 * clusterNode structures. */
dictType clusterNodesDictType = {
    dictSdsHash,                /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictSdsKeyCompare,          /* key compare */
    dictSdsDestructor,          /* key destructor */
    NULL                        /* val destructor */
};

/* Cluster re-addition blacklist. This maps node IDs to the time
 * we can re-add this node. The goal is to avoid re-adding a removed
 * node for some time. */
dictType clusterNodesBlackListDictType = {
    dictSdsCaseHash,            /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictSdsKeyCaseCompare,      /* key compare */
    dictSdsDestructor,          /* key destructor */
    NULL                        /* val destructor */
};

/* Migrate cache dict type. */
dictType migrateCacheDictType = {
    dictSdsHash,                /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictSdsKeyCompare,          /* key compare */
    dictSdsDestructor,          /* key destructor */
    NULL                        /* val destructor */
};

/* Replication cached script dict (server.repl_scriptcache_dict).
 * Keys are sds SHA1 strings, while values are not used at all in the current
 * implementation. */
dictType replScriptCacheDictType = {
    dictSdsCaseHash,            /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictSdsKeyCaseCompare,      /* key compare */
    dictSdsDestructor,          /* key destructor */
    NULL                        /* val destructor */
};
int htNeedsResize(dict *dict) {
    long long size, used;

    size = dictSlots(dict);
    used = dictSize(dict);
    return (size && used && size > DICT_HT_INITIAL_SIZE &&
            (used*100/size < REDIS_HT_MINFILL));
}
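
/* A worked example of the threshold above (assuming REDIS_HT_MINFILL is
 * defined as 10 in redis.h): a dictionary with 4096 slots and 200 used
 * entries is ~4% full and is reported as needing a resize, while the same
 * dictionary with 600 used entries (~14% full) is not. */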
/* If the percentage of used slots in the HT reaches REDIS_HT_MINFILL
 * we resize the hash table to save memory */
void tryResizeHashTables(int dbid) {
    if (htNeedsResize(server.db[dbid].dict))
        dictResize(server.db[dbid].dict);
    if (htNeedsResize(server.db[dbid].expires))
        dictResize(server.db[dbid].expires);
}
/* Our hash table implementation performs rehashing incrementally while
 * we write/read from the hash table. Still if the server is idle, the hash
 * table will use two tables for a long time. So we try to use 1 millisecond
 * of CPU time at every call of this function to perform some rehashing.
 *
 * The function returns 1 if some rehashing was performed, otherwise 0
 * is returned. */
int incrementallyRehash(int dbid) {
    /* Keys dictionary */
    if (dictIsRehashing(server.db[dbid].dict)) {
        dictRehashMilliseconds(server.db[dbid].dict,1);
        return 1; /* already used our millisecond for this loop... */
    }
    /* Expires */
    if (dictIsRehashing(server.db[dbid].expires)) {
        dictRehashMilliseconds(server.db[dbid].expires,1);
        return 1; /* already used our millisecond for this loop... */
    }
    return 0;
}
/* This function is called once a background process of some kind terminates,
 * as we want to avoid resizing the hash tables when there is a child in order
 * to play well with copy-on-write (otherwise when a resize happens lots of
 * memory pages are copied). The goal of this function is to update the ability
 * for dict.c to resize the hash tables according to whether we have a running
 * child or not. */
void updateDictResizePolicy(void) {
    if (server.rdb_child_pid == -1 && server.aof_child_pid == -1)
        dictEnableResize();
    else
        dictDisableResize();
}

/* ======================= Cron: called every 100 ms ======================== */
/* Helper function for the activeExpireCycle() function.
 * This function will try to expire the key that is stored in the hash table
 * entry 'de' of the 'expires' hash table of a Redis database.
 *
 * If the key is found to be expired, it is removed from the database and
 * 1 is returned. Otherwise no operation is performed and 0 is returned.
 *
 * When a key is expired, server.stat_expiredkeys is incremented.
 *
 * The parameter 'now' is the current time in milliseconds and is passed
 * to the function to avoid too many gettimeofday() syscalls. */
int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) {
    long long t = dictGetSignedIntegerVal(de);
    if (now > t) {
        sds key = dictGetKey(de);
        robj *keyobj = createStringObject(key,sdslen(key));

        propagateExpire(db,keyobj);
        dbDelete(db,keyobj);
        notifyKeyspaceEvent(REDIS_NOTIFY_EXPIRED,
            "expired",keyobj,db->id);
        decrRefCount(keyobj);
        server.stat_expiredkeys++;
        return 1;
    } else {
        return 0;
    }
}
/* Try to expire a few timed out keys. The algorithm used is adaptive and
 * will use few CPU cycles if there are few expiring keys, otherwise
 * it will get more aggressive to avoid that too much memory is used by
 * keys that can be removed from the keyspace.
 *
 * No more than REDIS_DBCRON_DBS_PER_CALL databases are tested at every
 * iteration.
 *
 * The fast kind of cycle is used when Redis detects that timelimit_exit is
 * true, meaning there is more work to do, and the work is performed more
 * incrementally from the beforeSleep() function of the event loop.
 *
 * Expire cycle type:
 *
 * If type is ACTIVE_EXPIRE_CYCLE_FAST the function will try to run a
 * "fast" expire cycle that takes no longer than
 * ACTIVE_EXPIRE_CYCLE_FAST_DURATION microseconds, and is not repeated again
 * before the same amount of time.
 *
 * If type is ACTIVE_EXPIRE_CYCLE_SLOW, the normal expire cycle is
 * executed, where the time limit is a percentage of the CPU time available
 * per server.hz period, as specified by the ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC
 * define. */
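
/* Illustrative arithmetic for the slow cycle time limit computed below
 * (assuming the default server.hz of 10 and ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC
 * defined as 25): timelimit = 1000000*25/10/100 = 25000 microseconds, i.e.
 * up to 25 ms of expiration work per serverCron() invocation. */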
void activeExpireCycle(int type) {
    /* This function has some global state in order to continue the work
     * incrementally across calls. */
    static unsigned int current_db = 0; /* Last DB tested. */
    static int timelimit_exit = 0;      /* Time limit hit in previous call? */
    static long long last_fast_cycle = 0; /* When last fast cycle ran. */

    unsigned int j, iteration = 0;
    unsigned int dbs_per_call = REDIS_DBCRON_DBS_PER_CALL;
    long long start = ustime(), timelimit;

    if (type == ACTIVE_EXPIRE_CYCLE_FAST) {
        /* Don't start a fast cycle if the previous cycle did not exit
         * because of the time limit. Also don't repeat a fast cycle for the
         * same period as the fast cycle total duration itself. */
        if (!timelimit_exit) return;
        if (start < last_fast_cycle + ACTIVE_EXPIRE_CYCLE_FAST_DURATION*2) return;
        last_fast_cycle = start;
    }

    /* We usually should test REDIS_DBCRON_DBS_PER_CALL per iteration, with
     * two exceptions:
     *
     * 1) Don't test more DBs than we have.
     * 2) If last time we hit the time limit, we want to scan all the DBs
     * in this iteration, as there is work to do in some DB and we don't want
     * expired keys to use memory for too much time. */
    if (dbs_per_call > server.dbnum || timelimit_exit)
        dbs_per_call = server.dbnum;

    /* We can use at max ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC percentage of CPU
     * time per iteration. Since this function gets called with a frequency of
     * server.hz times per second, the following is the max amount of
     * microseconds we can spend in this function. */
    timelimit = 1000000*ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC/server.hz/100;
    timelimit_exit = 0;
    if (timelimit <= 0) timelimit = 1;

    if (type == ACTIVE_EXPIRE_CYCLE_FAST)
        timelimit = ACTIVE_EXPIRE_CYCLE_FAST_DURATION; /* in microseconds. */

    for (j = 0; j < dbs_per_call; j++) {
        int expired;
        redisDb *db = server.db+(current_db % server.dbnum);

        /* Increment the DB now so we are sure if we run out of time
         * in the current DB we'll restart from the next. This allows to
         * distribute the time evenly across DBs. */
        current_db++;

        /* Continue to expire if at the end of the cycle more than 25%
         * of the keys were expired. */
        do {
            unsigned long num, slots;
            long long now, ttl_sum;
            int ttl_samples;

            /* If there is nothing to expire try next DB ASAP. */
            if ((num = dictSize(db->expires)) == 0) {
                db->avg_ttl = 0;
                break;
            }
            slots = dictSlots(db->expires);
            now = mstime();

            /* When there are less than 1% filled slots getting random
             * keys is expensive, so stop here waiting for better times...
             * The dictionary will be resized asap. */
            if (num && slots > DICT_HT_INITIAL_SIZE &&
                (num*100/slots < 1)) break;

            /* The main collection cycle. Sample random keys among keys
             * with an expire set, checking for expired ones. */
            expired = 0;
            ttl_sum = 0;
            ttl_samples = 0;

            if (num > ACTIVE_EXPIRE_CYCLE_LOOKUPS_PER_LOOP)
                num = ACTIVE_EXPIRE_CYCLE_LOOKUPS_PER_LOOP;

            while (num--) {
                dictEntry *de;
                long long ttl;

                if ((de = dictGetRandomKey(db->expires)) == NULL) break;
                ttl = dictGetSignedIntegerVal(de)-now;
                if (activeExpireCycleTryExpire(db,de,now)) expired++;
                if (ttl < 0) ttl = 0;
                ttl_sum += ttl;
                ttl_samples++;
            }

            /* Update the average TTL stats for this database. */
            if (ttl_samples) {
                long long avg_ttl = ttl_sum/ttl_samples;

                if (db->avg_ttl == 0) db->avg_ttl = avg_ttl;
                /* Smooth the value averaging with the previous one. */
                db->avg_ttl = (db->avg_ttl+avg_ttl)/2;
            }

            /* We can't block forever here even if there are many keys to
             * expire. So after a given amount of milliseconds return to the
             * caller waiting for the other active expire cycle. */
            iteration++;
            if ((iteration & 0xf) == 0 && /* check once every 16 iterations. */
                (ustime()-start) > timelimit)
            {
                timelimit_exit = 1;
            }
            if (timelimit_exit) return;
            /* We don't repeat the cycle if there are less than 25% of keys
             * found expired in the current DB. */
        } while (expired > ACTIVE_EXPIRE_CYCLE_LOOKUPS_PER_LOOP/4);
    }
}
unsigned int getLRUClock(void) {
    return (mstime()/REDIS_LRU_CLOCK_RESOLUTION) & REDIS_LRU_CLOCK_MAX;
}
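
/* Back-of-the-envelope for the LRU clock above (assuming
 * REDIS_LRU_CLOCK_RESOLUTION is 1000 milliseconds and the clock is stored in
 * 24 bits, i.e. REDIS_LRU_CLOCK_MAX is (1<<24)-1): the clock ticks once per
 * second and wraps around roughly every 2^24 seconds, about 194 days. */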
/* Add a sample to the operations per second array of samples. */
void trackOperationsPerSecond(void) {
    long long t = mstime() - server.ops_sec_last_sample_time;
    long long ops = server.stat_numcommands - server.ops_sec_last_sample_ops;
    long long ops_sec;

    ops_sec = t > 0 ? (ops*1000/t) : 0;

    server.ops_sec_samples[server.ops_sec_idx] = ops_sec;
    server.ops_sec_idx = (server.ops_sec_idx+1) % REDIS_OPS_SEC_SAMPLES;
    server.ops_sec_last_sample_time = mstime();
    server.ops_sec_last_sample_ops = server.stat_numcommands;
}

/* Return the mean of all the samples. */
long long getOperationsPerSecond(void) {
    int j;
    long long sum = 0;

    for (j = 0; j < REDIS_OPS_SEC_SAMPLES; j++)
        sum += server.ops_sec_samples[j];
    return sum / REDIS_OPS_SEC_SAMPLES;
}
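
/* Taken together, the two functions above implement a small sliding-window
 * average: trackOperationsPerSecond() is invoked roughly every 100 ms from
 * serverCron() (see the run_with_period(100) call below), so assuming
 * REDIS_OPS_SEC_SAMPLES is 16 the reported mean covers approximately the
 * last 1.6 seconds of traffic. */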
/* Check for timeouts. Returns non-zero if the client was terminated */
int clientsCronHandleTimeout(redisClient *c) {
    time_t now = server.unixtime;

    if (server.maxidletime &&
        !(c->flags & REDIS_SLAVE) &&    /* no timeout for slaves */
        !(c->flags & REDIS_MASTER) &&   /* no timeout for masters */
        !(c->flags & REDIS_BLOCKED) &&  /* no timeout for BLPOP */
        dictSize(c->pubsub_channels) == 0 && /* no timeout for pubsub */
        listLength(c->pubsub_patterns) == 0 &&
        (now - c->lastinteraction > server.maxidletime))
    {
        redisLog(REDIS_VERBOSE,"Closing idle client");
        freeClient(c);
        return 1;
    } else if (c->flags & REDIS_BLOCKED) {
        /* Blocked OPS timeout is handled with milliseconds resolution.
         * However note that the actual resolution is limited by
         * server.hz. */
        mstime_t now_ms = mstime();

        if (c->bpop.timeout != 0 && c->bpop.timeout < now_ms) {
            replyToBlockedClientTimedOut(c);
            unblockClient(c);
        }
    }
    return 0;
}
/* The client query buffer is an sds.c string that can end with a lot of
 * free space not used, this function reclaims space if needed.
 *
 * The function always returns 0 as it never terminates the client. */
int clientsCronResizeQueryBuffer(redisClient *c) {
    size_t querybuf_size = sdsAllocSize(c->querybuf);
    time_t idletime = server.unixtime - c->lastinteraction;

    /* There are two conditions to resize the query buffer:
     * 1) Query buffer is > BIG_ARG and too big for latest peak.
     * 2) Client is inactive and the buffer is bigger than 1k. */
    if (((querybuf_size > REDIS_MBULK_BIG_ARG) &&
         (querybuf_size/(c->querybuf_peak+1)) > 2) ||
         (querybuf_size > 1024 && idletime > 2))
    {
        /* Only resize the query buffer if it is actually wasting space. */
        if (sdsavail(c->querybuf) > 1024) {
            c->querybuf = sdsRemoveFreeSpace(c->querybuf);
        }
    }
    /* Reset the peak again to capture the peak memory usage in the next
     * cycle. */
    c->querybuf_peak = 0;
    return 0;
}
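
/* Illustrative pacing for clientsCron() below (assuming the default
 * server.hz of 10): with 5000 connected clients, iterations =
 * 5000/(10*10) = 50 clients per call, so every client is visited at least
 * once every 10 seconds. With fewer clients the computed value is bumped up
 * to at most 50 (or all of them, if fewer than 50 are connected), covering
 * the whole list even faster. */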
void clientsCron(void) {
    /* Make sure to process at least 1/(server.hz*10) of clients per call.
     * Since this function is called server.hz times per second we are sure
     * that in the worst case we process all the clients in 10 seconds.
     * In normal conditions (a reasonable number of clients) we process
     * all the clients in a shorter time. */
    int numclients = listLength(server.clients);
    int iterations = numclients/(server.hz*10);

    if (iterations < 50)
        iterations = (numclients < 50) ? numclients : 50;
    while(listLength(server.clients) && iterations--) {
        redisClient *c;
        listNode *head;

        /* Rotate the list, take the current head, process.
         * This way if the client must be removed from the list it's the
         * first element and we don't incur in O(N) computation. */
        listRotate(server.clients);
        head = listFirst(server.clients);
        c = listNodeValue(head);
        /* The following functions do different service checks on the client.
         * The protocol is that they return non-zero if the client was
         * terminated. */
        if (clientsCronHandleTimeout(c)) continue;
        if (clientsCronResizeQueryBuffer(c)) continue;
    }
}
/* This function handles 'background' operations we are required to do
 * incrementally in Redis databases, such as active key expiring, resizing,
 * rehashing. */
void databasesCron(void) {
    /* Expire keys by random sampling. Not required for slaves
     * as master will synthesize DELs for us. */
    if (server.active_expire_enabled && server.masterhost == NULL)
        activeExpireCycle(ACTIVE_EXPIRE_CYCLE_SLOW);

    /* Perform hash tables rehashing if needed, but only if there are no
     * other processes saving the DB on disk. Otherwise rehashing is bad
     * as it will cause a lot of copy-on-write of memory pages. */
    if (server.rdb_child_pid == -1 && server.aof_child_pid == -1) {
        /* We use global counters so if we stop the computation at a given
         * DB we'll be able to start from the successive in the next
         * cron loop iteration. */
        static unsigned int resize_db = 0;
        static unsigned int rehash_db = 0;
        unsigned int dbs_per_call = REDIS_DBCRON_DBS_PER_CALL;
        unsigned int j;

        /* Don't test more DBs than we have. */
        if (dbs_per_call > server.dbnum) dbs_per_call = server.dbnum;

        /* Resize */
        for (j = 0; j < dbs_per_call; j++) {
            tryResizeHashTables(resize_db % server.dbnum);
            resize_db++;
        }

        /* Rehash */
        if (server.activerehashing) {
            for (j = 0; j < dbs_per_call; j++) {
                int work_done = incrementallyRehash(rehash_db % server.dbnum);
                rehash_db++;
                if (work_done) {
                    /* If the function did some work, stop here, we'll do
                     * more at the next cron loop. */
                    break;
                }
            }
        }
    }
}
/* We take a cached value of the unix time in the global state because with
 * virtual memory and aging we need to store the current time in objects at
 * every object access, and accuracy is not needed. Accessing a global var is
 * a lot faster than calling time(NULL). */
void updateCachedTime(void) {
    server.unixtime = time(NULL);
    server.mstime = mstime();
}
/* This is our timer interrupt, called server.hz times per second.
 * Here is where we do a number of things that need to be done asynchronously.
 * For instance:
 *
 * - Active expired keys collection (it is also performed in a lazy way on
 *   lookup).
 * - Software watchdog.
 * - Update some statistic.
 * - Incremental rehashing of the DBs hash tables.
 * - Triggering BGSAVE / AOF rewrite, and handling of terminated children.
 * - Clients timeout of different kinds.
 * - Replication reconnection.
 * - Many more...
 *
 * Everything directly called here will be called server.hz times per second,
 * so in order to throttle execution of things we want to do less frequently
 * a macro is used: run_with_period(milliseconds) { .... }
 */
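
/* A minimal sketch of how such a throttling macro can be implemented
 * (assumption: the actual run_with_period() definition lives in redis.h and
 * may differ in detail). The idea is to enter the attached block only when
 * enough cron ticks have elapsed to cover the requested period:
 *
 *   #define run_with_period(_ms_) \
 *       if ((_ms_ <= 1000/server.hz) || \
 *           !(server.cronloops%((_ms_)/(1000/server.hz))))
 *
 * so "run_with_period(100) trackOperationsPerSecond();" runs the call roughly
 * every 100 milliseconds regardless of the configured server.hz. */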
int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) {
    int j;
    REDIS_NOTUSED(eventLoop);
    REDIS_NOTUSED(id);
    REDIS_NOTUSED(clientData);

    /* Software watchdog: deliver the SIGALRM that will reach the signal
     * handler if we don't return here fast enough. */
    if (server.watchdog_period) watchdogScheduleSignal(server.watchdog_period);

    /* Update the time cache. */
    updateCachedTime();

    run_with_period(100) trackOperationsPerSecond();

    /* We have just REDIS_LRU_BITS bits per object for LRU information.
     * So we use an (eventually wrapping) LRU clock.
     *
     * Note that even if the counter wraps it's not a big problem,
     * everything will still work but some object will appear younger
     * to Redis. However for this to happen a given object should never be
     * touched for all the time needed for the counter to wrap, which is
     * not likely.
     *
     * Note that you can change the resolution altering the
     * REDIS_LRU_CLOCK_RESOLUTION define. */
    server.lruclock = getLRUClock();

    /* Record the max memory used since the server was started. */
    if (zmalloc_used_memory() > server.stat_peak_memory)
        server.stat_peak_memory = zmalloc_used_memory();

    /* We received a SIGTERM, shutting down here in a safe way, as it is
     * not ok doing so inside the signal handler. */
    if (server.shutdown_asap) {
        if (prepareForShutdown(0) == REDIS_OK) exit(0);
        redisLog(REDIS_WARNING,"SIGTERM received but errors trying to shut down the server, check the logs for more information");
        server.shutdown_asap = 0;
    }
    /* Show some info about non-empty databases */
    run_with_period(5000) {
        for (j = 0; j < server.dbnum; j++) {
            long long size, used, vkeys;

            size = dictSlots(server.db[j].dict);
            used = dictSize(server.db[j].dict);
            vkeys = dictSize(server.db[j].expires);
            if (used || vkeys) {
                redisLog(REDIS_VERBOSE,"DB %d: %lld keys (%lld volatile) in %lld slots HT.",j,used,vkeys,size);
                /* dictPrintStats(server.dict); */
            }
        }
    }

    /* Show information about connected clients */
    if (!server.sentinel_mode) {
        run_with_period(5000) {
            redisLog(REDIS_VERBOSE,
                "%lu clients connected (%lu slaves), %zu bytes in use",
                listLength(server.clients)-listLength(server.slaves),
                listLength(server.slaves),
                zmalloc_used_memory());
        }
    }

    /* We need to do a few operations on clients asynchronously. */
    clientsCron();

    /* Handle background operations on Redis databases. */
    databasesCron();
/* Start a scheduled AOF rewrite if this was requested by the user while
* a BGSAVE was in progress . */
2011-12-21 06:22:13 -05:00
if ( server . rdb_child_pid = = - 1 & & server . aof_child_pid = = - 1 & &
2011-12-21 05:58:42 -05:00
server . aof_rewrite_scheduled )
2011-06-10 06:39:23 -04:00
{
rewriteAppendOnlyFileBackground ( ) ;
}
2011-01-07 17:41:00 -05:00
/* Check if a background saving or AOF rewrite in progress terminated. */
2011-12-21 06:22:13 -05:00
if ( server . rdb_child_pid ! = - 1 | | server . aof_child_pid ! = - 1 ) {
2010-06-21 18:07:48 -04:00
int statloc ;
pid_t pid ;
if ( ( pid = wait3 ( & statloc , WNOHANG , NULL ) ) ! = 0 ) {
2011-01-07 12:15:14 -05:00
int exitcode = WEXITSTATUS ( statloc ) ;
int bysignal = 0 ;
if ( WIFSIGNALED ( statloc ) ) bysignal = WTERMSIG ( statloc ) ;
2011-12-21 06:22:13 -05:00
if ( pid = = server . rdb_child_pid ) {
2011-01-07 12:15:14 -05:00
backgroundSaveDoneHandler ( exitcode , bysignal ) ;
2012-11-01 17:39:39 -04:00
} else if ( pid = = server . aof_child_pid ) {
2011-01-07 12:15:14 -05:00
backgroundRewriteDoneHandler ( exitcode , bysignal ) ;
2012-11-01 17:39:39 -04:00
} else {
redisLog ( REDIS_WARNING ,
" Warning, detected child with unmatched pid: %ld " ,
( long ) pid ) ;
2010-06-21 18:07:48 -04:00
}
updateDictResizePolicy ( ) ;
}
2011-06-25 06:22:03 -04:00
} else {
2011-06-10 06:39:23 -04:00
/* If there is no background saving/rewrite in progress check if
 * we have to save/rewrite now. */
2010-06-21 18:07:48 -04:00
for ( j = 0 ; j < server . saveparamslen ; j + + ) {
struct saveparam * sp = server . saveparams + j ;
2013-04-02 08:05:50 -04:00
/* Save if we reached the given number of changes,
 * the given number of seconds elapsed since the last save, and
 * if the latest bgsave was successful or if, in case of an error,
 * at least REDIS_BGSAVE_RETRY_DELAY seconds have already elapsed. */
2010-06-21 18:07:48 -04:00
if ( server . dirty > = sp - > changes & &
2013-04-02 08:05:50 -04:00
server . unixtime - server . lastsave > sp - > seconds & &
( server . unixtime - server . lastbgsave_try >
REDIS_BGSAVE_RETRY_DELAY | |
server . lastbgsave_status = = REDIS_OK ) )
{
2010-06-21 18:07:48 -04:00
redisLog ( REDIS_NOTICE , " %d changes in %d seconds. Saving... " ,
2013-02-27 06:27:15 -05:00
sp - > changes , ( int ) sp - > seconds ) ;
2011-12-21 06:22:13 -05:00
rdbSaveBackground ( server . rdb_filename ) ;
2010-06-21 18:07:48 -04:00
break ;
}
}
2011-06-10 06:39:23 -04:00
/* Trigger an AOF rewrite if needed */
2011-12-21 06:22:13 -05:00
if ( server . rdb_child_pid = = - 1 & &
2011-12-21 06:17:02 -05:00
server . aof_child_pid = = - 1 & &
2011-12-21 05:58:42 -05:00
server . aof_rewrite_perc & &
server . aof_current_size > server . aof_rewrite_min_size )
2011-06-10 06:39:23 -04:00
{
2011-12-21 05:58:42 -05:00
long long base = server . aof_rewrite_base_size ?
server . aof_rewrite_base_size : 1 ;
long long growth = ( server . aof_current_size * 100 / base ) - 100 ;
if ( growth > = server . aof_rewrite_perc ) {
2011-06-10 09:15:29 -04:00
redisLog ( REDIS_NOTICE , " Starting automatic rewriting of AOF on %lld%% growth " , growth ) ;
2011-06-10 06:39:23 -04:00
rewriteAppendOnlyFileBackground ( ) ;
}
}
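            /* Worked example of the growth computation above (illustrative
             * numbers): with aof_rewrite_base_size == 64mb and
             * aof_current_size == 160mb, growth = (160*100/64) - 100 = 150,
             * so with an auto-aof-rewrite-percentage of 100 a background
             * rewrite would be triggered. */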
2010-06-21 18:07:48 -04:00
}
2011-09-16 06:35:12 -04:00
2014-02-12 10:27:59 -05:00
/* AOF postponed flush: check at every cron cycle whether the slow fsync
 * completed. */
if ( server . aof_flush_postponed_start ) flushAppendOnlyFile ( 0 ) ;
/* AOF write errors: in this case we have a buffer to flush as well and
 * clear the AOF error in case of success to make the DB writable again,
 * however trying every second is enough in case 'hz' is set to
 * a higher frequency. */
run_with_period ( 1000 ) {
if ( server . aof_last_write_status = = REDIS_ERR )
flushAppendOnlyFile ( 0 ) ;
2014-02-12 06:47:10 -05:00
}
2011-09-16 06:35:12 -04:00
2012-01-23 10:17:22 -05:00
/* Close clients that need to be closed asynchronously. */
freeClientsInAsyncFreeQueue ( ) ;
2014-02-04 09:52:09 -05:00
/* Clear the paused clients flag if needed. */
clientsArePaused ( ) ; /* Don't check return value, just use the side effect. */
2010-11-04 12:29:53 -04:00
/* Replication cron function -- used to reconnect to master and
* to detect transfer failures . */
2012-05-13 10:40:29 -04:00
run_with_period ( 1000 ) replicationCron ( ) ;
2010-11-04 12:29:53 -04:00
2012-11-11 18:45:10 -05:00
/* Run the Redis Cluster cron. */
2013-10-09 10:18:33 -04:00
run_with_period ( 100 ) {
2012-05-13 10:40:29 -04:00
if ( server . cluster_enabled ) clusterCron ( ) ;
}
2011-03-29 11:51:15 -04:00
2012-11-11 18:45:10 -05:00
/* Run the Sentinel timer if we are in sentinel mode. */
2012-07-23 06:54:52 -04:00
run_with_period ( 100 ) {
if ( server . sentinel_mode ) sentinelTimer ( ) ;
}
2012-11-11 18:45:10 -05:00
/* Cleanup expired MIGRATE cached sockets. */
run_with_period ( 1000 ) {
migrateCloseTimedoutSockets ( ) ;
}
2011-01-20 07:18:23 -05:00
server . cronloops + + ;
2012-12-14 11:10:40 -05:00
return 1000 / server . hz ;
2010-06-21 18:07:48 -04:00
}
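/* Note on the run_with_period() calls used in serverCron() above: the real
 * macro is defined in redis.h. The sketch below is only an illustration
 * (hypothetical name, not used anywhere in this file) of how such a helper
 * can be expressed in terms of server.hz and server.cronloops: the guarded
 * block runs roughly once every _ms_ milliseconds, given a timer that fires
 * server.hz times per second. */
#define EXAMPLE_RUN_WITH_PERIOD(_ms_) \
    if ((_ms_) <= 1000/server.hz || !(server.cronloops%((_ms_)/(1000/server.hz))))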
/* This function gets called every time Redis is entering the
 * main loop of the event driven library, that is, before sleeping
 * for ready file descriptors. */
void beforeSleep ( struct aeEventLoop * eventLoop ) {
REDIS_NOTUSED ( eventLoop ) ;
2013-08-27 03:31:43 -04:00
/* Run a fast expire cycle (the called function will return
* ASAP if a fast cycle is not needed ) . */
if ( server . active_expire_enabled & & server . masterhost = = NULL )
activeExpireCycle ( ACTIVE_EXPIRE_CYCLE_FAST ) ;
2013-08-05 06:05:22 -04:00
2013-12-04 09:52:20 -05:00
/* Send all the slaves an ACK request if at least one client blocked
* during the previous event loop iteration . */
if ( server . get_ack_from_slaves ) {
robj * argv [ 3 ] ;
argv [ 0 ] = createStringObject ( " REPLCONF " , 8 ) ;
argv [ 1 ] = createStringObject ( " GETACK " , 6 ) ;
argv [ 2 ] = createStringObject ( " * " , 1 ) ; /* Not used argument. */
replicationFeedSlaves ( server . slaves , server . slaveseldb , argv , 3 ) ;
decrRefCount ( argv [ 0 ] ) ;
decrRefCount ( argv [ 1 ] ) ;
decrRefCount ( argv [ 2 ] ) ;
server . get_ack_from_slaves = 0 ;
}
/* Unblock all the clients blocked for synchronous replication
* in WAIT . */
if ( listLength ( server . clients_waiting_acks ) )
processClientsWaitingReplicas ( ) ;
2010-12-06 10:39:39 -05:00
/* Try to process pending commands for clients that were just unblocked. */
2013-12-04 09:52:20 -05:00
if ( listLength ( server . unblocked_clients ) )
processUnblockedClients ( ) ;
2010-12-06 10:39:39 -05:00
2010-06-21 18:07:48 -04:00
/* Write the AOF buffer on disk */
2011-09-16 06:35:12 -04:00
flushAppendOnlyFile ( 0 ) ;
2013-09-26 10:54:43 -04:00
/* Call the Redis Cluster before sleep function. */
if ( server . cluster_enabled ) clusterBeforeSleep ( ) ;
2010-06-21 18:07:48 -04:00
}
/* =========================== Server initialization ======================== */
void createSharedObjects ( void ) {
int j ;
shared . crlf = createObject ( REDIS_STRING , sdsnew ( " \r \n " ) ) ;
shared . ok = createObject ( REDIS_STRING , sdsnew ( " +OK \r \n " ) ) ;
shared . err = createObject ( REDIS_STRING , sdsnew ( " -ERR \r \n " ) ) ;
shared . emptybulk = createObject ( REDIS_STRING , sdsnew ( " $0 \r \n \r \n " ) ) ;
shared . czero = createObject ( REDIS_STRING , sdsnew ( " :0 \r \n " ) ) ;
shared . cone = createObject ( REDIS_STRING , sdsnew ( " :1 \r \n " ) ) ;
shared . cnegone = createObject ( REDIS_STRING , sdsnew ( " :-1 \r \n " ) ) ;
shared . nullbulk = createObject ( REDIS_STRING , sdsnew ( " $-1 \r \n " ) ) ;
shared . nullmultibulk = createObject ( REDIS_STRING , sdsnew ( " *-1 \r \n " ) ) ;
shared . emptymultibulk = createObject ( REDIS_STRING , sdsnew ( " *0 \r \n " ) ) ;
shared . pong = createObject ( REDIS_STRING , sdsnew ( " +PONG \r \n " ) ) ;
shared . queued = createObject ( REDIS_STRING , sdsnew ( " +QUEUED \r \n " ) ) ;
2013-10-28 06:17:32 -04:00
shared . emptyscan = createObject ( REDIS_STRING , sdsnew ( " *2 \r \n $1 \r \n 0 \r \n *0 \r \n " ) ) ;
2010-06-21 18:07:48 -04:00
shared . wrongtypeerr = createObject ( REDIS_STRING , sdsnew (
2012-11-06 14:25:34 -05:00
" -WRONGTYPE Operation against a key holding the wrong kind of value \r \n " ) ) ;
2010-06-21 18:07:48 -04:00
shared . nokeyerr = createObject ( REDIS_STRING , sdsnew (
" -ERR no such key \r \n " ) ) ;
shared . syntaxerr = createObject ( REDIS_STRING , sdsnew (
" -ERR syntax error \r \n " ) ) ;
shared . sameobjecterr = createObject ( REDIS_STRING , sdsnew (
" -ERR source and destination objects are the same \r \n " ) ) ;
shared . outofrangeerr = createObject ( REDIS_STRING , sdsnew (
" -ERR index out of range \r \n " ) ) ;
2011-05-13 16:02:38 -04:00
shared . noscripterr = createObject ( REDIS_STRING , sdsnew (
" -NOSCRIPT No matching script. Please use EVAL. \r \n " ) ) ;
2010-11-08 05:52:03 -05:00
shared . loadingerr = createObject ( REDIS_STRING , sdsnew (
" -LOADING Redis is loading the dataset in memory \r \n " ) ) ;
2011-10-27 08:49:10 -04:00
shared . slowscripterr = createObject ( REDIS_STRING , sdsnew (
2011-11-18 08:10:48 -05:00
" -BUSY Redis is busy running a script. You can only call SCRIPT KILL or SHUTDOWN NOSAVE. \r \n " ) ) ;
2012-05-02 11:14:45 -04:00
shared . masterdownerr = createObject ( REDIS_STRING , sdsnew (
" -MASTERDOWN Link with MASTER is down and slave-serve-stale-data is set to 'no'. \r \n " ) ) ;
2012-03-07 07:05:46 -05:00
shared . bgsaveerr = createObject ( REDIS_STRING , sdsnew (
2012-03-20 12:32:48 -04:00
" -MISCONF Redis is configured to save RDB snapshots, but is currently not able to persist on disk. Commands that may modify the data set are disabled. Please check Redis logs for details about the error. \r \n " ) ) ;
shared . roslaveerr = createObject ( REDIS_STRING , sdsnew (
" -READONLY You can't write against a read only slave. \r \n " ) ) ;
2013-02-12 10:25:41 -05:00
shared . noautherr = createObject ( REDIS_STRING , sdsnew (
" -NOAUTH Authentication required. \r \n " ) ) ;
2012-03-21 07:11:07 -04:00
shared . oomerr = createObject ( REDIS_STRING , sdsnew (
" -OOM command not allowed when used memory > 'maxmemory'. \r \n " ) ) ;
2012-11-15 14:11:05 -05:00
shared . execaborterr = createObject ( REDIS_STRING , sdsnew (
" -EXECABORT Transaction discarded because of previous errors. \r \n " ) ) ;
2013-05-29 05:36:44 -04:00
shared . noreplicaserr = createObject ( REDIS_STRING , sdsnew (
" -NOREPLICAS Not enough good slaves to write. \r \n " ) ) ;
2010-06-21 18:07:48 -04:00
shared . space = createObject ( REDIS_STRING , sdsnew ( " " ) ) ;
shared . colon = createObject ( REDIS_STRING , sdsnew ( " : " ) ) ;
shared . plus = createObject ( REDIS_STRING , sdsnew ( " + " ) ) ;
2012-03-29 13:06:53 -04:00
for ( j = 0 ; j < REDIS_SHARED_SELECT_CMDS ; j + + ) {
2012-11-03 07:15:29 -04:00
char dictid_str [ 64 ] ;
int dictid_len ;
dictid_len = ll2string ( dictid_str , sizeof ( dictid_str ) , j ) ;
2012-03-29 13:06:53 -04:00
shared . select [ j ] = createObject ( REDIS_STRING ,
2012-11-03 07:15:29 -04:00
sdscatprintf ( sdsempty ( ) ,
" *2 \r \n $6 \r \n SELECT \r \n $%d \r \n %s \r \n " ,
dictid_len , dictid_str ) ) ;
2012-03-29 13:06:53 -04:00
}
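    /* For illustration, the shared SELECT objects created above are plain
     * RESP multi bulk commands; e.g. for database 10 the cached string is
     *   "*2\r\n$6\r\nSELECT\r\n$2\r\n10\r\n"
     * (two arguments: the 6 byte command name and the 2 digit database id). */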
2010-06-21 18:07:48 -04:00
shared . messagebulk = createStringObject ( " $7 \r \n message \r \n " , 13 ) ;
shared . pmessagebulk = createStringObject ( " $8 \r \n pmessage \r \n " , 14 ) ;
shared . subscribebulk = createStringObject ( " $9 \r \n subscribe \r \n " , 15 ) ;
shared . unsubscribebulk = createStringObject ( " $11 \r \n unsubscribe \r \n " , 18 ) ;
shared . psubscribebulk = createStringObject ( " $10 \r \n psubscribe \r \n " , 17 ) ;
shared . punsubscribebulk = createStringObject ( " $12 \r \n punsubscribe \r \n " , 19 ) ;
2012-02-04 02:58:37 -05:00
shared . del = createStringObject ( " DEL " , 3 ) ;
2012-02-29 08:41:57 -05:00
shared . rpop = createStringObject ( " RPOP " , 4 ) ;
shared . lpop = createStringObject ( " LPOP " , 4 ) ;
2012-09-04 04:37:49 -04:00
shared . lpush = createStringObject ( " LPUSH " , 5 ) ;
2010-06-21 18:07:48 -04:00
for ( j = 0 ; j < REDIS_SHARED_INTEGERS ; j + + ) {
shared . integers [ j ] = createObject ( REDIS_STRING , ( void * ) ( long ) j ) ;
shared . integers [ j ] - > encoding = REDIS_ENCODING_INT ;
}
2012-02-04 02:58:37 -05:00
for ( j = 0 ; j < REDIS_SHARED_BULKHDR_LEN ; j + + ) {
shared . mbulkhdr [ j ] = createObject ( REDIS_STRING ,
sdscatprintf ( sdsempty ( ) , " *%d \r \n " , j ) ) ;
shared . bulkhdr [ j ] = createObject ( REDIS_STRING ,
sdscatprintf ( sdsempty ( ) , " $%d \r \n " , j ) ) ;
}
2010-06-21 18:07:48 -04:00
}
void initServerConfig ( ) {
2013-05-13 12:34:18 -04:00
int j ;
2012-03-08 04:13:12 -05:00
getRandomHexChars ( server . runid , REDIS_RUN_ID_SIZE ) ;
2013-05-09 10:57:59 -04:00
server . configfile = NULL ;
2012-12-14 11:10:40 -05:00
server . hz = REDIS_DEFAULT_HZ ;
2012-03-08 04:13:12 -05:00
server . runid [ REDIS_RUN_ID_SIZE ] = ' \0 ' ;
2012-02-02 04:02:40 -05:00
server . arch_bits = ( sizeof ( long ) = = 8 ) ? 64 : 32 ;
2010-06-21 18:07:48 -04:00
server . port = REDIS_SERVERPORT ;
2014-01-31 08:55:43 -05:00
server . tcp_backlog = REDIS_TCP_BACKLOG ;
2013-07-04 12:50:15 -04:00
server . bindaddr_count = 0 ;
2010-10-13 11:17:56 -04:00
server . unixsocket = NULL ;
2013-05-15 04:12:29 -04:00
server . unixsocketperm = REDIS_DEFAULT_UNIX_SOCKET_PERM ;
2013-07-05 05:47:20 -04:00
server . ipfd_count = 0 ;
2010-08-03 07:33:12 -04:00
server . sofd = - 1 ;
server . dbnum = REDIS_DEFAULT_DBNUM ;
2013-05-15 04:12:29 -04:00
server . verbosity = REDIS_DEFAULT_VERBOSITY ;
2010-06-21 18:07:48 -04:00
server . maxidletime = REDIS_MAXIDLETIME ;
2013-05-15 04:12:29 -04:00
server . tcpkeepalive = REDIS_DEFAULT_TCP_KEEPALIVE ;
2013-03-27 12:55:02 -04:00
server . active_expire_enabled = 1 ;
2011-11-21 10:17:51 -05:00
server . client_max_querybuf_len = REDIS_MAX_QUERYBUF_LEN ;
2010-06-21 18:07:48 -04:00
server . saveparams = NULL ;
2010-11-08 05:52:03 -05:00
server . loading = 0 ;
2013-05-15 04:12:29 -04:00
server . logfile = zstrdup ( REDIS_DEFAULT_LOGFILE ) ;
server . syslog_enabled = REDIS_DEFAULT_SYSLOG_ENABLED ;
2013-05-09 18:15:18 -04:00
server . syslog_ident = zstrdup ( REDIS_DEFAULT_SYSLOG_IDENT ) ;
2010-12-09 11:10:21 -05:00
server . syslog_facility = LOG_LOCAL0 ;
2013-05-15 04:12:29 -04:00
server . daemonize = REDIS_DEFAULT_DAEMONIZE ;
2011-12-21 04:31:34 -05:00
server . aof_state = REDIS_AOF_OFF ;
2013-05-15 04:12:29 -04:00
server . aof_fsync = REDIS_DEFAULT_AOF_FSYNC ;
server . aof_no_fsync_on_rewrite = REDIS_DEFAULT_AOF_NO_FSYNC_ON_REWRITE ;
2011-12-21 05:58:42 -05:00
server . aof_rewrite_perc = REDIS_AOF_REWRITE_PERC ;
server . aof_rewrite_min_size = REDIS_AOF_REWRITE_MIN_SIZE ;
server . aof_rewrite_base_size = 0 ;
server . aof_rewrite_scheduled = 0 ;
2011-12-21 06:17:02 -05:00
server . aof_last_fsync = time ( NULL ) ;
2012-05-25 06:11:30 -04:00
server . aof_rewrite_time_last = - 1 ;
server . aof_rewrite_time_start = - 1 ;
2012-07-16 22:06:53 -04:00
server . aof_lastbgrewrite_status = REDIS_OK ;
2012-03-25 05:27:35 -04:00
server . aof_delayed_fsync = 0 ;
2011-12-21 06:17:02 -05:00
server . aof_fd = - 1 ;
server . aof_selected_db = - 1 ; /* Make sure the first time will not match */
2011-09-16 06:35:12 -04:00
server . aof_flush_postponed_start = 0 ;
2013-05-15 04:12:29 -04:00
server . aof_rewrite_incremental_fsync = REDIS_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC ;
2013-05-09 18:15:18 -04:00
server . pidfile = zstrdup ( REDIS_DEFAULT_PID_FILE ) ;
2013-05-15 04:12:29 -04:00
server . rdb_filename = zstrdup ( REDIS_DEFAULT_RDB_FILENAME ) ;
2013-11-30 10:57:48 -05:00
server . aof_filename = zstrdup ( REDIS_DEFAULT_AOF_FILENAME ) ;
2010-06-21 18:07:48 -04:00
server . requirepass = NULL ;
2013-05-15 04:12:29 -04:00
server . rdb_compression = REDIS_DEFAULT_RDB_COMPRESSION ;
server . rdb_checksum = REDIS_DEFAULT_RDB_CHECKSUM ;
server . stop_writes_on_bgsave_err = REDIS_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR ;
server . activerehashing = REDIS_DEFAULT_ACTIVE_REHASHING ;
2013-01-23 10:23:33 -05:00
server . notify_keyspace_events = 0 ;
2011-10-31 05:48:41 -04:00
server . maxclients = REDIS_MAX_CLIENTS ;
2010-12-06 08:05:01 -05:00
server . bpop_blocked_clients = 0 ;
2013-05-15 04:12:29 -04:00
server . maxmemory = REDIS_DEFAULT_MAXMEMORY ;
server . maxmemory_policy = REDIS_DEFAULT_MAXMEMORY_POLICY ;
server . maxmemory_samples = REDIS_DEFAULT_MAXMEMORY_SAMPLES ;
2012-01-03 01:14:10 -05:00
server . hash_max_ziplist_entries = REDIS_HASH_MAX_ZIPLIST_ENTRIES ;
server . hash_max_ziplist_value = REDIS_HASH_MAX_ZIPLIST_VALUE ;
2010-06-21 18:07:48 -04:00
server . list_max_ziplist_entries = REDIS_LIST_MAX_ZIPLIST_ENTRIES ;
server . list_max_ziplist_value = REDIS_LIST_MAX_ZIPLIST_VALUE ;
2010-07-02 13:57:12 -04:00
server . set_max_intset_entries = REDIS_SET_MAX_INTSET_ENTRIES ;
2011-03-09 08:01:57 -05:00
server . zset_max_ziplist_entries = REDIS_ZSET_MAX_ZIPLIST_ENTRIES ;
server . zset_max_ziplist_value = REDIS_ZSET_MAX_ZIPLIST_VALUE ;
2010-06-21 18:07:48 -04:00
server . shutdown_asap = 0 ;
2011-10-31 06:14:24 -04:00
server . repl_ping_slave_period = REDIS_REPL_PING_SLAVE_PERIOD ;
server . repl_timeout = REDIS_REPL_TIMEOUT ;
2013-05-29 05:36:44 -04:00
server . repl_min_slaves_to_write = REDIS_DEFAULT_MIN_SLAVES_TO_WRITE ;
server . repl_min_slaves_max_lag = REDIS_DEFAULT_MIN_SLAVES_MAX_LAG ;
2011-03-29 11:51:15 -04:00
server . cluster_enabled = 0 ;
2013-04-04 06:29:10 -04:00
server . cluster_node_timeout = REDIS_CLUSTER_DEFAULT_NODE_TIMEOUT ;
2014-01-31 05:12:34 -05:00
server . cluster_migration_barrier = REDIS_CLUSTER_DEFAULT_MIGRATION_BARRIER ;
2013-05-09 18:15:18 -04:00
server . cluster_configfile = zstrdup ( REDIS_DEFAULT_CLUSTER_CONFIG_FILE ) ;
2011-11-18 08:10:48 -05:00
server . lua_caller = NULL ;
2011-05-06 11:21:27 -04:00
server . lua_time_limit = REDIS_LUA_TIME_LIMIT ;
2011-10-24 16:47:00 -04:00
server . lua_client = NULL ;
2011-10-27 08:49:10 -04:00
server . lua_timedout = 0 ;
2012-11-11 18:45:10 -05:00
server . migrate_cached_sockets = dictCreate ( & migrateCacheDictType , NULL ) ;
2012-12-12 08:59:22 -05:00
server . loading_process_events_interval_bytes = ( 1024 * 1024 * 2 ) ;
2010-06-21 18:07:48 -04:00
2014-03-20 06:47:12 -04:00
server . lruclock = getLRUClock ( ) ;
2010-06-21 18:07:48 -04:00
resetServerSaveParams ( ) ;
appendServerSaveParams ( 60 * 60 , 1 ) ; /* save after 1 hour and 1 change */
appendServerSaveParams ( 300 , 100 ) ; /* save after 5 minutes and 100 changes */
appendServerSaveParams ( 60 , 10000 ) ; /* save after 1 minute and 10000 changes */
/* Replication related */
server . masterauth = NULL ;
server . masterhost = NULL ;
server . masterport = 6379 ;
server . master = NULL ;
2013-01-30 12:33:16 -05:00
server . cached_master = NULL ;
server . repl_master_initial_offset = - 1 ;
2011-12-21 06:23:18 -05:00
server . repl_state = REDIS_REPL_NONE ;
2011-05-22 06:57:30 -04:00
server . repl_syncio_timeout = REDIS_REPL_SYNCIO_TIMEOUT ;
2013-05-15 04:12:29 -04:00
server . repl_serve_stale_data = REDIS_DEFAULT_SLAVE_SERVE_STALE_DATA ;
server . repl_slave_ro = REDIS_DEFAULT_SLAVE_READ_ONLY ;
2013-03-13 07:51:10 -04:00
server . repl_down_since = 0 ; /* Never connected, repl is down since EVER. */
2013-05-15 04:12:29 -04:00
server . repl_disable_tcp_nodelay = REDIS_DEFAULT_REPL_DISABLE_TCP_NODELAY ;
2012-08-28 11:20:26 -04:00
server . slave_priority = REDIS_DEFAULT_SLAVE_PRIORITY ;
2013-01-30 12:33:16 -05:00
server . master_repl_offset = 0 ;
/* Replication partial resync backlog */
server . repl_backlog = NULL ;
server . repl_backlog_size = REDIS_DEFAULT_REPL_BACKLOG_SIZE ;
server . repl_backlog_histlen = 0 ;
server . repl_backlog_idx = 0 ;
server . repl_backlog_off = 0 ;
server . repl_backlog_time_limit = REDIS_DEFAULT_REPL_BACKLOG_TIME_LIMIT ;
server . repl_no_slaves_since = time ( NULL ) ;
2010-06-21 18:07:48 -04:00
2012-01-23 10:12:37 -05:00
/* Client output buffer limits */
2013-05-13 12:34:18 -04:00
for ( j = 0 ; j < REDIS_CLIENT_LIMIT_NUM_CLASSES ; j + + )
server . client_obuf_limits [ j ] = clientBufferLimitsDefaults [ j ] ;
2012-01-23 10:12:37 -05:00
2010-06-21 18:07:48 -04:00
/* Double constants initialization */
R_Zero = 0.0 ;
R_PosInf = 1.0 / R_Zero ;
R_NegInf = - 1.0 / R_Zero ;
R_Nan = R_Zero / R_Zero ;
2010-11-03 07:14:36 -04:00
2013-01-16 12:00:20 -05:00
/* Command table -- we initialize it here as it is part of the
 * initial configuration, since command names may be changed via
 * redis.conf using the rename-command directive. */
server . commands = dictCreate ( & commandTableDictType , NULL ) ;
2013-03-06 10:28:26 -05:00
server . orig_commands = dictCreate ( & commandTableDictType , NULL ) ;
2010-11-03 07:14:36 -04:00
populateCommandTable ( ) ;
server . delCommand = lookupCommandByCString ( " del " ) ;
server . multiCommand = lookupCommandByCString ( " multi " ) ;
2012-02-28 12:03:08 -05:00
server . lpushCommand = lookupCommandByCString ( " lpush " ) ;
2012-09-04 04:37:49 -04:00
server . lpopCommand = lookupCommandByCString ( " lpop " ) ;
server . rpopCommand = lookupCommandByCString ( " rpop " ) ;
2011-06-30 07:27:32 -04:00
/* Slow log */
server . slowlog_log_slower_than = REDIS_SLOWLOG_LOG_SLOWER_THAN ;
server . slowlog_max_len = REDIS_SLOWLOG_MAX_LEN ;
2011-11-24 09:47:26 -05:00
2012-03-27 05:47:51 -04:00
/* Debugging */
2011-11-24 09:47:26 -05:00
server . assert_failed = " <no assertion failed> " ;
server . assert_file = " <no file> " ;
server . assert_line = 0 ;
server . bug_report_start = 0 ;
2012-03-27 05:47:51 -04:00
server . watchdog_period = 0 ;
2010-06-21 18:07:48 -04:00
}
2011-12-15 05:42:40 -05:00
/* This function will try to raise the max number of open files according to
 * the configured max number of clients. It will also account for 32 additional
 * file descriptors as we need a few more for persistence, listening
 * sockets, log files and so forth.
 *
 * If it is not possible to set the limit according to the configured
 * max number of clients, the function will do the reverse, setting
 * server.maxclients to the value that we can actually handle. */
void adjustOpenFilesLimit ( void ) {
rlim_t maxfiles = server . maxclients + 32 ;
struct rlimit limit ;
if ( getrlimit ( RLIMIT_NOFILE , & limit ) = = - 1 ) {
redisLog ( REDIS_WARNING , " Unable to obtain the current NOFILE limit (%s), assuming 1024 and setting the max clients configuration accordingly. " ,
strerror ( errno ) ) ;
server . maxclients = 1024 - 32 ;
} else {
rlim_t oldlimit = limit . rlim_cur ;
/* Set the max number of files if the current limit is not enough
* for our needs . */
if ( oldlimit < maxfiles ) {
2012-04-03 05:53:45 -04:00
rlim_t f ;
f = maxfiles ;
while ( f > oldlimit ) {
limit . rlim_cur = f ;
limit . rlim_max = f ;
if ( setrlimit ( RLIMIT_NOFILE , & limit ) ! = - 1 ) break ;
f - = 128 ;
}
if ( f < oldlimit ) f = oldlimit ;
if ( f ! = maxfiles ) {
server . maxclients = f - 32 ;
2011-12-15 05:42:40 -05:00
redisLog ( REDIS_WARNING , " Unable to set the max number of files limit to %d (%s), setting the max clients configuration to %d. " ,
( int ) maxfiles , strerror ( errno ) , ( int ) server . maxclients ) ;
} else {
redisLog ( REDIS_NOTICE , " Max number of open files set to %d " ,
( int ) maxfiles ) ;
}
}
}
}
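/* Worked example (illustrative numbers): with maxclients 10000 the function
 * asks for 10032 descriptors. If the kernel refuses every setrlimit() call
 * and the pre-existing limit is 4096, the loop above walks down in steps of
 * 128 until it gives up, f is clamped back to 4096, and maxclients ends up
 * being 4096 - 32 = 4064. */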
2013-08-22 08:01:16 -04:00
/* Initialize a set of file descriptors to listen to the specified 'port'
* binding the addresses specified in the Redis server configuration .
*
* The listening file descriptors are stored in the integer array ' fds '
* and their number is set in ' * count ' .
*
* The addresses to bind are specified in the global server . bindaddr array
* and their number is server . bindaddr_count . If the server configuration
* contains no specific addresses to bind , this function will try to
* bind * ( all addresses ) for both the IPv4 and IPv6 protocols .
*
* On success the function returns REDIS_OK .
*
 * On error the function returns REDIS_ERR. This happens when at least one
 * of the server.bindaddr addresses could not be bound, or when no bind
 * addresses were specified in the server configuration but the function
 * was not able to bind * for at least one of the IPv4 or IPv6 protocols. */
int listenToPort ( int port , int * fds , int * count ) {
int j ;
/* Force binding of 0.0.0.0 if no bind address is specified, always
* entering the loop if j = = 0. */
if ( server . bindaddr_count = = 0 ) server . bindaddr [ 0 ] = NULL ;
for ( j = 0 ; j < server . bindaddr_count | | j = = 0 ; j + + ) {
if ( server . bindaddr [ j ] = = NULL ) {
/* Bind * for both IPv6 and IPv4, we enter here only if
* server . bindaddr_count = = 0. */
2014-01-31 08:55:43 -05:00
fds [ * count ] = anetTcp6Server ( server . neterr , port , NULL ,
server . tcp_backlog ) ;
2013-08-22 08:01:16 -04:00
if ( fds [ * count ] ! = ANET_ERR ) ( * count ) + + ;
2014-01-31 08:55:43 -05:00
fds [ * count ] = anetTcpServer ( server . neterr , port , NULL ,
server . tcp_backlog ) ;
2013-08-22 08:01:16 -04:00
if ( fds [ * count ] ! = ANET_ERR ) ( * count ) + + ;
/* Exit the loop if we were able to bind * on IPv4 or IPv6,
* otherwise fds [ * count ] will be ANET_ERR and we ' ll print an
* error and return to the caller with an error . */
if ( * count ) break ;
} else if ( strchr ( server . bindaddr [ j ] , ' : ' ) ) {
/* Bind IPv6 address. */
2014-01-31 08:55:43 -05:00
fds [ * count ] = anetTcp6Server ( server . neterr , port , server . bindaddr [ j ] ,
server . tcp_backlog ) ;
2013-08-22 08:01:16 -04:00
} else {
/* Bind IPv4 address. */
2014-01-31 08:55:43 -05:00
fds [ * count ] = anetTcpServer ( server . neterr , port , server . bindaddr [ j ] ,
server . tcp_backlog ) ;
2013-08-22 08:01:16 -04:00
}
if ( fds [ * count ] = = ANET_ERR ) {
redisLog ( REDIS_WARNING ,
" Creating Server TCP listening socket %s:%d: %s " ,
server . bindaddr [ j ] ? server . bindaddr [ j ] : " * " ,
2014-02-19 17:26:33 -05:00
port , server . neterr ) ;
2013-08-22 08:01:16 -04:00
return REDIS_ERR ;
}
( * count ) + + ;
}
return REDIS_OK ;
}
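/* Usage sketch (hypothetical values, mirroring how initServer() uses this
 * function):
 *
 *   int fds[16];      // assumption: sized like server.ipfd
 *   int count = 0;
 *   if (listenToPort(7777, fds, &count) == REDIS_ERR) {
 *       redisLog(REDIS_WARNING, "Could not listen on the example port");
 *   }
 *
 * On success 'count' holds the number of listening sockets created: one per
 * configured bind address, or one or two sockets when binding "*" depending
 * on IPv4/IPv6 availability. */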
2014-03-19 07:55:49 -04:00
/* Resets the stats that we expose via INFO or other means that we want
* to reset via CONFIG RESETSTAT . The function is also used in order to
* initialize these fields in initServer ( ) at server startup . */
void resetServerStats ( void ) {
server . stat_numcommands = 0 ;
server . stat_numconnections = 0 ;
server . stat_expiredkeys = 0 ;
server . stat_evictedkeys = 0 ;
server . stat_keyspace_misses = 0 ;
server . stat_keyspace_hits = 0 ;
server . stat_fork_time = 0 ;
server . stat_rejected_conn = 0 ;
server . stat_sync_full = 0 ;
server . stat_sync_partial_ok = 0 ;
server . stat_sync_partial_err = 0 ;
memset ( server . ops_sec_samples , 0 , sizeof ( server . ops_sec_samples ) ) ;
server . ops_sec_idx = 0 ;
server . ops_sec_last_sample_time = mstime ( ) ;
server . ops_sec_last_sample_ops = 0 ;
}
2010-06-21 18:07:48 -04:00
void initServer ( ) {
int j ;
signal ( SIGHUP , SIG_IGN ) ;
signal ( SIGPIPE , SIG_IGN ) ;
2011-03-06 11:49:22 -05:00
setupSignalHandlers ( ) ;
2010-06-21 18:07:48 -04:00
2010-12-09 11:10:21 -05:00
if ( server . syslog_enabled ) {
openlog ( server . syslog_ident , LOG_PID | LOG_NDELAY | LOG_NOWAIT ,
server . syslog_facility ) ;
}
2012-01-12 10:02:57 -05:00
server . current_client = NULL ;
2010-06-21 18:07:48 -04:00
server . clients = listCreate ( ) ;
2012-01-23 10:12:37 -05:00
server . clients_to_close = listCreate ( ) ;
2010-06-21 18:07:48 -04:00
server . slaves = listCreate ( ) ;
server . monitors = listCreate ( ) ;
2012-11-02 11:31:28 -04:00
server . slaveseldb = - 1 ; /* Force to emit the first SELECT command. */
2010-12-06 10:39:39 -05:00
server . unblocked_clients = listCreate ( ) ;
2012-09-04 04:37:49 -04:00
server . ready_keys = listCreate ( ) ;
2013-12-04 09:52:20 -05:00
server . clients_waiting_acks = listCreate ( ) ;
server . get_ack_from_slaves = 0 ;
2014-02-04 09:52:09 -05:00
server . clients_paused = 0 ;
2010-12-29 13:39:42 -05:00
2010-06-21 18:07:48 -04:00
createSharedObjects ( ) ;
2011-12-15 05:42:40 -05:00
adjustOpenFilesLimit ( ) ;
2013-06-28 11:08:03 -04:00
server . el = aeCreateEventLoop ( server . maxclients + REDIS_EVENTLOOP_FDSET_INCR ) ;
2010-06-21 18:07:48 -04:00
server . db = zmalloc ( sizeof ( redisDb ) * server . dbnum ) ;
2011-02-07 06:52:01 -05:00
2013-08-22 08:01:16 -04:00
/* Open the TCP listening socket for the user commands. */
2013-12-23 05:31:35 -05:00
if ( server . port ! = 0 & &
listenToPort ( server . port , server . ipfd , & server . ipfd_count ) = = REDIS_ERR )
2013-08-22 08:01:16 -04:00
exit ( 1 ) ;
2013-07-05 05:47:20 -04:00
/* Open the listening Unix domain socket. */
2010-10-13 11:17:56 -04:00
if ( server . unixsocket ! = NULL ) {
unlink ( server . unixsocket ) ; /* don't care if this fails */
2014-01-31 08:55:43 -05:00
server . sofd = anetUnixServer ( server . neterr , server . unixsocket ,
server . unixsocketperm , server . tcp_backlog ) ;
2010-08-03 07:33:12 -04:00
if ( server . sofd = = ANET_ERR ) {
redisLog ( REDIS_WARNING , " Opening socket: %s " , server . neterr ) ;
exit ( 1 ) ;
}
2010-08-01 16:55:24 -04:00
}
2013-07-05 05:47:20 -04:00
/* Abort if there are no listening sockets at all. */
if ( server . ipfd_count = = 0 & & server . sofd < 0 ) {
2010-08-03 07:33:12 -04:00
redisLog ( REDIS_WARNING , " Configured to not listen anywhere, exiting. " ) ;
2010-06-21 18:07:48 -04:00
exit ( 1 ) ;
}
2013-07-05 05:47:20 -04:00
/* Create the Redis databases, and initialize other internal state. */
2010-06-21 18:07:48 -04:00
for ( j = 0 ; j < server . dbnum ; j + + ) {
server . db [ j ] . dict = dictCreate ( & dbDictType , NULL ) ;
server . db [ j ] . expires = dictCreate ( & keyptrDictType , NULL ) ;
server . db [ j ] . blocking_keys = dictCreate ( & keylistDictType , NULL ) ;
2012-09-04 04:37:49 -04:00
server . db [ j ] . ready_keys = dictCreate ( & setDictType , NULL ) ;
2010-06-21 18:07:48 -04:00
server . db [ j ] . watched_keys = dictCreate ( & keylistDictType , NULL ) ;
2014-03-20 06:57:29 -04:00
server . db [ j ] . eviction_pool = evictionPoolAlloc ( ) ;
2010-06-21 18:07:48 -04:00
server . db [ j ] . id = j ;
2013-08-06 09:00:43 -04:00
server . db [ j ] . avg_ttl = 0 ;
2010-06-21 18:07:48 -04:00
}
server . pubsub_channels = dictCreate ( & keylistDictType , NULL ) ;
server . pubsub_patterns = listCreate ( ) ;
listSetFreeMethod ( server . pubsub_patterns , freePubsubPattern ) ;
listSetMatchMethod ( server . pubsub_patterns , listMatchPubsubPattern ) ;
server . cronloops = 0 ;
2011-12-21 06:22:13 -05:00
server . rdb_child_pid = - 1 ;
2011-12-21 06:17:02 -05:00
server . aof_child_pid = - 1 ;
2012-05-22 07:03:41 -04:00
aofRewriteBufferReset ( ) ;
2011-12-21 06:17:02 -05:00
server . aof_buf = sdsempty ( ) ;
2013-04-02 08:05:50 -04:00
server . lastsave = time ( NULL ) ; /* At startup we consider the DB saved. */
server . lastbgsave_try = 0 ; /* At startup we never tried to BGSAVE. */
2012-05-25 06:11:30 -04:00
server . rdb_save_time_last = - 1 ;
server . rdb_save_time_start = - 1 ;
2010-06-21 18:07:48 -04:00
server . dirty = 0 ;
2014-03-19 07:55:49 -04:00
resetServerStats ( ) ;
/* A few stats we don't want to reset: server startup time, and peak mem. */
2010-06-21 18:07:48 -04:00
server . stat_starttime = time ( NULL ) ;
2011-04-21 04:49:52 -04:00
server . stat_peak_memory = 0 ;
2012-03-07 07:05:46 -05:00
server . lastbgsave_status = REDIS_OK ;
2014-02-12 06:47:10 -05:00
server . aof_last_write_status = REDIS_OK ;
server . aof_last_write_errno = 0 ;
2013-05-29 05:36:44 -04:00
server . repl_good_slaves_count = 0 ;
2014-02-13 09:09:41 -05:00
updateCachedTime ( ) ;
2013-07-05 05:47:20 -04:00
/* Create the serverCron() time event, that's our main way to process
* background operations . */
2013-02-27 05:00:47 -05:00
if ( aeCreateTimeEvent ( server . el , 1 , serverCron , NULL , NULL ) = = AE_ERR ) {
2013-02-27 06:00:11 -05:00
redisPanic ( " Can't create the serverCron time event. " ) ;
2013-02-27 05:00:47 -05:00
exit ( 1 ) ;
}
2013-07-05 05:47:20 -04:00
/* Create an event handler for accepting new connections in TCP and Unix
* domain sockets . */
for ( j = 0 ; j < server . ipfd_count ; j + + ) {
if ( aeCreateFileEvent ( server . el , server . ipfd [ j ] , AE_READABLE ,
acceptTcpHandler , NULL ) = = AE_ERR )
{
redisPanic (
" Unrecoverable error creating server.ipfd file event. " ) ;
}
}
2010-08-03 07:33:12 -04:00
if ( server . sofd > 0 & & aeCreateFileEvent ( server . el , server . sofd , AE_READABLE ,
2012-08-24 06:55:37 -04:00
acceptUnixHandler , NULL ) = = AE_ERR ) redisPanic ( " Unrecoverable error creating server.sofd file event. " ) ;
2010-06-21 18:07:48 -04:00
2013-07-05 05:47:20 -04:00
/* Open the AOF file if needed. */
2011-12-21 04:31:34 -05:00
if ( server . aof_state = = REDIS_AOF_ON ) {
2011-12-21 06:17:02 -05:00
server . aof_fd = open ( server . aof_filename ,
2011-12-21 04:31:34 -05:00
O_WRONLY | O_APPEND | O_CREAT , 0644 ) ;
2011-12-21 06:17:02 -05:00
if ( server . aof_fd = = - 1 ) {
2010-06-21 18:07:48 -04:00
redisLog ( REDIS_WARNING , " Can't open the append-only file: %s " ,
strerror ( errno ) ) ;
exit ( 1 ) ;
}
}
2012-02-02 04:17:16 -05:00
/* 32 bit instances are limited to 4GB of address space, so if there is
 * no explicit limit in the user provided configuration we set a limit
 * at 3 GB using maxmemory with the 'noeviction' policy. This avoids
 * useless crashes of the Redis instance for out of memory. */
2012-02-02 04:17:16 -05:00
if ( server . arch_bits = = 32 & & server . maxmemory = = 0 ) {
2012-10-22 04:43:39 -04:00
redisLog ( REDIS_WARNING , " Warning: 32 bit instance detected but no memory limit set. Setting 3 GB maxmemory limit with 'noeviction' policy now. " ) ;
server . maxmemory = 3072LL * ( 1024 * 1024 ) ; /* 3 GB */
2012-02-02 04:17:16 -05:00
server . maxmemory_policy = REDIS_MAXMEMORY_NO_EVICTION ;
}
2011-03-29 11:51:15 -04:00
if ( server . cluster_enabled ) clusterInit ( ) ;
2013-06-24 13:27:49 -04:00
replicationScriptCacheInit ( ) ;
2011-04-30 11:46:52 -04:00
scriptingInit ( ) ;
2011-06-30 07:27:32 -04:00
slowlogInit ( ) ;
2011-09-13 10:10:26 -04:00
bioInit ( ) ;
2010-06-21 18:07:48 -04:00
}
2010-11-03 06:23:59 -04:00
/* Populates the Redis Command Table starting from the hard coded list
 * we have at the top of the redis.c file. */
void populateCommandTable ( void ) {
int j ;
2011-01-24 04:56:06 -05:00
int numcommands = sizeof ( redisCommandTable ) / sizeof ( struct redisCommand ) ;
2010-11-03 06:23:59 -04:00
for ( j = 0 ; j < numcommands ; j + + ) {
2011-01-24 04:56:06 -05:00
struct redisCommand * c = redisCommandTable + j ;
2011-09-26 09:40:39 -04:00
char * f = c - > sflags ;
2013-03-06 10:28:26 -05:00
int retval1 , retval2 ;
2010-06-21 18:07:48 -04:00
2011-09-26 09:40:39 -04:00
while ( * f ! = ' \0 ' ) {
switch ( * f ) {
case ' w ' : c - > flags | = REDIS_CMD_WRITE ; break ;
case ' r ' : c - > flags | = REDIS_CMD_READONLY ; break ;
case ' m ' : c - > flags | = REDIS_CMD_DENYOOM ; break ;
case ' a ' : c - > flags | = REDIS_CMD_ADMIN ; break ;
case ' p ' : c - > flags | = REDIS_CMD_PUBSUB ; break ;
2011-09-27 07:45:46 -04:00
case ' s ' : c - > flags | = REDIS_CMD_NOSCRIPT ; break ;
case ' R ' : c - > flags | = REDIS_CMD_RANDOM ; break ;
2012-01-31 10:09:21 -05:00
case ' S ' : c - > flags | = REDIS_CMD_SORT_FOR_SCRIPT ; break ;
2012-07-22 11:13:49 -04:00
case ' l ' : c - > flags | = REDIS_CMD_LOADING ; break ;
case ' t ' : c - > flags | = REDIS_CMD_STALE ; break ;
2012-10-16 11:35:50 -04:00
case ' M ' : c - > flags | = REDIS_CMD_SKIP_MONITOR ; break ;
2013-02-20 11:28:35 -05:00
case ' k ' : c - > flags | = REDIS_CMD_ASKING ; break ;
2011-09-26 09:40:39 -04:00
default : redisPanic ( " Unsupported command flag " ) ; break ;
}
f + + ;
}
2013-03-06 10:28:26 -05:00
retval1 = dictAdd ( server . commands , sdsnew ( c - > name ) , c ) ;
/* Populate an additional dictionary that will be unaffected
* by rename - command statements in redis . conf . */
retval2 = dictAdd ( server . orig_commands , sdsnew ( c - > name ) , c ) ;
redisAssert ( retval1 = = DICT_OK & & retval2 = = DICT_OK ) ;
2010-11-03 06:23:59 -04:00
}
2010-06-21 18:07:48 -04:00
}
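/* For illustration: a command declared in the table with sflags "wm" ends up
 * with flags == (REDIS_CMD_WRITE|REDIS_CMD_DENYOOM) after the loop above,
 * while a read only command declared with "rR" gets
 * (REDIS_CMD_READONLY|REDIS_CMD_RANDOM). */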
2011-01-24 04:56:06 -05:00
void resetCommandTableStats ( void ) {
int numcommands = sizeof ( redisCommandTable ) / sizeof ( struct redisCommand ) ;
int j ;
for ( j = 0 ; j < numcommands ; j + + ) {
struct redisCommand * c = redisCommandTable + j ;
c - > microseconds = 0 ;
c - > calls = 0 ;
}
}
2012-02-28 18:46:50 -05:00
/* ========================== Redis OP Array API ============================ */
void redisOpArrayInit ( redisOpArray * oa ) {
oa - > ops = NULL ;
oa - > numops = 0 ;
}
int redisOpArrayAppend ( redisOpArray * oa , struct redisCommand * cmd , int dbid ,
robj * * argv , int argc , int target )
{
redisOp * op ;
oa - > ops = zrealloc ( oa - > ops , sizeof ( redisOp ) * ( oa - > numops + 1 ) ) ;
op = oa - > ops + oa - > numops ;
op - > cmd = cmd ;
op - > dbid = dbid ;
op - > argv = argv ;
op - > argc = argc ;
op - > target = target ;
oa - > numops + + ;
return oa - > numops ;
}
void redisOpArrayFree ( redisOpArray * oa ) {
while ( oa - > numops ) {
int j ;
redisOp * op ;
oa - > numops - - ;
op = oa - > ops + oa - > numops ;
for ( j = 0 ; j < op - > argc ; j + + )
decrRefCount ( op - > argv [ j ] ) ;
zfree ( op - > argv ) ;
}
zfree ( oa - > ops ) ;
}
2010-06-21 18:07:48 -04:00
/* ====================== Commands lookup and execution ===================== */
2010-11-03 06:23:59 -04:00
struct redisCommand * lookupCommand ( sds name ) {
return dictFetchValue ( server . commands , name ) ;
}
struct redisCommand * lookupCommandByCString ( char * s ) {
struct redisCommand * cmd ;
sds name = sdsnew ( s ) ;
cmd = dictFetchValue ( server . commands , name ) ;
sdsfree ( name ) ;
return cmd ;
2010-06-21 18:07:48 -04:00
}
2013-03-06 10:28:26 -05:00
/* Lookup the command in the current table, if not found also check in
* the original table containing the original command names unaffected by
* redis . conf rename - command statement .
*
* This is used by functions rewriting the argument vector such as
* rewriteClientCommandVector ( ) in order to set client - > cmd pointer
* correctly even if the command was renamed . */
struct redisCommand * lookupCommandOrOriginal ( sds name ) {
struct redisCommand * cmd = dictFetchValue ( server . commands , name ) ;
if ( ! cmd ) cmd = dictFetchValue ( server . orig_commands , name ) ;
return cmd ;
}
2012-02-28 10:17:00 -05:00
/* Propagate the specified command (in the context of the specified database id)
 * to AOF and Slaves.
 *
 * flags is a bitwise OR of:
 * + REDIS_PROPAGATE_NONE (no propagation of command at all)
 * + REDIS_PROPAGATE_AOF (propagate into the AOF file if it is enabled)
 * + REDIS_PROPAGATE_REPL (propagate into the replication link)
 */
void propagate ( struct redisCommand * cmd , int dbid , robj * * argv , int argc ,
int flags )
{
if ( server . aof_state ! = REDIS_AOF_OFF & & flags & REDIS_PROPAGATE_AOF )
feedAppendOnlyFile ( cmd , dbid , argv , argc ) ;
2013-01-30 12:33:16 -05:00
if ( flags & REDIS_PROPAGATE_REPL )
2012-02-28 10:17:00 -05:00
replicationFeedSlaves ( server . slaves , dbid , argv , argc ) ;
}
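/* Illustrative call (hypothetical arguments): a command that deleted a key
 * and wants the deletion to reach both the AOF and the replication stream
 * could do:
 *
 *   propagate(server.delCommand, c->db->id, argv, 2,
 *             REDIS_PROPAGATE_AOF|REDIS_PROPAGATE_REPL);
 *
 * In practice most commands never call propagate() directly: call() does it
 * for them when server.dirty changed during the command execution. */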
2012-02-28 18:46:50 -05:00
/* Used inside commands to schedule the propagation of additional commands
* after the current command is propagated to AOF / Replication . */
2012-02-28 12:03:08 -05:00
void alsoPropagate ( struct redisCommand * cmd , int dbid , robj * * argv , int argc ,
int target )
{
2012-02-28 18:46:50 -05:00
redisOpArrayAppend ( & server . also_propagate , cmd , dbid , argv , argc , target ) ;
2012-02-28 12:03:08 -05:00
}
2013-06-21 06:07:53 -04:00
/* It is possible to call the function forceCommandPropagation() inside a
 * Redis command implementation in order to force the propagation of a
 * specific command execution into AOF / Replication. */
void forceCommandPropagation ( redisClient * c , int flags ) {
if ( flags & REDIS_PROPAGATE_REPL ) c - > flags | = REDIS_FORCE_REPL ;
if ( flags & REDIS_PROPAGATE_AOF ) c - > flags | = REDIS_FORCE_AOF ;
}
2010-06-21 18:07:48 -04:00
/* Call() is the core of Redis execution of a command */
2012-02-02 10:30:52 -05:00
void call ( redisClient * c , int flags ) {
2014-02-07 12:29:20 -05:00
long long dirty , start , duration ;
2013-06-21 06:07:53 -04:00
int client_old_flags = c - > flags ;
2010-06-21 18:07:48 -04:00
2012-02-28 10:17:00 -05:00
/* Send the command to clients in MONITOR mode, but only if the command
 * was not generated from reading an AOF. */
2012-10-16 11:35:50 -04:00
if ( listLength ( server . monitors ) & &
! server . loading & &
! ( c - > cmd - > flags & REDIS_CMD_SKIP_MONITOR ) )
{
2012-03-07 06:12:15 -05:00
replicationFeedMonitors ( c , server . monitors , c - > db - > id , c - > argv , c - > argc ) ;
2012-10-16 11:35:50 -04:00
}
2012-02-28 10:17:00 -05:00
/* Call the command. */
2013-06-21 06:07:53 -04:00
c - > flags & = ~ ( REDIS_FORCE_AOF | REDIS_FORCE_REPL ) ;
2012-02-28 18:46:50 -05:00
redisOpArrayInit ( & server . also_propagate ) ;
2010-06-21 18:07:48 -04:00
dirty = server . dirty ;
2014-02-07 12:29:20 -05:00
start = ustime ( ) ;
2011-07-08 06:59:30 -04:00
c - > cmd - > proc ( c ) ;
2011-06-30 07:27:32 -04:00
duration = ustime ( ) - start ;
2014-02-07 12:29:20 -05:00
dirty = server . dirty - dirty ;
2012-02-02 10:30:52 -05:00
/* When EVAL is called loading the AOF we don't want commands called
* from Lua to go into the slowlog or to populate statistics . */
if ( server . loading & & c - > flags & REDIS_LUA_CLIENT )
flags & = ~ ( REDIS_CALL_SLOWLOG | REDIS_CALL_STATS ) ;
2013-06-21 06:07:53 -04:00
/* If the caller is Lua, we want to force the EVAL caller to propagate
* the script if the command flag or client flag are forcing the
* propagation . */
if ( c - > flags & REDIS_LUA_CLIENT & & server . lua_caller ) {
if ( c - > flags & REDIS_FORCE_REPL )
server . lua_caller - > flags | = REDIS_FORCE_REPL ;
if ( c - > flags & REDIS_FORCE_AOF )
server . lua_caller - > flags | = REDIS_FORCE_AOF ;
}
2012-02-28 10:17:00 -05:00
/* Log the command into the Slow log if needed, and populate the
* per - command statistics that we show in INFO commandstats . */
2013-01-19 06:52:05 -05:00
if ( flags & REDIS_CALL_SLOWLOG & & c - > cmd - > proc ! = execCommand )
2012-02-02 10:30:52 -05:00
slowlogPushEntryIfNeeded ( c - > argv , c - > argc , duration ) ;
if ( flags & REDIS_CALL_STATS ) {
c - > cmd - > microseconds + = duration ;
c - > cmd - > calls + + ;
}
2012-02-28 10:17:00 -05:00
/* Propagate the command into the AOF and replication link */
2012-02-02 10:30:52 -05:00
if ( flags & REDIS_CALL_PROPAGATE ) {
2012-02-28 10:17:00 -05:00
int flags = REDIS_PROPAGATE_NONE ;
2013-06-21 06:07:53 -04:00
if ( c - > flags & REDIS_FORCE_REPL ) flags | = REDIS_PROPAGATE_REPL ;
if ( c - > flags & REDIS_FORCE_AOF ) flags | = REDIS_PROPAGATE_AOF ;
2012-02-28 10:17:00 -05:00
if ( dirty )
flags | = ( REDIS_PROPAGATE_REPL | REDIS_PROPAGATE_AOF ) ;
if ( flags ! = REDIS_PROPAGATE_NONE )
propagate ( c - > cmd , c - > db - > id , c - > argv , c - > argc , flags ) ;
2012-02-02 10:30:52 -05:00
}
2013-01-10 05:19:40 -05:00
2013-06-21 06:07:53 -04:00
/* Restore the old FORCE_AOF/REPL flags, since call can be executed
* recursively . */
c - > flags & = ~ ( REDIS_FORCE_AOF | REDIS_FORCE_REPL ) ;
c - > flags | = client_old_flags & ( REDIS_FORCE_AOF | REDIS_FORCE_REPL ) ;
2013-01-10 05:19:40 -05:00
/* Handle the alsoPropagate() API for commands that want to propagate
* multiple separate commands. */
2012-02-28 18:46:50 -05:00
if ( server . also_propagate . numops ) {
2012-02-28 12:03:08 -05:00
int j ;
2012-02-28 18:46:50 -05:00
redisOp * rop ;
2012-02-28 12:03:08 -05:00
2012-02-28 18:46:50 -05:00
for ( j = 0 ; j < server . also_propagate . numops ; j + + ) {
rop = & server . also_propagate . ops [ j ] ;
propagate ( rop - > cmd , rop - > dbid , rop - > argv , rop - > argc , rop - > target ) ;
}
redisOpArrayFree ( & server . also_propagate ) ;
2012-02-28 12:03:08 -05:00
}
2010-06-21 18:07:48 -04:00
server . stat_numcommands + + ;
}
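/* Hedged sketch (illustration only, not in the original source): internal
 * callers can pick which REDIS_CALL_* side effects they want when executing
 * a command through call(). The helper name is an assumption. */
void exampleCallWithoutSlowlog(redisClient *c) {
    /* Execute the already-parsed command in c->argv/c->argc, keeping
     * statistics and propagation but skipping the slow log. */
    call(c, REDIS_CALL_STATS|REDIS_CALL_PROPAGATE);
}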
/* If this function gets called we already read a whole
2012-11-01 06:14:55 -04:00
* command , arguments are in the client argv / argc fields .
2010-06-21 18:07:48 -04:00
* processCommand() executes the command or prepares the
* server for a bulk read from the client .
*
* If REDIS_OK is returned the client is still alive and valid and
2013-01-16 12:00:20 -05:00
* other operations can be performed by the caller. Otherwise
* if REDIS_ERR is returned the client was destroyed (i.e. after QUIT). */
2010-06-21 18:07:48 -04:00
int processCommand ( redisClient * c ) {
2010-10-13 05:25:40 -04:00
/* The QUIT command is handled separately. Normal command procs go through
* the replication checks, and QUIT would cause trouble when
* FORCE_REPLICATION is enabled if it were implemented as a regular
* command proc. */
2010-06-21 18:07:48 -04:00
if ( ! strcasecmp ( c - > argv [ 0 ] - > ptr , " quit " ) ) {
2010-10-13 05:25:40 -04:00
addReply ( c , shared . ok ) ;
2010-10-28 10:07:45 -04:00
c - > flags | = REDIS_CLOSE_AFTER_REPLY ;
2010-10-15 09:40:25 -04:00
return REDIS_ERR ;
2010-06-21 18:07:48 -04:00
}
/* Now lookup the command and check ASAP about trivial error conditions
2011-07-08 06:59:30 -04:00
* such as wrong arity , bad command name and so forth . */
2011-11-24 08:56:34 -05:00
c - > cmd = c - > lastcmd = lookupCommand ( c - > argv [ 0 ] - > ptr ) ;
2011-07-08 06:59:30 -04:00
if ( ! c - > cmd ) {
2012-11-15 14:11:05 -05:00
flagTransaction ( c ) ;
2010-09-02 13:52:24 -04:00
addReplyErrorFormat ( c , " unknown command '%s' " ,
( char * ) c - > argv [ 0 ] - > ptr ) ;
2010-10-15 09:40:25 -04:00
return REDIS_OK ;
2011-07-08 06:59:30 -04:00
} else if ( ( c - > cmd - > arity > 0 & & c - > cmd - > arity ! = c - > argc ) | |
( c - > argc < - c - > cmd - > arity ) ) {
2012-11-15 14:11:05 -05:00
flagTransaction ( c ) ;
2010-09-02 13:52:24 -04:00
addReplyErrorFormat ( c , " wrong number of arguments for '%s' command " ,
2011-07-08 06:59:30 -04:00
c - > cmd - > name ) ;
2010-10-15 09:40:25 -04:00
return REDIS_OK ;
2010-06-21 18:07:48 -04:00
}
/* Check if the user is authenticated */
2011-07-08 06:59:30 -04:00
if ( server . requirepass & & ! c - > authenticated & & c - > cmd - > proc ! = authCommand )
{
2012-11-15 14:11:05 -05:00
flagTransaction ( c ) ;
2013-02-12 10:25:41 -05:00
addReply ( c , shared . noautherr ) ;
2010-10-15 09:40:25 -04:00
return REDIS_OK ;
2010-06-21 18:07:48 -04:00
}
2013-03-05 07:02:44 -05:00
/* If cluster is enabled perform the cluster redirection here.
* However we don ' t perform the redirection if :
* 1 ) The sender of this command is our master .
* 2 ) The command has no key arguments . */
2011-03-29 11:51:15 -04:00
if ( server . cluster_enabled & &
2013-03-05 07:02:44 -05:00
! ( c - > flags & REDIS_MASTER ) & &
! ( c - > cmd - > getkeys_proc = = NULL & & c - > cmd - > firstkey = = 0 ) )
{
2011-03-29 11:51:15 -04:00
int hashslot ;
2013-02-14 07:20:56 -05:00
if ( server . cluster - > state ! = REDIS_CLUSTER_OK ) {
2014-03-11 10:19:00 -04:00
flagTransaction ( c ) ;
2013-02-15 10:53:24 -05:00
addReplySds ( c , sdsnew ( " -CLUSTERDOWN The cluster is down. Use CLUSTER INFO for more information \r \n " ) ) ;
2011-03-29 11:51:15 -04:00
return REDIS_OK ;
} else {
2014-03-07 07:19:09 -05:00
int error_code ;
clusterNode * n = getNodeByQuery ( c , c - > cmd , c - > argv , c - > argc , & hashslot , & error_code ) ;
2011-03-29 11:51:15 -04:00
if ( n = = NULL ) {
2014-03-11 10:19:00 -04:00
flagTransaction ( c ) ;
2014-03-07 07:19:09 -05:00
if ( error_code = = REDIS_CLUSTER_REDIR_CROSS_SLOT ) {
addReplySds ( c , sdsnew ( " -CROSSSLOT Keys in request don't hash to the same slot \r \n " ) ) ;
} else if ( error_code = = REDIS_CLUSTER_REDIR_UNSTABLE ) {
/* The request spans multiple keys in the same slot,
* but the slot is not "stable" currently as there is
* a migration or import in progress. */
addReplySds ( c , sdsnew ( " -TRYAGAIN Multiple keys request during rehashing of slot \r \n " ) ) ;
} else {
redisPanic ( " getNodeByQuery() unknown error. " ) ;
}
2011-03-29 11:51:15 -04:00
return REDIS_OK ;
2013-02-14 07:20:56 -05:00
} else if ( n ! = server . cluster - > myself ) {
2014-03-03 11:11:51 -05:00
flagTransaction ( c ) ;
2011-03-29 11:51:15 -04:00
addReplySds ( c , sdscatprintf ( sdsempty ( ) ,
2014-03-07 07:19:09 -05:00
" -%s %d %s:%d \r \n " ,
( error_code = = REDIS_CLUSTER_REDIR_ASK ) ? " ASK " : " MOVED " ,
2011-05-05 05:13:21 -04:00
hashslot , n - > ip , n - > port ) ) ;
2011-03-29 11:51:15 -04:00
return REDIS_OK ;
}
}
}
2010-10-11 07:05:09 -04:00
/* Handle the maxmemory directive.
*
* First we try to free some memory if possible (if there are volatile
* keys in the dataset). If there are none, the only thing we can do
* is return an error. */
2012-02-04 08:05:54 -05:00
if ( server . maxmemory ) {
int retval = freeMemoryIfNeeded ( ) ;
if ( ( c - > cmd - > flags & REDIS_CMD_DENYOOM ) & & retval = = REDIS_ERR ) {
2012-11-15 14:11:05 -05:00
flagTransaction ( c ) ;
2012-03-20 12:32:48 -04:00
addReply ( c , shared . oomerr ) ;
2012-02-04 08:05:54 -05:00
return REDIS_OK ;
}
2010-06-21 18:07:48 -04:00
}
2013-11-28 10:25:49 -05:00
/* Don't accept write commands if there are problems persisting on disk
* and if this is a master instance . */
2014-02-12 06:47:10 -05:00
if ( ( ( server . stop_writes_on_bgsave_err & &
server . saveparamslen > 0 & &
server . lastbgsave_status = = REDIS_ERR ) | |
server . aof_last_write_status = = REDIS_ERR ) & &
2014-01-28 04:10:56 -05:00
server . masterhost = = NULL & &
2013-11-28 10:16:58 -05:00
( c - > cmd - > flags & REDIS_CMD_WRITE | |
c - > cmd - > proc = = pingCommand ) )
2012-03-07 07:05:46 -05:00
{
2012-11-15 14:11:05 -05:00
flagTransaction ( c ) ;
2014-02-12 06:47:10 -05:00
if ( server . aof_last_write_status = = REDIS_OK )
addReply ( c , shared . bgsaveerr ) ;
else
addReplySds ( c ,
sdscatprintf ( sdsempty ( ) ,
" -MISCONF Errors writing to the AOF file: %s \r \n " ,
strerror ( server . aof_last_write_errno ) ) ) ;
2012-03-07 07:05:46 -05:00
return REDIS_OK ;
}
2013-05-29 05:36:44 -04:00
/* Don't accept write commands if there are not enough good slaves and
2013-11-28 10:25:49 -05:00
* the user configured the min-slaves-to-write option. */
2013-05-29 05:45:40 -04:00
if ( server . repl_min_slaves_to_write & &
server . repl_min_slaves_max_lag & &
c - > cmd - > flags & REDIS_CMD_WRITE & &
2013-05-29 05:36:44 -04:00
server . repl_good_slaves_count < server . repl_min_slaves_to_write )
{
flagTransaction ( c ) ;
addReply ( c , shared . noreplicaserr ) ;
return REDIS_OK ;
}
2012-07-22 11:13:49 -04:00
/* Don't accept write commands if this is a read only slave. But
2012-03-20 12:32:48 -04:00
* accept write commands if this is our master . */
if ( server . masterhost & & server . repl_slave_ro & &
! ( c - > flags & REDIS_MASTER ) & &
c - > cmd - > flags & REDIS_CMD_WRITE )
{
addReply ( c , shared . roslaveerr ) ;
return REDIS_OK ;
}
2010-06-21 18:07:48 -04:00
/* Only allow SUBSCRIBE and UNSUBSCRIBE in the context of Pub/Sub */
if ( ( dictSize ( c - > pubsub_channels ) > 0 | | listLength ( c - > pubsub_patterns ) > 0 )
& &
2011-07-08 06:59:30 -04:00
c - > cmd - > proc ! = subscribeCommand & &
c - > cmd - > proc ! = unsubscribeCommand & &
c - > cmd - > proc ! = psubscribeCommand & &
c - > cmd - > proc ! = punsubscribeCommand ) {
2010-09-02 13:52:24 -04:00
addReplyError ( c , " only (P)SUBSCRIBE / (P)UNSUBSCRIBE / QUIT allowed in this context " ) ;
2010-10-15 09:40:25 -04:00
return REDIS_OK ;
2010-06-21 18:07:48 -04:00
}
2010-11-04 14:59:21 -04:00
/* Only allow commands flagged as REDIS_CMD_STALE (such as INFO and SLAVEOF)
* when slave-serve-stale-data is "no" and we are a slave with a broken
* link with our master. */
2011-12-21 06:23:18 -05:00
if ( server . masterhost & & server . repl_state ! = REDIS_REPL_CONNECTED & &
2010-11-04 14:59:21 -04:00
server . repl_serve_stale_data = = 0 & &
2012-07-22 11:13:49 -04:00
! ( c - > cmd - > flags & REDIS_CMD_STALE ) )
2010-11-04 14:59:21 -04:00
{
2012-11-15 14:11:05 -05:00
flagTransaction ( c ) ;
2012-05-02 11:14:45 -04:00
addReply ( c , shared . masterdownerr ) ;
2010-11-04 14:59:21 -04:00
return REDIS_OK ;
}
2012-07-22 11:13:49 -04:00
/* Loading DB? Return an error if the command does not have the
* REDIS_CMD_LOADING flag. */
if ( server . loading & & ! ( c - > cmd - > flags & REDIS_CMD_LOADING ) ) {
2010-11-08 05:52:03 -05:00
addReply ( c , shared . loadingerr ) ;
return REDIS_OK ;
}
2013-05-28 09:23:42 -04:00
/* Lua script too slow? Only allow a limited number of commands. */
2011-11-18 08:10:48 -05:00
if ( server . lua_timedout & &
2012-10-11 12:34:05 -04:00
c - > cmd - > proc ! = authCommand & &
2013-05-28 09:23:42 -04:00
c - > cmd - > proc ! = replconfCommand & &
2012-04-19 17:35:15 -04:00
! ( c - > cmd - > proc = = shutdownCommand & &
2011-11-18 08:10:48 -05:00
c - > argc = = 2 & &
tolower ( ( ( char * ) c - > argv [ 1 ] - > ptr ) [ 0 ] ) = = ' n ' ) & &
! ( c - > cmd - > proc = = scriptCommand & &
c - > argc = = 2 & &
tolower ( ( ( char * ) c - > argv [ 1 ] - > ptr ) [ 0 ] ) = = ' k ' ) )
{
2012-11-15 14:11:05 -05:00
flagTransaction ( c ) ;
2011-10-27 08:49:10 -04:00
addReply ( c , shared . slowscripterr ) ;
return REDIS_OK ;
}
2010-06-21 18:07:48 -04:00
/* Exec the command */
if ( c - > flags & REDIS_MULTI & &
2011-07-08 06:59:30 -04:00
c - > cmd - > proc ! = execCommand & & c - > cmd - > proc ! = discardCommand & &
c - > cmd - > proc ! = multiCommand & & c - > cmd - > proc ! = watchCommand )
2010-06-21 18:07:48 -04:00
{
2011-07-08 06:59:30 -04:00
queueMultiCommand ( c ) ;
2010-06-21 18:07:48 -04:00
addReply ( c , shared . queued ) ;
} else {
2012-02-02 10:30:52 -05:00
call ( c , REDIS_CALL_FULL ) ;
2013-12-04 09:52:20 -05:00
c - > woff = server . master_repl_offset ;
A reimplementation of blocking operation internals.
Redis provides support for blocking operations such as BLPOP or BRPOP.
These operations are identical to normal LPOP and RPOP operations as long
as there are elements in the target list, but if the list is empty they
block waiting for new data to arrive to the list.
All the clients blocked waiting for the same list are served in a FIFO
way, so the first that blocked is the first to be served when there is
more data pushed by another client into the list.
The previous implementation of blocking operations was conceived to
serve clients in the context of push operations. For instance:
1) There is a client "A" blocked on list "foo".
2) The client "B" performs `LPUSH foo somevalue`.
3) The client "A" is served in the context of the "B" LPUSH,
synchronously.
Processing things in a synchronous way was useful because if "B" pushes a
value that is immediately served to "A", from the point of view of the
database it is a NOP (no operation), that is, nothing is replicated,
nothing is written in the AOF file, and so forth.
However later we implemented two things:
1) Variadic LPUSH that could add multiple values to a list in the
context of a single call.
2) BRPOPLPUSH that was a version of BRPOP that also provided a "PUSH"
side effect when receiving data.
This forced us to make the synchronous implementation more complex. If
client "A" is waiting for data, and "B" pushes three elements in a
single call, we needed to propagate an LPUSH with a missing argument
in the AOF and replication link. We also needed to make sure to
replicate the LPUSH side of BRPOPLPUSH, but only if it did not in turn
happen to serve another blocked client on another list ;)
This was complex, but with a few mutually recursive functions
everything worked as expected... until one day we introduced scripting
in Redis.
Scripting + synchronous blocking operations = Issue #614.
Basically you can't "rewrite" a script to have just a partial effect on
the replicas and AOF file if the script happened to serve a few blocked
clients.
The solution to all these problems, implemented by this commit, is to
change the way we serve blocked clients. Instead of serving the blocked
clients synchronously, in the context of the command performing the PUSH
operation, it is now an asynchronous and iterative process:
1) If a key that has clients blocked waiting for data is the subject of
a list push operation, we simply mark the key as "ready" and put it into
a queue.
2) Every command pushing stuff on lists, be it a variadic LPUSH, a script,
or whatever it is, is replicated verbatim without any rewriting.
3) Every time a Redis command, a MULTI/EXEC block, or a script,
completes its execution, we run the list of keys ready to serve blocked
clients (as more data arrived), and process this list serving the
blocked clients.
4) As a result of "3" maybe more keys are ready again for other clients
(as a result of BRPOPLPUSH we may have push operations), so we iterate
back to step "3" if needed.
The new code has much simpler semantics, and an easier to understand
implementation, with the disadvantage of not being able to "optimize out"
a PUSH+BPOP as a no-op.
This commit will be tested with care before the final merge; more tests
will likely be added.
2012-09-04 04:37:49 -04:00
if ( listLength ( server . ready_keys ) )
handleClientsBlockedOnLists ( ) ;
2010-06-21 18:07:48 -04:00
}
2010-10-15 09:40:25 -04:00
return REDIS_OK ;
2010-06-21 18:07:48 -04:00
}
/*================================== Shutdown =============================== */
2013-07-05 05:47:20 -04:00
/* Close listening sockets. Also unlink the unix domain socket if
* unlink_unix_socket is non - zero . */
void closeListeningSockets ( int unlink_unix_socket ) {
int j ;
for ( j = 0 ; j < server . ipfd_count ; j + + ) close ( server . ipfd [ j ] ) ;
if ( server . sofd ! = - 1 ) close ( server . sofd ) ;
if ( server . cluster_enabled )
for ( j = 0 ; j < server . cfd_count ; j + + ) close ( server . cfd [ j ] ) ;
if ( unlink_unix_socket & & server . unixsocket ) {
redisLog ( REDIS_NOTICE , " Removing the unix socket file. " ) ;
unlink ( server . unixsocket ) ; /* don't care if this fails */
}
}
2011-11-18 08:10:48 -05:00
int prepareForShutdown ( int flags ) {
int save = flags & REDIS_SHUTDOWN_SAVE ;
int nosave = flags & REDIS_SHUTDOWN_NOSAVE ;
2011-07-22 05:52:21 -04:00
redisLog ( REDIS_WARNING , " User requested shutdown... " ) ;
2010-06-21 18:07:48 -04:00
/* Kill the saving child if there is a background saving in progress.
We want to avoid race conditions, for instance our saving child may
overwrite the synchronous save done by SHUTDOWN. */
2011-12-21 06:22:13 -05:00
if ( server . rdb_child_pid ! = - 1 ) {
2011-07-22 05:52:21 -04:00
redisLog ( REDIS_WARNING , " There is a child saving an .rdb. Killing it! " ) ;
2013-01-14 04:29:14 -05:00
kill ( server . rdb_child_pid , SIGUSR1 ) ;
2011-12-21 06:22:13 -05:00
rdbRemoveTempFile ( server . rdb_child_pid ) ;
2010-06-21 18:07:48 -04:00
}
2011-12-21 04:31:34 -05:00
if ( server . aof_state ! = REDIS_AOF_OFF ) {
2011-07-22 05:52:21 -04:00
/* Kill the AOF saving child as the AOF we already have may be longer
* but contains the full dataset anyway . */
2011-12-21 06:17:02 -05:00
if ( server . aof_child_pid ! = - 1 ) {
2011-07-22 05:52:21 -04:00
redisLog ( REDIS_WARNING ,
" There is a child rewriting the AOF. Killing it! " ) ;
2013-01-14 04:29:14 -05:00
kill ( server . aof_child_pid , SIGUSR1 ) ;
2011-07-22 05:52:21 -04:00
}
2010-06-21 18:07:48 -04:00
/* Append only file: fsync() the AOF and exit */
2011-07-22 05:52:21 -04:00
redisLog ( REDIS_NOTICE , " Calling fsync() on the AOF file. " ) ;
2011-12-21 06:17:02 -05:00
aof_fsync ( server . aof_fd ) ;
2011-07-22 05:52:21 -04:00
}
2011-11-18 08:10:48 -05:00
if ( ( server . saveparamslen > 0 & & ! nosave ) | | save ) {
2011-07-22 05:52:21 -04:00
redisLog ( REDIS_NOTICE , " Saving the final RDB snapshot before exiting. " ) ;
2010-06-21 18:07:48 -04:00
/* Snapshotting. Perform a SYNC SAVE and exit */
2011-12-21 06:22:13 -05:00
if ( rdbSave ( server . rdb_filename ) ! = REDIS_OK ) {
2010-06-21 18:07:48 -04:00
/* Ooops.. error saving! The best we can do is to continue
* operating . Note that if there was a background saving process ,
* in the next cron ( ) Redis will be notified that the background
* saving aborted , handling special stuff like slaves pending for
* synchronization . . . */
2011-07-22 05:52:21 -04:00
redisLog ( REDIS_WARNING , " Error trying to save the DB, can't exit. " ) ;
2010-06-21 18:07:48 -04:00
return REDIS_ERR ;
}
}
2011-07-22 05:52:21 -04:00
if ( server . daemonize ) {
redisLog ( REDIS_NOTICE , " Removing the pid file. " ) ;
unlink ( server . pidfile ) ;
}
2011-07-22 06:11:40 -04:00
/* Close the listening sockets. Apparently this allows faster restarts. */
2013-07-05 05:47:20 -04:00
closeListeningSockets ( 1 ) ;
2014-02-07 05:22:24 -05:00
redisLog ( REDIS_WARNING , " %s is now ready to exit, bye bye... " ,
server . sentinel_mode ? " Sentinel " : " Redis " ) ;
2010-06-21 18:07:48 -04:00
return REDIS_OK ;
}
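/* Hedged illustration (assumption, not part of the original file): callers
 * such as the SHUTDOWN command or the SIGTERM handling path are expected to
 * exit only when prepareForShutdown() reports success. */
void exampleShutdownPath(void) {
    if (prepareForShutdown(REDIS_SHUTDOWN_SAVE) == REDIS_OK) exit(0);
    /* Otherwise the final save failed: keep running and serving clients. */
}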
/*================================== Commands =============================== */
2012-06-21 08:25:53 -04:00
/* Return zero if strings are the same, non-zero if they are not.
2012-06-21 05:50:01 -04:00
* The comparison is performed in a way that prevents an attacker from
* obtaining information about the nature of the strings just by monitoring
* the execution time of the function.
*
* Note that by limiting the comparison length to strings of up to 512 bytes
* we avoid leaking any information about the password length and any
* possible branch misprediction related leak.
*/
int time_independent_strcmp ( char * a , char * b ) {
char bufa [ REDIS_AUTHPASS_MAX_LEN ] , bufb [ REDIS_AUTHPASS_MAX_LEN ] ;
/* The following two strlen() calls perform len(a) + len(b) operations
* where either a or b is of fixed (our password) length, and the difference
* is only relative to the length of the user provided string, so no
* information leak is possible in the following two lines of code. */
int alen = strlen ( a ) ;
int blen = strlen ( b ) ;
int j ;
int diff = 0 ;
/* We can't compare strings longer than our static buffers.
* Note that this will never pass the first test in practical circumstances
* so there is no info leak . */
if ( alen > sizeof ( bufa ) | | blen > sizeof ( bufb ) ) return 1 ;
memset ( bufa , 0 , sizeof ( bufa ) ) ; /* Constant time. */
memset ( bufb , 0 , sizeof ( bufb ) ) ; /* Constant time. */
/* Again the time of the following two copies is proportional to
* len ( a ) + len ( b ) so no info is leaked . */
memcpy ( bufa , a , alen ) ;
memcpy ( bufb , b , blen ) ;
/* Always compare all the chars in the two buffers without
* conditional expressions . */
for ( j = 0 ; j < sizeof ( bufa ) ; j + + ) {
diff | = ( bufa [ j ] ^ bufb [ j ] ) ;
}
/* Length must be equal as well. */
diff | = alen ^ blen ;
return diff ; /* If zero, the strings are the same. */
}
2010-06-21 18:07:48 -04:00
void authCommand ( redisClient * c ) {
2011-10-10 16:21:17 -04:00
if ( ! server . requirepass ) {
addReplyError ( c , " Client sent AUTH, but no password is set " ) ;
2012-06-21 05:50:01 -04:00
} else if ( ! time_independent_strcmp ( c - > argv [ 1 ] - > ptr , server . requirepass ) ) {
2010-06-21 18:07:48 -04:00
c - > authenticated = 1 ;
addReply ( c , shared . ok ) ;
} else {
c - > authenticated = 0 ;
2010-09-02 13:52:24 -04:00
addReplyError ( c , " invalid password " ) ;
2010-06-21 18:07:48 -04:00
}
}
void pingCommand ( redisClient * c ) {
addReply ( c , shared . pong ) ;
}
void echoCommand ( redisClient * c ) {
addReplyBulk ( c , c - > argv [ 1 ] ) ;
}
2012-03-07 04:38:01 -05:00
void timeCommand ( redisClient * c ) {
struct timeval tv ;
2013-01-16 12:00:20 -05:00
/* gettimeofday() can only fail if &tv is a bad address so we
2012-03-07 04:38:01 -05:00
* don ' t check for errors . */
gettimeofday ( & tv , NULL ) ;
addReplyMultiBulkLen ( c , 2 ) ;
addReplyBulkLongLong ( c , tv . tv_sec ) ;
addReplyBulkLongLong ( c , tv . tv_usec ) ;
}
2010-06-21 18:07:48 -04:00
/* Convert an amount of bytes into a human readable string in the form
* of 100 B , 2 G , 100 M , 4 K , and so forth . */
void bytesToHuman ( char * s , unsigned long long n ) {
double d ;
if ( n < 1024 ) {
/* Bytes */
sprintf ( s , " %lluB " , n ) ;
return ;
} else if ( n < ( 1024 * 1024 ) ) {
d = ( double ) n / ( 1024 ) ;
sprintf ( s , " %.2fK " , d ) ;
} else if ( n < ( 1024LL * 1024 * 1024 ) ) {
d = ( double ) n / ( 1024 * 1024 ) ;
sprintf ( s , " %.2fM " , d ) ;
} else if (n < (1024LL*1024*1024*1024)) {
    d = (double)n/(1024LL*1024*1024);
    sprintf(s,"%.2fG",d);
} else {
    /* Fall back to terabytes so the output buffer is always populated,
     * even for very large values. */
    d = (double)n/(1024LL*1024*1024*1024);
    sprintf(s,"%.2fT",d);
}
}
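/* Illustration only (not in the original file): a quick sanity example of
 * the strings produced by bytesToHuman(). */
void bytesToHumanExample(void) {
    char buf[64];
    bytesToHuman(buf,512);                  /* "512B"  */
    bytesToHuman(buf,1536ULL*1024);         /* "1.50M" */
    bytesToHuman(buf,3ULL*1024*1024*1024);  /* "3.00G" */
}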
/* Create the string returned by the INFO command. This is decoupled
* from the INFO command itself as we need to report the same information
* on memory corruption problems. */
2011-01-23 10:41:07 -05:00
sds genRedisInfoString ( char * section ) {
sds info = sdsempty ( ) ;
2012-03-27 11:39:58 -04:00
time_t uptime = server . unixtime - server . stat_starttime ;
2011-01-23 05:46:34 -05:00
int j , numcommands ;
2010-09-16 07:28:58 -04:00
struct rusage self_ru , c_ru ;
2011-01-14 04:20:02 -05:00
unsigned long lol , bib ;
2011-01-23 10:41:07 -05:00
int allsections = 0 , defsections = 0 ;
int sections = 0 ;
2012-08-29 05:44:01 -04:00
2011-01-23 10:41:07 -05:00
if ( section ) {
allsections = strcasecmp ( section , " all " ) = = 0 ;
2011-01-23 16:14:15 -05:00
defsections = strcasecmp ( section , " default " ) = = 0 ;
2011-01-23 10:41:07 -05:00
}
2010-09-16 07:28:58 -04:00
getrusage ( RUSAGE_SELF , & self_ru ) ;
getrusage ( RUSAGE_CHILDREN , & c_ru ) ;
2011-01-14 04:20:02 -05:00
getClientsMaxBuffers ( & lol , & bib ) ;
2011-01-23 10:41:07 -05:00
/* Server */
if ( allsections | | defsections | | ! strcasecmp ( section , " server " ) ) {
2012-04-04 09:38:13 -04:00
struct utsname name ;
2012-08-29 05:44:01 -04:00
char * mode ;
2012-04-04 09:38:13 -04:00
2012-08-29 05:44:01 -04:00
if ( server . cluster_enabled ) mode = " cluster " ;
else if ( server . sentinel_mode ) mode = " sentinel " ;
else mode = " standalone " ;
2011-01-23 10:41:07 -05:00
if ( sections + + ) info = sdscat ( info , " \r \n " ) ;
2012-04-04 09:38:13 -04:00
uname ( & name ) ;
2010-06-21 18:07:48 -04:00
info = sdscatprintf ( info ,
2011-01-23 10:41:07 -05:00
" # Server \r \n "
" redis_version:%s \r \n "
" redis_git_sha1:%s \r \n "
" redis_git_dirty:%d \r \n "
2012-11-29 08:20:08 -05:00
" redis_build_id:%llx \r \n "
2012-08-29 05:44:01 -04:00
" redis_mode:%s \r \n "
2012-04-04 09:38:13 -04:00
" os:%s %s %s \r \n "
2012-02-02 04:02:40 -05:00
" arch_bits:%d \r \n "
2011-01-23 10:41:07 -05:00
" multiplexing_api:%s \r \n "
2012-01-10 12:37:16 -05:00
" gcc_version:%d.%d.%d \r \n "
2011-01-23 10:41:07 -05:00
" process_id:%ld \r \n "
2012-03-08 04:13:12 -05:00
" run_id:%s \r \n "
2011-01-23 10:41:07 -05:00
" tcp_port:%d \r \n "
2012-10-28 00:33:04 -04:00
" uptime_in_seconds:%jd \r \n "
" uptime_in_days:%jd \r \n "
2012-12-14 11:10:40 -05:00
" hz:%d \r \n "
2013-05-09 10:57:59 -04:00
" lru_clock:%ld \r \n "
" config_file:%s \r \n " ,
2011-01-23 10:41:07 -05:00
REDIS_VERSION ,
redisGitSHA1 ( ) ,
strtol ( redisGitDirty ( ) , NULL , 10 ) > 0 ,
2013-02-27 06:33:27 -05:00
( unsigned long long ) redisBuildId ( ) ,
2012-08-29 05:44:01 -04:00
mode ,
2012-04-04 09:38:13 -04:00
name . sysname , name . release , name . machine ,
2012-02-02 04:02:40 -05:00
server . arch_bits ,
2011-01-23 10:41:07 -05:00
aeGetApiName ( ) ,
2012-01-10 12:37:16 -05:00
# ifdef __GNUC__
__GNUC__ , __GNUC_MINOR__ , __GNUC_PATCHLEVEL__ ,
# else
0 , 0 , 0 ,
# endif
2011-01-23 10:41:07 -05:00
( long ) getpid ( ) ,
2012-03-08 04:13:12 -05:00
server . runid ,
2011-01-23 10:41:07 -05:00
server . port ,
2012-10-28 00:33:04 -04:00
( intmax_t ) uptime ,
( intmax_t ) ( uptime / ( 3600 * 24 ) ) ,
2012-12-14 11:10:40 -05:00
server . hz ,
2013-05-09 10:57:59 -04:00
( unsigned long ) server . lruclock ,
server . configfile ? server . configfile : " " ) ;
2011-01-23 10:41:07 -05:00
}
/* Clients */
if ( allsections | | defsections | | ! strcasecmp ( section , " clients " ) ) {
if ( sections + + ) info = sdscat ( info , " \r \n " ) ;
info = sdscatprintf ( info ,
" # Clients \r \n "
2012-01-31 04:35:52 -05:00
" connected_clients:%lu \r \n "
2011-01-23 10:41:07 -05:00
" client_longest_output_list:%lu \r \n "
" client_biggest_input_buf:%lu \r \n "
" blocked_clients:%d \r \n " ,
listLength ( server . clients ) - listLength ( server . slaves ) ,
lol , bib ,
server . bpop_blocked_clients ) ;
}
/* Memory */
if ( allsections | | defsections | | ! strcasecmp ( section , " memory " ) ) {
2011-04-21 04:49:52 -04:00
char hmem [ 64 ] ;
char peak_hmem [ 64 ] ;
2014-02-28 17:47:41 -05:00
size_t zmalloc_used = zmalloc_used_memory ( ) ;
2011-04-21 04:49:52 -04:00
2014-03-03 05:19:54 -05:00
/* Peak memory is updated from time to time by serverCron() so it
* may happen that the instantaneous value is slightly bigger than
* the peak value . This may confuse users , so we update the peak
* if found smaller than the current memory usage . */
if ( zmalloc_used > server . stat_peak_memory )
2014-02-28 17:47:41 -05:00
server . stat_peak_memory = zmalloc_used ;
bytesToHuman ( hmem , zmalloc_used ) ;
2011-04-21 04:49:52 -04:00
bytesToHuman ( peak_hmem , server . stat_peak_memory ) ;
2011-01-23 10:41:07 -05:00
if ( sections + + ) info = sdscat ( info , " \r \n " ) ;
info = sdscatprintf ( info ,
" # Memory \r \n "
" used_memory:%zu \r \n "
" used_memory_human:%s \r \n "
" used_memory_rss:%zu \r \n "
2011-04-21 04:49:52 -04:00
" used_memory_peak:%zu \r \n "
" used_memory_peak_human:%s \r \n "
2011-05-07 05:40:29 -04:00
" used_memory_lua:%lld \r \n "
2011-01-23 10:41:07 -05:00
" mem_fragmentation_ratio:%.2f \r \n "
2011-04-27 07:35:49 -04:00
" mem_allocator:%s \r \n " ,
2014-02-28 17:47:41 -05:00
zmalloc_used ,
2011-01-23 10:41:07 -05:00
hmem ,
zmalloc_get_rss ( ) ,
2011-04-21 04:49:52 -04:00
server . stat_peak_memory ,
peak_hmem ,
2011-05-07 05:40:29 -04:00
( ( long long ) lua_gc ( server . lua , LUA_GCCOUNT , 0 ) ) * 1024LL ,
2011-01-23 10:41:07 -05:00
zmalloc_get_fragmentation_ratio ( ) ,
2011-06-20 05:54:08 -04:00
ZMALLOC_LIB
2010-11-04 13:50:23 -04:00
) ;
2011-01-23 16:14:15 -05:00
}
2011-01-23 10:41:07 -05:00
/* Persistence */
if ( allsections | | defsections | | ! strcasecmp ( section , " persistence " ) ) {
if ( sections + + ) info = sdscat ( info , " \r \n " ) ;
2010-06-21 18:07:48 -04:00
info = sdscatprintf ( info ,
2011-01-23 10:41:07 -05:00
" # Persistence \r \n "
" loading:%d \r \n "
2012-05-25 06:11:30 -04:00
" rdb_changes_since_last_save:%lld \r \n "
" rdb_bgsave_in_progress:%d \r \n "
2012-10-28 00:33:04 -04:00
" rdb_last_save_time:%jd \r \n "
2012-05-25 06:11:30 -04:00
" rdb_last_bgsave_status:%s \r \n "
2012-10-28 00:33:04 -04:00
" rdb_last_bgsave_time_sec:%jd \r \n "
" rdb_current_bgsave_time_sec:%jd \r \n "
2011-01-23 10:41:07 -05:00
" aof_enabled:%d \r \n "
2012-05-25 06:11:30 -04:00
" aof_rewrite_in_progress:%d \r \n "
" aof_rewrite_scheduled:%d \r \n "
2012-10-28 00:33:04 -04:00
" aof_last_rewrite_time_sec:%jd \r \n "
" aof_current_rewrite_time_sec:%jd \r \n "
2014-02-12 06:47:10 -05:00
" aof_last_bgrewrite_status:%s \r \n "
" aof_last_write_status:%s \r \n " ,
2011-01-23 10:41:07 -05:00
server . loading ,
server . dirty ,
2011-12-21 06:22:13 -05:00
server . rdb_child_pid ! = - 1 ,
2012-10-28 00:33:04 -04:00
( intmax_t ) server . lastsave ,
2012-07-16 22:06:53 -04:00
( server . lastbgsave_status = = REDIS_OK ) ? " ok " : " err " ,
2012-10-28 00:33:04 -04:00
( intmax_t ) server . rdb_save_time_last ,
( intmax_t ) ( ( server . rdb_child_pid = = - 1 ) ?
- 1 : time ( NULL ) - server . rdb_save_time_start ) ,
2012-05-25 06:11:30 -04:00
server . aof_state ! = REDIS_AOF_OFF ,
2012-04-06 15:12:50 -04:00
server . aof_child_pid ! = - 1 ,
2012-05-25 06:11:30 -04:00
server . aof_rewrite_scheduled ,
2012-10-28 00:33:04 -04:00
( intmax_t ) server . aof_rewrite_time_last ,
( intmax_t ) ( ( server . aof_child_pid = = - 1 ) ?
- 1 : time ( NULL ) - server . aof_rewrite_time_start ) ,
2014-02-12 06:47:10 -05:00
( server . aof_lastbgrewrite_status = = REDIS_OK ) ? " ok " : " err " ,
( server . aof_last_write_status = = REDIS_OK ) ? " ok " : " err " ) ;
2011-01-23 10:41:07 -05:00
2011-12-21 04:31:34 -05:00
if ( server . aof_state ! = REDIS_AOF_OFF ) {
2011-06-10 08:52:47 -04:00
info = sdscatprintf ( info ,
" aof_current_size:%lld \r \n "
" aof_base_size:%lld \r \n "
2011-11-28 05:04:03 -05:00
" aof_pending_rewrite:%d \r \n "
" aof_buffer_length:%zu \r \n "
2012-09-10 06:42:55 -04:00
" aof_rewrite_buffer_length:%lu \r \n "
2012-03-25 05:27:35 -04:00
" aof_pending_bio_fsync:%llu \r \n "
" aof_delayed_fsync:%lu \r \n " ,
2011-12-21 05:58:42 -05:00
( long long ) server . aof_current_size ,
( long long ) server . aof_rewrite_base_size ,
server . aof_rewrite_scheduled ,
2011-12-21 06:17:02 -05:00
sdslen ( server . aof_buf ) ,
2012-05-24 09:03:23 -04:00
aofRewriteBufferSize ( ) ,
2012-03-25 05:27:35 -04:00
bioPendingJobsOfType ( REDIS_BIO_AOF_FSYNC ) ,
server . aof_delayed_fsync ) ;
2011-06-10 08:52:47 -04:00
}
2011-01-23 10:41:07 -05:00
if ( server . loading ) {
double perc ;
time_t eta , elapsed ;
off_t remaining_bytes = server . loading_total_bytes -
server . loading_loaded_bytes ;
perc = ( ( double ) server . loading_loaded_bytes /
server . loading_total_bytes ) * 100 ;
2012-03-27 11:39:58 -04:00
elapsed = server . unixtime - server . loading_start_time ;
2011-01-23 10:41:07 -05:00
if ( elapsed = = 0 ) {
eta = 1 ; /* A fake 1 second figure if we don't have
enough info */
} else {
eta = ( elapsed * remaining_bytes ) / server . loading_loaded_bytes ;
}
info = sdscatprintf ( info ,
2012-10-28 00:33:04 -04:00
" loading_start_time:%jd \r \n "
2011-01-23 10:41:07 -05:00
" loading_total_bytes:%llu \r \n "
" loading_loaded_bytes:%llu \r \n "
" loading_loaded_perc:%.2f \r \n "
2012-10-28 00:33:04 -04:00
" loading_eta_seconds:%jd \r \n " ,
( intmax_t ) server . loading_start_time ,
2011-01-23 10:41:07 -05:00
( unsigned long long ) server . loading_total_bytes ,
( unsigned long long ) server . loading_loaded_bytes ,
perc ,
2012-10-28 00:33:04 -04:00
( intmax_t ) eta
2011-01-23 10:41:07 -05:00
) ;
}
2010-06-21 18:07:48 -04:00
}
2011-01-23 10:41:07 -05:00
/* Stats */
if ( allsections | | defsections | | ! strcasecmp ( section , " stats " ) ) {
if ( sections + + ) info = sdscat ( info , " \r \n " ) ;
2010-11-08 05:52:03 -05:00
info = sdscatprintf ( info ,
2011-01-23 10:41:07 -05:00
" # Stats \r \n "
" total_connections_received:%lld \r \n "
" total_commands_processed:%lld \r \n "
2012-03-08 10:15:37 -05:00
" instantaneous_ops_per_sec:%lld \r \n "
2011-11-23 12:38:12 -05:00
" rejected_connections:%lld \r \n "
2013-02-12 09:24:25 -05:00
" sync_full:%lld \r \n "
" sync_partial_ok:%lld \r \n "
" sync_partial_err:%lld \r \n "
2011-01-23 10:41:07 -05:00
" expired_keys:%lld \r \n "
" evicted_keys:%lld \r \n "
" keyspace_hits:%lld \r \n "
" keyspace_misses:%lld \r \n "
" pubsub_channels:%ld \r \n "
2012-01-31 04:35:52 -05:00
" pubsub_patterns:%lu \r \n "
2012-11-12 08:01:56 -05:00
" latest_fork_usec:%lld \r \n "
" migrate_cached_sockets:%ld \r \n " ,
2011-01-23 10:41:07 -05:00
server . stat_numconnections ,
server . stat_numcommands ,
2012-03-08 10:15:37 -05:00
getOperationsPerSecond ( ) ,
2011-11-23 12:38:12 -05:00
server . stat_rejected_conn ,
2013-02-12 09:24:25 -05:00
server . stat_sync_full ,
server . stat_sync_partial_ok ,
server . stat_sync_partial_err ,
2011-01-23 10:41:07 -05:00
server . stat_expiredkeys ,
server . stat_evictedkeys ,
server . stat_keyspace_hits ,
server . stat_keyspace_misses ,
dictSize ( server . pubsub_channels ) ,
2011-05-29 09:17:29 -04:00
listLength ( server . pubsub_patterns ) ,
2012-11-12 08:01:56 -05:00
server . stat_fork_time ,
dictSize ( server . migrate_cached_sockets ) ) ;
2010-11-08 05:52:03 -05:00
}
2011-01-09 09:56:50 -05:00
2011-01-23 10:41:07 -05:00
/* Replication */
if ( allsections | | defsections | | ! strcasecmp ( section , " replication " ) ) {
if ( sections + + ) info = sdscat ( info , " \r \n " ) ;
info = sdscatprintf ( info ,
" # Replication \r \n "
" role:%s \r \n " ,
server . masterhost = = NULL ? " master " : " slave " ) ;
if ( server . masterhost ) {
2013-12-11 09:23:10 -05:00
long long slave_repl_offset = 1 ;
if ( server . master )
slave_repl_offset = server . master - > reploff ;
else if ( server . cached_master )
slave_repl_offset = server . cached_master - > reploff ;
2011-01-23 10:41:07 -05:00
info = sdscatprintf ( info ,
" master_host:%s \r \n "
" master_port:%d \r \n "
" master_link_status:%s \r \n "
" master_last_io_seconds_ago:%d \r \n "
" master_sync_in_progress:%d \r \n "
2013-01-30 12:33:16 -05:00
" slave_repl_offset:%lld \r \n "
2011-01-23 10:41:07 -05:00
, server . masterhost ,
server . masterport ,
2011-12-21 06:23:18 -05:00
( server . repl_state = = REDIS_REPL_CONNECTED ) ?
2011-01-23 10:41:07 -05:00
" up " : " down " ,
server . master ?
2012-03-27 11:39:58 -04:00
( ( int ) ( server . unixtime - server . master - > lastinteraction ) ) : - 1 ,
2013-01-30 12:33:16 -05:00
server . repl_state = = REDIS_REPL_TRANSFER ,
2013-12-11 09:23:10 -05:00
slave_repl_offset
2011-01-23 10:41:07 -05:00
) ;
2011-12-21 06:23:18 -05:00
if ( server . repl_state = = REDIS_REPL_TRANSFER ) {
2011-01-23 10:41:07 -05:00
info = sdscatprintf ( info ,
2012-08-24 13:28:44 -04:00
" master_sync_left_bytes:%lld \r \n "
2011-01-23 10:41:07 -05:00
" master_sync_last_io_seconds_ago:%d \r \n "
2012-08-24 13:28:44 -04:00
, ( long long )
( server . repl_transfer_size - server . repl_transfer_read ) ,
2012-03-27 11:39:58 -04:00
( int ) ( server . unixtime - server . repl_transfer_lastio )
2011-01-23 10:41:07 -05:00
) ;
}
2011-06-17 10:13:22 -04:00
2011-12-21 06:23:18 -05:00
if ( server . repl_state ! = REDIS_REPL_CONNECTED ) {
2011-06-17 10:13:22 -04:00
info = sdscatprintf ( info ,
2012-10-28 00:33:04 -04:00
" master_link_down_since_seconds:%jd \r \n " ,
( intmax_t ) server . unixtime - server . repl_down_since ) ;
2011-06-17 10:13:22 -04:00
}
2012-08-28 11:20:26 -04:00
info = sdscatprintf ( info ,
2012-10-22 13:21:47 -04:00
" slave_priority:%d \r \n "
" slave_read_only:%d \r \n " ,
server . slave_priority ,
server . repl_slave_ro ) ;
2011-01-09 09:56:50 -05:00
}
2013-05-30 06:18:31 -04:00
2011-01-23 10:41:07 -05:00
info = sdscatprintf ( info ,
2012-01-31 04:35:52 -05:00
" connected_slaves:%lu \r \n " ,
2011-01-23 10:41:07 -05:00
listLength ( server . slaves ) ) ;
2013-05-30 06:18:31 -04:00
/* If min-slaves-to-write is active, write the number of slaves
* currently considered ' good ' . */
if ( server . repl_min_slaves_to_write & &
server . repl_min_slaves_max_lag ) {
info = sdscatprintf ( info ,
" min_slaves_good_slaves:%d \r \n " ,
server . repl_good_slaves_count ) ;
}
2011-12-14 09:11:11 -05:00
if ( listLength ( server . slaves ) ) {
int slaveid = 0 ;
listNode * ln ;
listIter li ;
listRewind ( server . slaves , & li ) ;
while ( ( ln = listNext ( & li ) ) ) {
redisClient * slave = listNodeValue ( ln ) ;
char * state = NULL ;
2013-07-09 05:32:52 -04:00
char ip [ REDIS_IP_STR_LEN ] ;
2011-12-14 09:11:11 -05:00
int port ;
2013-05-29 13:52:54 -04:00
long lag = 0 ;
2011-12-14 09:11:11 -05:00
2012-10-17 17:32:21 -04:00
if ( anetPeerToString ( slave - > fd , ip , sizeof ( ip ) , & port ) = = - 1 ) continue ;
2011-12-14 09:11:11 -05:00
switch ( slave - > replstate ) {
case REDIS_REPL_WAIT_BGSAVE_START :
case REDIS_REPL_WAIT_BGSAVE_END :
state = " wait_bgsave " ;
break ;
case REDIS_REPL_SEND_BULK :
state = " send_bulk " ;
break ;
case REDIS_REPL_ONLINE :
state = " online " ;
break ;
}
if ( state = = NULL ) continue ;
2013-05-29 13:52:54 -04:00
if ( slave - > replstate = = REDIS_REPL_ONLINE )
lag = time ( NULL ) - slave - > repl_ack_time ;
info = sdscatprintf ( info ,
" slave%d:ip=%s,port=%d,state=%s, "
2013-05-29 13:56:33 -04:00
" offset=%lld,lag=%ld \r \n " ,
2013-05-24 18:54:00 -04:00
slaveid , ip , slave - > slave_listening_port , state ,
2013-05-29 13:52:54 -04:00
slave - > repl_ack_off , lag ) ;
2011-12-14 09:11:11 -05:00
slaveid + + ;
}
}
2013-01-30 12:33:16 -05:00
info = sdscatprintf ( info ,
" master_repl_offset:%lld \r \n "
" repl_backlog_active:%d \r \n "
" repl_backlog_size:%lld \r \n "
" repl_backlog_first_byte_offset:%lld \r \n "
" repl_backlog_histlen:%lld \r \n " ,
server . master_repl_offset ,
server . repl_backlog ! = NULL ,
server . repl_backlog_size ,
server . repl_backlog_off ,
server . repl_backlog_histlen ) ;
2011-01-09 09:56:50 -05:00
}
2011-01-23 16:14:15 -05:00
/* CPU */
if ( allsections | | defsections | | ! strcasecmp ( section , " cpu " ) ) {
2011-01-23 10:41:07 -05:00
if ( sections + + ) info = sdscat ( info , " \r \n " ) ;
info = sdscatprintf ( info ,
2011-01-23 16:14:15 -05:00
" # CPU \r \n "
2011-01-23 10:41:07 -05:00
" used_cpu_sys:%.2f \r \n "
" used_cpu_user:%.2f \r \n "
2011-07-02 04:51:35 -04:00
" used_cpu_sys_children:%.2f \r \n "
" used_cpu_user_children:%.2f \r \n " ,
2011-01-23 10:41:07 -05:00
( float ) self_ru . ru_stime . tv_sec + ( float ) self_ru . ru_stime . tv_usec / 1000000 ,
2011-10-10 09:29:36 -04:00
( float ) self_ru . ru_utime . tv_sec + ( float ) self_ru . ru_utime . tv_usec / 1000000 ,
( float ) c_ru . ru_stime . tv_sec + ( float ) c_ru . ru_stime . tv_usec / 1000000 ,
( float ) c_ru . ru_utime . tv_sec + ( float ) c_ru . ru_utime . tv_usec / 1000000 ) ;
2011-01-23 16:14:15 -05:00
}
2011-01-23 10:41:07 -05:00
2011-01-23 16:14:15 -05:00
/* Command statistics (commandstats section) */
if ( allsections | | ! strcasecmp ( section , " commandstats " ) ) {
if ( sections + + ) info = sdscat ( info , " \r \n " ) ;
info = sdscatprintf ( info , " # Commandstats \r \n " ) ;
2011-01-24 04:56:06 -05:00
numcommands = sizeof ( redisCommandTable ) / sizeof ( struct redisCommand ) ;
2011-01-23 10:41:07 -05:00
for ( j = 0 ; j < numcommands ; j + + ) {
2011-01-24 04:56:06 -05:00
struct redisCommand * c = redisCommandTable + j ;
2011-01-23 16:14:15 -05:00
2011-01-24 04:56:06 -05:00
if ( ! c - > calls ) continue ;
info = sdscatprintf ( info ,
" cmdstat_%s:calls=%lld,usec=%lld,usec_per_call=%.2f \r \n " ,
c - > name , c - > calls , c - > microseconds ,
( c - > calls = = 0 ) ? 0 : ( ( float ) c - > microseconds / c - > calls ) ) ;
2011-01-23 10:41:07 -05:00
}
2011-01-23 05:46:34 -05:00
}
2012-04-11 04:56:55 -04:00
/* Cluster */
2011-04-11 10:39:39 -04:00
if ( allsections | | defsections | | ! strcasecmp ( section , " cluster " ) ) {
if ( sections + + ) info = sdscat ( info , " \r \n " ) ;
info = sdscatprintf ( info ,
" # Cluster \r \n "
" cluster_enabled:%d \r \n " ,
server . cluster_enabled ) ;
}
2011-01-23 10:41:07 -05:00
/* Key space */
if ( allsections | | defsections | | ! strcasecmp ( section , " keyspace " ) ) {
if ( sections + + ) info = sdscat ( info , " \r \n " ) ;
info = sdscatprintf ( info , " # Keyspace \r \n " ) ;
for ( j = 0 ; j < server . dbnum ; j + + ) {
long long keys , vkeys ;
2010-06-21 18:07:48 -04:00
2011-01-23 10:41:07 -05:00
keys = dictSize ( server . db [ j ] . dict ) ;
vkeys = dictSize ( server . db [ j ] . expires ) ;
if ( keys | | vkeys ) {
2013-08-06 09:00:43 -04:00
info = sdscatprintf ( info ,
" db%d:keys=%lld,expires=%lld,avg_ttl=%lld \r \n " ,
j , keys , vkeys , server . db [ j ] . avg_ttl ) ;
2011-01-23 10:41:07 -05:00
}
2010-06-21 18:07:48 -04:00
}
}
return info ;
}
void infoCommand ( redisClient * c ) {
2011-01-23 10:41:07 -05:00
char * section = c - > argc = = 2 ? c - > argv [ 1 ] - > ptr : " default " ;
if ( c - > argc > 2 ) {
addReply ( c , shared . syntaxerr ) ;
return ;
}
sds info = genRedisInfoString ( section ) ;
2010-06-21 18:07:48 -04:00
addReplySds ( c , sdscatprintf ( sdsempty ( ) , " $%lu \r \n " ,
( unsigned long ) sdslen ( info ) ) ) ;
addReplySds ( c , info ) ;
addReply ( c , shared . crlf ) ;
}
void monitorCommand ( redisClient * c ) {
2012-11-01 06:14:55 -04:00
/* ignore MONITOR if already slave or in monitor mode */
2010-06-21 18:07:48 -04:00
if ( c - > flags & REDIS_SLAVE ) return ;
c - > flags | = ( REDIS_SLAVE | REDIS_MONITOR ) ;
listAddNodeTail ( server . monitors , c ) ;
addReply ( c , shared . ok ) ;
}
/* ============================ Maxmemory directive ======================== */
2014-03-20 06:57:29 -04:00
/* freeMemoryIfNeeded() gets called when 'maxmemory' is set in the config
* file to limit the max memory used by the server, before processing a
* command .
2010-06-21 18:07:48 -04:00
*
2012-02-04 08:05:54 -05:00
* The goal of the function is to free enough memory to keep Redis under the
* configured memory limit .
2010-06-21 18:07:48 -04:00
*
2012-02-04 08:05:54 -05:00
* The function starts calculating how many bytes should be freed to keep
* Redis under the limit, and enters a loop selecting the best keys to
* evict according to the configured policy.
*
* If all the bytes needed to return back under the limit were freed the
* function returns REDIS_OK , otherwise REDIS_ERR is returned , and the caller
* should block the execution of commands that will result in more memory
* used by the server .
2014-03-20 06:57:29 -04:00
*
* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*
* LRU approximation algorithm
*
* Redis uses an approximation of the LRU algorithm that runs in constant
* memory. Every time there is a key to expire, we sample N keys (with
* N very small, usually around 5) to populate a pool of the M best keys to
* evict (the pool size is defined by REDIS_EVICTION_POOL_SIZE).
*
* The N keys sampled are added to the pool of good keys to expire (the ones
* with an old access time) if they are better than one of the current keys
* in the pool.
*
* After the pool is populated , the best key we have in the pool is expired .
* However note that we don ' t remove keys from the pool when they are deleted
* so the pool may contain keys that no longer exist .
*
* When we try to evict a key , and all the entries in the pool don ' t exist
* we populate it again . This time we ' ll be sure that the pool has at least
* one key that can be evicted , if there is at least one key that can be
* evicted in the whole database . */
/* Create a new eviction pool. */
struct evictionPoolEntry * evictionPoolAlloc ( void ) {
struct evictionPoolEntry * ep ;
int j ;
ep = zmalloc ( sizeof ( * ep ) * REDIS_EVICTION_POOL_SIZE ) ;
for ( j = 0 ; j < REDIS_EVICTION_POOL_SIZE ; j + + ) {
ep [ j ] . idle = 0 ;
ep [ j ] . key = NULL ;
}
return ep ;
}
/* This is a helper function for freeMemoryIfNeeded(), used to populate the
* evictionPool with a few entries every time we want to expire a key.
* A sampled key is added if its idle time is bigger than that of one of the
* keys already in the pool. Keys are always added while there are free
* entries.
*
* We insert keys in place in ascending order, so keys with a smaller
* idle time are on the left, and keys with a higher idle time on the
* right. */
2014-03-20 11:52:12 -04:00
# define EVICTION_SAMPLES_ARRAY_SIZE 16
2014-03-20 06:57:29 -04:00
void evictionPoolPopulate ( dict * sampledict , dict * keydict , struct evictionPoolEntry * pool ) {
2014-03-20 11:52:12 -04:00
int j , k , count ;
dictEntry * _samples [ EVICTION_SAMPLES_ARRAY_SIZE ] ;
dictEntry * * samples ;
/* Try to use a static buffer: this function is a big hit...
* Note : it was actually measured that this helps . */
if ( server . maxmemory_samples < = EVICTION_SAMPLES_ARRAY_SIZE ) {
samples = _samples ;
} else {
samples = zmalloc ( sizeof ( samples [ 0 ] ) * server . maxmemory_samples ) ;
}
# if 1 /* Use bulk get by default. */
count = dictGetRandomKeys ( sampledict , samples , server . maxmemory_samples ) ;
# else
count = server . maxmemory_samples ;
for ( j = 0 ; j < count ; j + + ) samples [ j ] = dictGetRandomKey ( sampledict ) ;
# endif
2014-03-20 06:57:29 -04:00
2014-03-20 11:52:12 -04:00
for ( j = 0 ; j < count ; j + + ) {
2014-03-20 06:57:29 -04:00
unsigned long long idle ;
sds key ;
robj * o ;
2014-03-20 11:20:37 -04:00
dictEntry * de ;
2014-03-20 06:57:29 -04:00
2014-03-20 11:52:12 -04:00
de = samples [ j ] ;
2014-03-20 06:57:29 -04:00
key = dictGetKey ( de ) ;
/* If the dictionary we are sampling from is not the main
* dictionary ( but the expires one ) we need to lookup the key
* again in the key dictionary to obtain the value object . */
if ( sampledict ! = keydict ) de = dictFind ( keydict , key ) ;
o = dictGetVal ( de ) ;
idle = estimateObjectIdleTime ( o ) ;
/* Insert the element inside the pool.
* First , find the first empty bucket or the first populated
* bucket that has an idle time smaller than our idle time . */
k = 0 ;
while ( k < REDIS_EVICTION_POOL_SIZE & &
pool [ k ] . key & &
pool [ k ] . idle < idle ) k + + ;
if ( k = = 0 & & pool [ REDIS_EVICTION_POOL_SIZE - 1 ] . key ! = NULL ) {
/* Can't insert if the element is < the worst element we have
* and there are no empty buckets. */
continue ;
} else if ( k < REDIS_EVICTION_POOL_SIZE & & pool [ k ] . key = = NULL ) {
/* Inserting into empty position. No setup needed before insert. */
} else {
/* Inserting in the middle. Now k points to the first element
* greater than the element to insert . */
if ( pool [ REDIS_EVICTION_POOL_SIZE - 1 ] . key = = NULL ) {
/* Free space on the right? Insert at k shifting
* all the elements from k to end to the right . */
memmove ( pool + k + 1 , pool + k ,
sizeof ( pool [ 0 ] ) * ( REDIS_EVICTION_POOL_SIZE - k - 1 ) ) ;
} else {
/* No free space on right? Insert at k-1 */
k - - ;
/* Shift all elements on the left of k (included) to the
* left , so we discard the element with smaller idle time . */
sdsfree ( pool [ 0 ] . key ) ;
memmove ( pool , pool + 1 , sizeof ( pool [ 0 ] ) * k ) ;
}
}
pool [ k ] . key = sdsdup ( key ) ;
pool [ k ] . idle = idle ;
}
2014-03-20 11:52:12 -04:00
if ( samples ! = _samples ) zfree ( samples ) ;
2014-03-20 06:57:29 -04:00
}
2012-02-04 08:05:54 -05:00
int freeMemoryIfNeeded ( void ) {
size_t mem_used , mem_tofree , mem_freed ;
int slaves = listLength ( server . slaves ) ;
2012-02-06 10:35:43 -05:00
/* Remove the size of slaves output buffers and AOF buffer from the
* count of used memory . */
2012-02-04 08:05:54 -05:00
mem_used = zmalloc_used_memory ( ) ;
if ( slaves ) {
listIter li ;
listNode * ln ;
listRewind ( server . slaves , & li ) ;
while ( ( ln = listNext ( & li ) ) ) {
redisClient * slave = listNodeValue ( ln ) ;
unsigned long obuf_bytes = getClientOutputBufferMemoryUsage ( slave ) ;
if ( obuf_bytes > mem_used )
mem_used = 0 ;
else
mem_used - = obuf_bytes ;
}
}
2012-02-06 10:35:43 -05:00
if ( server . aof_state ! = REDIS_AOF_OFF ) {
mem_used - = sdslen ( server . aof_buf ) ;
Allow an AOF rewrite buffer > 2GB (Fix for issue #504).
During the AOF rewrite process, the parent process needs to accumulate
the new writes in an in-memory buffer: when the child terminates the
AOF rewriting process this buffer (that is, the difference between the
dataset when the rewrite was started, and the current dataset) is
flushed to the new AOF file.
We used to implement this buffer using an sds.c string, but sds.c has a
2GB limit. Sometimes the dataset can be big enough, the amount of writes
so high, and the rewrite process slow enough that we overflow the 2GB
limit, causing a crash, documented on github by issue #504.
In order to prevent this from happening, this commit introduces a new
system to accumulate writes, implemented by a linked list of blocks of
10 MB each, so that we also avoid paying the reallocation cost.
Note that theoretically modern operating systems may implement realloc()
simply as a remapping of the old pages, thus with very good performance;
see for instance the mremap() syscall on Linux. However this is not
always true, and jemalloc by default avoids doing this because there are
issues with the current implementation of mremap().
For this reason we are using a linked list of blocks instead of a single
block that gets reallocated again and again.
The changes in this commit lack testing, which will be performed before
merging into the unstable branch. This fix will not enter 2.4 because it
is too invasive. However 2.4 will log a warning when the AOF rewrite
buffer is near the 2GB limit.
2012-05-22 07:03:41 -04:00
mem_used - = aofRewriteBufferSize ( ) ;
2012-02-06 10:35:43 -05:00
}
2010-11-08 10:12:16 -05:00
2012-02-04 08:05:54 -05:00
/* Check if we are over the memory limit. */
if ( mem_used < = server . maxmemory ) return REDIS_OK ;
if ( server . maxmemory_policy = = REDIS_MAXMEMORY_NO_EVICTION )
return REDIS_ERR ; /* We need to free memory, but policy forbids. */
/* Compute how much memory we need to free. */
mem_tofree = mem_used - server . maxmemory ;
mem_freed = 0 ;
while ( mem_freed < mem_tofree ) {
int j , k , keys_freed = 0 ;
2010-06-21 18:07:48 -04:00
2010-10-14 15:22:21 -04:00
for ( j = 0 ; j < server . dbnum ; j + + ) {
2010-11-02 06:15:09 -04:00
long bestval = 0 ; /* just to prevent warning */
2010-10-14 15:22:21 -04:00
sds bestkey = NULL ;
2014-03-20 11:20:37 -04:00
dictEntry * de ;
2010-10-14 15:22:21 -04:00
redisDb * db = server . db + j ;
dict * dict ;
if ( server . maxmemory_policy = = REDIS_MAXMEMORY_ALLKEYS_LRU | |
server . maxmemory_policy = = REDIS_MAXMEMORY_ALLKEYS_RANDOM )
{
dict = server . db [ j ] . dict ;
} else {
dict = server . db [ j ] . expires ;
}
if ( dictSize ( dict ) = = 0 ) continue ;
/* volatile-random and allkeys-random policy */
if ( server . maxmemory_policy = = REDIS_MAXMEMORY_ALLKEYS_RANDOM | |
server . maxmemory_policy = = REDIS_MAXMEMORY_VOLATILE_RANDOM )
{
de = dictGetRandomKey ( dict ) ;
2011-11-08 11:07:55 -05:00
bestkey = dictGetKey ( de ) ;
2010-10-14 15:22:21 -04:00
}
/* volatile-lru and allkeys-lru policy */
else if ( server . maxmemory_policy = = REDIS_MAXMEMORY_ALLKEYS_LRU | |
server . maxmemory_policy = = REDIS_MAXMEMORY_VOLATILE_LRU )
{
2014-03-20 06:57:29 -04:00
struct evictionPoolEntry * pool = db - > eviction_pool ;
while ( bestkey = = NULL ) {
evictionPoolPopulate ( dict , db - > dict , db - > eviction_pool ) ;
/* Go backward from best to worst element to evict. */
for ( k = REDIS_EVICTION_POOL_SIZE - 1 ; k > = 0 ; k - - ) {
if ( pool [ k ] . key = = NULL ) continue ;
de = dictFind ( dict , pool [ k ] . key ) ;
/* Remove the entry from the pool. */
sdsfree ( pool [ k ] . key ) ;
/* Shift all elements on its right to left. */
memmove ( pool + k , pool + k + 1 ,
sizeof ( pool [ 0 ] ) * ( REDIS_EVICTION_POOL_SIZE - k ) ) ;
/* Clear the element on the right which is empty
* since we shifted one position to the left . */
pool [ REDIS_EVICTION_POOL_SIZE - 1 ] . key = NULL ;
pool [ REDIS_EVICTION_POOL_SIZE - 1 ] . idle = 0 ;
/* If the key exists, it is our pick. Otherwise it is
* a ghost and we need to try the next element. */
if ( de ) {
bestkey = dictGetKey ( de ) ;
break ;
} else {
/* Ghost... */
continue ;
}
2010-10-14 15:22:21 -04:00
}
}
}
/* volatile-ttl */
else if ( server . maxmemory_policy = = REDIS_MAXMEMORY_VOLATILE_TTL ) {
for ( k = 0 ; k < server . maxmemory_samples ; k + + ) {
sds thiskey ;
long thisval ;
de = dictGetRandomKey ( dict ) ;
2011-11-08 11:07:55 -05:00
thiskey = dictGetKey ( de ) ;
thisval = ( long ) dictGetVal ( de ) ;
2010-10-14 15:22:21 -04:00
/* A key expiring sooner (smaller expire unix timestamp) is a better
* candidate for deletion */
if ( bestkey = = NULL | | thisval < bestval ) {
bestkey = thiskey ;
bestval = thisval ;
}
}
}
/* Finally remove the selected key. */
if ( bestkey ) {
2012-02-04 08:05:54 -05:00
long long delta ;
2010-10-14 15:22:21 -04:00
robj * keyobj = createStringObject ( bestkey , sdslen ( bestkey ) ) ;
2011-01-27 10:52:37 -05:00
propagateExpire ( db , keyobj ) ;
2012-02-04 08:05:54 -05:00
/* We compute the amount of memory freed by dbDelete() alone.
* It is possible that actually the memory needed to propagate
* the DEL in AOF and replication link is greater than the one
* we are freeing by removing the key, but we can't account for
* that otherwise we would never exit the loop .
*
* AOF and Output buffer memory will be freed eventually so
* we only care about memory used by the key space . */
delta = ( long long ) zmalloc_used_memory ( ) ;
2010-10-14 15:22:21 -04:00
dbDelete ( db , keyobj ) ;
2012-02-04 08:05:54 -05:00
delta - = ( long long ) zmalloc_used_memory ( ) ;
mem_freed + = delta ;
2010-12-19 09:15:08 -05:00
server . stat_evictedkeys + + ;
2013-01-25 07:19:08 -05:00
notifyKeyspaceEvent ( REDIS_NOTIFY_EVICTED , " evicted " ,
keyobj , db - > id ) ;
2010-10-14 15:22:21 -04:00
decrRefCount ( keyobj ) ;
2012-02-04 08:05:54 -05:00
keys_freed + + ;
/* When the memory to free starts to be big enough, we may
* start spending so much time here that it is impossible to
* deliver data to the slaves fast enough, so we force the
* transmission here inside the loop. */
2012-02-06 10:56:42 -05:00
if ( slaves ) flushSlavesOutputBuffers ( ) ;
2010-10-14 15:22:21 -04:00
}
}
2012-02-07 18:10:20 -05:00
if ( ! keys_freed ) return REDIS_ERR ; /* nothing to free... */
2010-10-14 15:22:21 -04:00
}
2012-02-04 08:05:54 -05:00
return REDIS_OK ;
2010-06-21 18:07:48 -04:00
}
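/* Hedged usage sketch (illustration only): the typical caller pattern, as in
 * processCommand() above, attempts to free memory before running a command
 * and refuses OOM-risky commands when freeing fails. The helper name is an
 * assumption. */
int exampleCheckMemoryBeforeCommand(redisClient *c) {
    if (server.maxmemory && freeMemoryIfNeeded() == REDIS_ERR &&
        (c->cmd->flags & REDIS_CMD_DENYOOM))
    {
        addReply(c,shared.oomerr);
        return REDIS_ERR;
    }
    return REDIS_OK;
}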
/* =================================== Main! ================================ */
# ifdef __linux__
int linuxOvercommitMemoryValue ( void ) {
FILE * fp = fopen ( " /proc/sys/vm/overcommit_memory " , " r " ) ;
char buf [ 64 ] ;
if ( ! fp ) return - 1 ;
if ( fgets ( buf , 64 , fp ) = = NULL ) {
fclose ( fp ) ;
return - 1 ;
}
fclose ( fp ) ;
return atoi ( buf ) ;
}
void linuxOvercommitMemoryWarning ( void ) {
if ( linuxOvercommitMemoryValue ( ) = = 0 ) {
redisLog ( REDIS_WARNING , " WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect. " ) ;
}
}
# endif /* __linux__ */
2010-08-24 11:09:25 -04:00
void createPidFile ( void ) {
/* Try to write the pid file in a best-effort way. */
FILE * fp = fopen ( server . pidfile , " w " ) ;
if ( fp ) {
2010-12-23 06:25:56 -05:00
fprintf ( fp , " %d \n " , ( int ) getpid ( ) ) ;
2010-08-24 11:09:25 -04:00
fclose ( fp ) ;
}
}
2010-06-21 18:07:48 -04:00
void daemonize ( void ) {
int fd ;
if ( fork ( ) ! = 0 ) exit ( 0 ) ; /* parent exits */
setsid ( ) ; /* create a new session */
/* Every output goes to /dev/null. If Redis is daemonized but
* the ' logfile ' is set to ' stdout ' in the configuration file
* it will not log at all . */
if ( ( fd = open ( " /dev/null " , O_RDWR , 0 ) ) ! = - 1 ) {
dup2 ( fd , STDIN_FILENO ) ;
dup2 ( fd , STDOUT_FILENO ) ;
dup2 ( fd , STDERR_FILENO ) ;
if ( fd > STDERR_FILENO ) close ( fd ) ;
}
}
void version ( ) {
2012-11-29 08:20:08 -05:00
printf ( " Redis server v=%s sha=%s:%d malloc=%s bits=%d build=%llx \n " ,
2012-04-12 05:50:18 -04:00
REDIS_VERSION ,
redisGitSHA1 ( ) ,
atoi ( redisGitDirty ( ) ) > 0 ,
ZMALLOC_LIB ,
2012-11-29 08:20:08 -05:00
sizeof ( long ) = = 4 ? 32 : 64 ,
2013-02-27 06:33:27 -05:00
( unsigned long long ) redisBuildId ( ) ) ;
2010-06-21 18:07:48 -04:00
exit ( 0 ) ;
}
void usage ( ) {
2011-12-01 07:50:36 -05:00
fprintf ( stderr , " Usage: ./redis-server [/path/to/redis.conf] [options] \n " ) ;
2010-06-21 18:07:48 -04:00
fprintf ( stderr , " ./redis-server - (read config from stdin) \n " ) ;
2011-12-01 07:50:36 -05:00
fprintf ( stderr , " ./redis-server -v or --version \n " ) ;
2012-03-16 12:17:39 -04:00
fprintf ( stderr , " ./redis-server -h or --help \n " ) ;
fprintf ( stderr , " ./redis-server --test-memory <megabytes> \n \n " ) ;
2011-12-01 07:50:36 -05:00
fprintf ( stderr , " Examples: \n " ) ;
fprintf ( stderr , " ./redis-server (run the server with default conf) \n " ) ;
fprintf ( stderr , " ./redis-server /etc/redis/6379.conf \n " ) ;
fprintf ( stderr , " ./redis-server --port 7777 \n " ) ;
fprintf ( stderr , " ./redis-server --port 7777 --slaveof 127.0.0.1 8888 \n " ) ;
2012-07-23 06:54:52 -04:00
fprintf ( stderr , " ./redis-server /etc/myredis.conf --loglevel verbose \n \n " ) ;
fprintf ( stderr , " Sentinel mode: \n " ) ;
fprintf ( stderr , " ./redis-server /etc/sentinel.conf --sentinel \n " ) ;
2010-06-21 18:07:48 -04:00
exit ( 1 ) ;
}
void redisAsciiArt(void) {
#include "asciilogo.h"
    char *buf = zmalloc(1024*16);
    char *mode = "stand alone";

    if (server.cluster_enabled) mode = "cluster";
    else if (server.sentinel_mode) mode = "sentinel";

    snprintf(buf,1024*16,ascii_logo,
        REDIS_VERSION,
        redisGitSHA1(),
        strtol(redisGitDirty(),NULL,10) > 0,
        (sizeof(long) == 8) ? "64" : "32",
        mode, server.port,
        (long) getpid()
    );
    redisLogRaw(REDIS_NOTICE|REDIS_LOG_RAW,buf);
    zfree(buf);
}

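/* SIGTERM handler: only logs the event and raises the shutdown_asap flag.
 * The actual shutdown is performed later from the periodic serverCron() job,
 * since it is not safe to do real work from inside a signal handler. */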
static void sigtermHandler(int sig) {
    REDIS_NOTUSED(sig);

    redisLogFromHandler(REDIS_WARNING,"Received SIGTERM, scheduling shutdown...");
    server.shutdown_asap = 1;
}

void setupSignalHandlers(void) {
    struct sigaction act;

    /* When the SA_SIGINFO flag is set in sa_flags then sa_sigaction is used.
     * Otherwise, sa_handler is used. */
    sigemptyset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = sigtermHandler;
    sigaction(SIGTERM, &act, NULL);

#ifdef HAVE_BACKTRACE
    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_NODEFER | SA_RESETHAND | SA_SIGINFO;
    act.sa_sigaction = sigsegvHandler;
    sigaction(SIGSEGV, &act, NULL);
    sigaction(SIGBUS, &act, NULL);
    sigaction(SIGFPE, &act, NULL);
    sigaction(SIGILL, &act, NULL);
#endif
    return;
}

void memtest(size_t megabytes, int passes);

/* Returns 1 if there is --sentinel among the arguments or if
 * argv[0] is exactly "redis-sentinel". */
int checkForSentinelMode(int argc, char **argv) {
    int j;

    if (strstr(argv[0],"redis-sentinel") != NULL) return 1;
    for (j = 1; j < argc; j++)
        if (!strcmp(argv[j],"--sentinel")) return 1;
    return 0;
}

/* Function called at startup to load RDB or AOF file in memory. */
void loadDataFromDisk(void) {
    long long start = ustime();

    if (server.aof_state == REDIS_AOF_ON) {
        if (loadAppendOnlyFile(server.aof_filename) == REDIS_OK)
            redisLog(REDIS_NOTICE,"DB loaded from append only file: %.3f seconds",(float)(ustime()-start)/1000000);
    } else {
        if (rdbLoad(server.rdb_filename) == REDIS_OK) {
            redisLog(REDIS_NOTICE,"DB loaded from disk: %.3f seconds",
                (float)(ustime()-start)/1000000);
        } else if (errno != ENOENT) {
            redisLog(REDIS_WARNING,"Fatal error loading the DB: %s. Exiting.",strerror(errno));
            exit(1);
        }
    }
}

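/* Out of memory callback installed with zmalloc_set_oom_handler() in main():
 * log the size of the failed allocation and abort via redisPanic(). */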
void redisOutOfMemoryHandler(size_t allocation_size) {
    redisLog(REDIS_WARNING,"Out Of Memory allocating %zu bytes!",
        allocation_size);
    redisPanic("Redis aborting for OUT OF MEMORY");
}

void redisSetProcTitle(char *title) {
#ifdef USE_SETPROCTITLE
    char *server_mode = "";
    if (server.cluster_enabled) server_mode = " [cluster]";
    else if (server.sentinel_mode) server_mode = " [sentinel]";

    setproctitle("%s %s:%d%s",
        title,
        server.bindaddr_count ? server.bindaddr[0] : "*",
        server.port,
        server_mode);
#else
    REDIS_NOTUSED(title);
#endif
}

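/* Server entry point: initialize the allocator and support libraries, parse
 * the command line and the configuration, then start serving requests from
 * the event loop. */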
int main(int argc, char **argv) {
    struct timeval tv;

    /* We need to initialize our libraries, and the server configuration. */
#ifdef INIT_SETPROCTITLE_REPLACEMENT
    spt_init(argc, argv);
#endif
    setlocale(LC_COLLATE,"");
    zmalloc_enable_thread_safeness();
    zmalloc_set_oom_handler(redisOutOfMemoryHandler);
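
    /* Seed the libc PRNG and the hash function used by the dict
     * implementation, so that hash table behavior is not trivially
     * predictable across runs. */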
    srand(time(NULL)^getpid());
    gettimeofday(&tv,NULL);
    dictSetHashFunctionSeed(tv.tv_sec^tv.tv_usec^getpid());
    server.sentinel_mode = checkForSentinelMode(argc,argv);
    initServerConfig();

    /* We need to init sentinel right now as parsing the configuration file
     * in sentinel mode will have the effect of populating the sentinel
     * data structures with master nodes to monitor. */
    if (server.sentinel_mode) {
        initSentinelConfig();
        initSentinel();
    }

    if (argc >= 2) {
        int j = 1; /* First option to parse in argv[] */
        sds options = sdsempty();
        char *configfile = NULL;

        /* Handle special options --help and --version */
        if (strcmp(argv[1], "-v") == 0 ||
            strcmp(argv[1], "--version") == 0) version();
        if (strcmp(argv[1], "--help") == 0 ||
            strcmp(argv[1], "-h") == 0) usage();
        if (strcmp(argv[1], "--test-memory") == 0) {
            if (argc == 3) {
                memtest(atoi(argv[2]),50);
                exit(0);
            } else {
                fprintf(stderr,"Please specify the amount of memory to test in megabytes.\n");
                fprintf(stderr,"Example: ./redis-server --test-memory 4096\n\n");
                exit(1);
            }
        }

        /* First argument is the config file name? */
        if (argv[j][0] != '-' || argv[j][1] != '-')
            configfile = argv[j++];

        /* All the other options are parsed and conceptually appended to the
         * configuration file. For instance --port 6380 will generate the
         * string "port 6380\n" to be parsed after the actual file name
         * is parsed, if any. */
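        /* For example "--port 7777 --slaveof 127.0.0.1 8888" produces an
         * options string similar to:
         *
         *   port "7777"
         *   slaveof "127.0.0.1" "8888"
         *
         * Option names are copied as-is, while their arguments are appended
         * with sdscatrepr() and therefore end up quoted. */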
        while (j != argc) {
            if (argv[j][0] == '-' && argv[j][1] == '-') {
                /* Option name */
                if (sdslen(options)) options = sdscat(options,"\n");
                options = sdscat(options,argv[j]+2);
                options = sdscat(options," ");
            } else {
                /* Option argument */
                options = sdscatrepr(options,argv[j],strlen(argv[j]));
                options = sdscat(options," ");
            }
            j++;
        }

        if (configfile) server.configfile = getAbsolutePath(configfile);
        resetServerSaveParams();
        loadServerConfig(configfile, options);
        sdsfree(options);
    } else {
        redisLog(REDIS_WARNING, "Warning: no config file specified, using the default config. In order to specify a config file use %s /path/to/%s.conf", argv[0], server.sentinel_mode ? "sentinel" : "redis");
    }

    if (server.daemonize) daemonize();
    initServer();
    if (server.daemonize) createPidFile();
    redisSetProcTitle(argv[0]);
    redisAsciiArt();

    if (!server.sentinel_mode) {
        /* Things not needed when running in Sentinel mode. */
        redisLog(REDIS_WARNING,"Server started, Redis version " REDIS_VERSION);
#ifdef __linux__
        linuxOvercommitMemoryWarning();
#endif
        loadDataFromDisk();
        if (server.cluster_enabled) {
            if (verifyClusterConfigWithData() == REDIS_ERR) {
                redisLog(REDIS_WARNING,
                    "You can't have keys in a DB different than DB 0 when in "
                    "Cluster mode. Exiting.");
                exit(1);
            }
        }
        if (server.ipfd_count > 0)
            redisLog(REDIS_NOTICE,"The server is now ready to accept connections on port %d", server.port);
        if (server.sofd > 0)
            redisLog(REDIS_NOTICE,"The server is now ready to accept connections at %s", server.unixsocket);
    } else {
        sentinelIsRunning();
    }

    /* Warn the user about a suspicious maxmemory setting. */
    if (server.maxmemory > 0 && server.maxmemory < 1024*1024) {
        redisLog(REDIS_WARNING,"WARNING: You specified a maxmemory value that is less than 1MB (current value is %llu bytes). Are you sure this is what you really want?", server.maxmemory);
    }
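
    /* Enter the event loop. beforeSleep() is invoked before each blocking
     * wait for events, and aeMain() only returns when the server is
     * shutting down. */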
    aeSetBeforeSleepProc(server.el,beforeSleep);
    aeMain(server.el);
    aeDeleteEventLoop(server.el);
    return 0;
}

/* The End */