2012-11-08 12:25:23 -05:00
/*
* Copyright ( c ) 2009 - 2012 , Salvatore Sanfilippo < antirez at gmail dot com >
* All rights reserved .
*
* Redistribution and use in source and binary forms , with or without
* modification , are permitted provided that the following conditions are met :
*
* * Redistributions of source code must retain the above copyright notice ,
* this list of conditions and the following disclaimer .
* * Redistributions in binary form must reproduce the above copyright
* notice , this list of conditions and the following disclaimer in the
* documentation and / or other materials provided with the distribution .
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission .
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS " AS IS "
* AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO , THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR
* CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , DATA , OR PROFITS ; OR BUSINESS
* INTERRUPTION ) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY , WHETHER IN
* CONTRACT , STRICT LIABILITY , OR TORT ( INCLUDING NEGLIGENCE OR OTHERWISE )
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE , EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE .
*/
2015-07-26 09:14:57 -04:00
# include "server.h"
2011-10-23 04:42:16 -04:00
# include "lzf.h" /* LZF compression library */
2012-01-03 01:14:10 -05:00
# include "zipmap.h"
2012-04-09 16:40:41 -04:00
# include "endianconv.h"
2011-10-23 04:42:16 -04:00
2010-06-21 18:07:48 -04:00
# include <math.h>
2010-07-01 15:13:38 -04:00
# include <sys/types.h>
# include <sys/time.h>
# include <sys/resource.h>
# include <sys/wait.h>
# include <arpa/inet.h>
2010-11-08 05:52:03 -05:00
# include <sys/stat.h>
2016-02-15 10:14:56 -05:00
# include <sys/param.h>
2010-06-21 18:07:48 -04:00
2016-07-01 09:26:55 -04:00
/* Wrapper macro so the reported line number is always the call site of the
 * corruption check inside this file. */
#define rdbExitReportCorruptRDB(...) rdbCheckThenExit(__LINE__,__VA_ARGS__)

/* State and helpers shared with the redis-check-rdb tool (defined there). */
extern int rdbCheckMode;
void rdbCheckError(const char *fmt, ...);
void rdbCheckSetError(const char *fmt, ...);
2016-07-01 03:36:52 -04:00
2016-07-01 09:26:55 -04:00
void rdbCheckThenExit ( int linenum , char * reason , . . . ) {
va_list ap ;
char msg [ 1024 ] ;
int len ;
len = snprintf ( msg , sizeof ( msg ) ,
" Internal error in RDB reading function at rdb.c:%d -> " , linenum ) ;
va_start ( ap , reason ) ;
vsnprintf ( msg + len , sizeof ( msg ) - len , reason , ap ) ;
va_end ( ap ) ;
if ( ! rdbCheckMode ) {
serverLog ( LL_WARNING , " %s " , msg ) ;
2016-07-01 03:36:52 -04:00
char * argv [ 2 ] = { " " , server . rdb_filename } ;
2017-07-10 07:38:23 -04:00
redis_check_rdb_main ( 2 , argv , NULL ) ;
2016-07-01 03:36:52 -04:00
} else {
2016-07-01 09:26:55 -04:00
rdbCheckError ( " %s " , msg ) ;
2016-07-01 03:36:52 -04:00
}
2014-05-12 11:44:37 -04:00
exit ( 1 ) ;
}
2011-05-13 11:31:00 -04:00
/* Low level write helper: emit 'len' raw bytes from 'p' into the 'rdb'
 * stream. A NULL 'rdb' skips the write entirely (dry run used by callers to
 * compute serialized sizes). Returns 'len' on success, -1 on write error. */
static int rdbWriteRaw(rio *rdb, void *p, size_t len) {
    if (rdb && rioWrite(rdb,p,len) == 0) return -1;
    return len;
}
2011-05-13 11:31:00 -04:00
/* Write a single type/opcode byte to the RDB stream. Returns bytes written
 * (1) on success, -1 on error. */
int rdbSaveType(rio *rdb, unsigned char type) {
    return rdbWriteRaw(rdb,&type,1);
}
2012-06-02 04:21:57 -04:00
/* Load a "type" in RDB format, that is a one byte unsigned integer.
* This function is not only used to load object types , but also special
* " types " like the end - of - file type , the EXPIRE type , and so forth . */
2011-05-13 17:24:19 -04:00
int rdbLoadType ( rio * rdb ) {
unsigned char type ;
if ( rioRead ( rdb , & type , 1 ) = = 0 ) return - 1 ;
return type ;
2010-06-21 18:07:48 -04:00
}
2011-05-13 17:24:19 -04:00
/* Load a 32 bit, seconds resolution, UNIX time from the stream.
 * Returns -1 on short read. */
time_t rdbLoadTime(rio *rdb) {
    int32_t t32;
    if (rioRead(rdb,&t32,4) == 0) return -1;
    return (time_t)t32;
}
2011-11-09 12:47:48 -05:00
/* Save a millisecond resolution UNIX time as a raw 64 bit integer. */
int rdbSaveMillisecondTime(rio *rdb, long long t) {
    int64_t t64 = (int64_t)t;
    return rdbWriteRaw(rdb,&t64,8);
}
/* Load a millisecond resolution UNIX time stored as a raw 64 bit integer.
 * Returns -1 on short read. */
long long rdbLoadMillisecondTime(rio *rdb) {
    int64_t t64;
    if (rioRead(rdb,&t64,8) == 0) return -1;
    return (long long)t64;
}
2011-05-13 17:24:19 -04:00
/* Saves an encoded length. The first two bits in the first byte are used to
2015-07-27 03:41:48 -04:00
* hold the encoding type . See the RDB_ * definitions for more information
2011-05-13 17:24:19 -04:00
* on the types of encoding . */
2016-06-01 05:35:47 -04:00
int rdbSaveLen ( rio * rdb , uint64_t len ) {
2010-06-21 18:07:48 -04:00
unsigned char buf [ 2 ] ;
2011-05-13 11:31:00 -04:00
size_t nwritten ;
2010-06-21 18:07:48 -04:00
if ( len < ( 1 < < 6 ) ) {
/* Save a 6 bit len */
2015-07-27 03:41:48 -04:00
buf [ 0 ] = ( len & 0xFF ) | ( RDB_6BITLEN < < 6 ) ;
2011-05-13 11:31:00 -04:00
if ( rdbWriteRaw ( rdb , buf , 1 ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten = 1 ;
2010-06-21 18:07:48 -04:00
} else if ( len < ( 1 < < 14 ) ) {
/* Save a 14 bit len */
2015-07-27 03:41:48 -04:00
buf [ 0 ] = ( ( len > > 8 ) & 0xFF ) | ( RDB_14BITLEN < < 6 ) ;
2010-06-21 18:07:48 -04:00
buf [ 1 ] = len & 0xFF ;
2011-05-13 11:31:00 -04:00
if ( rdbWriteRaw ( rdb , buf , 2 ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten = 2 ;
2016-06-01 05:35:47 -04:00
} else if ( len < = UINT32_MAX ) {
2010-06-21 18:07:48 -04:00
/* Save a 32 bit len */
2016-06-01 05:35:47 -04:00
buf [ 0 ] = RDB_32BITLEN ;
2011-05-13 11:31:00 -04:00
if ( rdbWriteRaw ( rdb , buf , 1 ) = = - 1 ) return - 1 ;
2016-06-01 05:35:47 -04:00
uint32_t len32 = htonl ( len ) ;
if ( rdbWriteRaw ( rdb , & len32 , 4 ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten = 1 + 4 ;
2016-06-01 05:35:47 -04:00
} else {
/* Save a 64 bit len */
buf [ 0 ] = RDB_64BITLEN ;
if ( rdbWriteRaw ( rdb , buf , 1 ) = = - 1 ) return - 1 ;
len = htonu64 ( len ) ;
if ( rdbWriteRaw ( rdb , & len , 8 ) = = - 1 ) return - 1 ;
nwritten = 1 + 8 ;
2010-06-21 18:07:48 -04:00
}
2010-11-21 09:39:34 -05:00
return nwritten ;
2010-06-21 18:07:48 -04:00
}
2016-06-01 14:18:28 -04:00
/* Load an encoded length. If the loaded length is a normal length as stored
 * with rdbSaveLen(), the read length is set to '*lenptr'. If instead the
 * loaded length describes a special encoding that follows, then '*isencoded'
 * is set to 1 and the encoding format is stored at '*lenptr'.
 *
 * See the RDB_ENC_* definitions in rdb.h for more information on special
 * encodings.
 *
 * The function returns -1 on error, 0 on success. */
int rdbLoadLenByRef(rio *rdb, int *isencoded, uint64_t *lenptr) {
    unsigned char buf[2];
    int type;

    if (isencoded) *isencoded = 0;
    if (rioRead(rdb,buf,1) == 0) return -1;
    type = (buf[0]&0xC0)>>6;

    if (type == RDB_ENCVAL) {
        /* Special encoding: report the 6 bit encoding type to the caller. */
        if (isencoded) *isencoded = 1;
        *lenptr = buf[0]&0x3F;
    } else if (type == RDB_6BITLEN) {
        /* Plain 6 bit length, stored in the header byte itself. */
        *lenptr = buf[0]&0x3F;
    } else if (type == RDB_14BITLEN) {
        /* 14 bit length: one more byte follows. */
        if (rioRead(rdb,buf+1,1) == 0) return -1;
        *lenptr = ((buf[0]&0x3F)<<8)|buf[1];
    } else if (buf[0] == RDB_32BITLEN) {
        /* 32 bit length stored big endian after the header byte. */
        uint32_t len;
        if (rioRead(rdb,&len,4) == 0) return -1;
        *lenptr = ntohl(len);
    } else if (buf[0] == RDB_64BITLEN) {
        /* 64 bit length stored big endian after the header byte. */
        uint64_t len;
        if (rioRead(rdb,&len,8) == 0) return -1;
        *lenptr = ntohu64(len);
    } else {
        rdbExitReportCorruptRDB(
            "Unknown length encoding %d in rdbLoadLen()",type);
        return -1; /* Never reached. */
    }
    return 0;
}
/* This is like rdbLoadLenByRef() but directly returns the value read
 * from the RDB stream, signaling an error by returning RDB_LENERR
 * (since it is a too large count to be applicable in any Redis data
 * structure). */
uint64_t rdbLoadLen(rio *rdb, int *isencoded) {
    uint64_t len;

    if (rdbLoadLenByRef(rdb,isencoded,&len) == -1) return RDB_LENERR;
    return len;
}
/* Encodes the "value" argument as integer when it fits in the supported ranges
* for encoded types . If the function successfully encodes the integer , the
* representation is stored in the buffer pointer to by " enc " and the string
* length is returned . Otherwise 0 is returned . */
2010-06-21 18:07:48 -04:00
int rdbEncodeInteger ( long long value , unsigned char * enc ) {
if ( value > = - ( 1 < < 7 ) & & value < = ( 1 < < 7 ) - 1 ) {
2015-07-27 03:41:48 -04:00
enc [ 0 ] = ( RDB_ENCVAL < < 6 ) | RDB_ENC_INT8 ;
2010-06-21 18:07:48 -04:00
enc [ 1 ] = value & 0xFF ;
return 2 ;
} else if ( value > = - ( 1 < < 15 ) & & value < = ( 1 < < 15 ) - 1 ) {
2015-07-27 03:41:48 -04:00
enc [ 0 ] = ( RDB_ENCVAL < < 6 ) | RDB_ENC_INT16 ;
2010-06-21 18:07:48 -04:00
enc [ 1 ] = value & 0xFF ;
enc [ 2 ] = ( value > > 8 ) & 0xFF ;
return 3 ;
} else if ( value > = - ( ( long long ) 1 < < 31 ) & & value < = ( ( long long ) 1 < < 31 ) - 1 ) {
2015-07-27 03:41:48 -04:00
enc [ 0 ] = ( RDB_ENCVAL < < 6 ) | RDB_ENC_INT32 ;
2010-06-21 18:07:48 -04:00
enc [ 1 ] = value & 0xFF ;
enc [ 2 ] = ( value > > 8 ) & 0xFF ;
enc [ 3 ] = ( value > > 16 ) & 0xFF ;
enc [ 4 ] = ( value > > 24 ) & 0xFF ;
return 5 ;
} else {
return 0 ;
}
}
2011-05-13 17:24:19 -04:00
/* Loads an integer-encoded object with the specified encoding type "enctype".
2014-12-23 13:26:34 -05:00
* The returned value changes according to the flags , see
* rdbGenerincLoadStringObject ( ) for more info . */
2016-05-18 05:45:40 -04:00
void * rdbLoadIntegerObject ( rio * rdb , int enctype , int flags , size_t * lenptr ) {
2014-12-23 13:26:34 -05:00
int plain = flags & RDB_LOAD_PLAIN ;
2015-07-31 12:01:23 -04:00
int sds = flags & RDB_LOAD_SDS ;
2014-12-23 13:26:34 -05:00
int encode = flags & RDB_LOAD_ENC ;
2011-05-13 17:24:19 -04:00
unsigned char enc [ 4 ] ;
long long val ;
2015-07-27 03:41:48 -04:00
if ( enctype = = RDB_ENC_INT8 ) {
2011-05-13 17:24:19 -04:00
if ( rioRead ( rdb , enc , 1 ) = = 0 ) return NULL ;
val = ( signed char ) enc [ 0 ] ;
2015-07-27 03:41:48 -04:00
} else if ( enctype = = RDB_ENC_INT16 ) {
2011-05-13 17:24:19 -04:00
uint16_t v ;
if ( rioRead ( rdb , enc , 2 ) = = 0 ) return NULL ;
v = enc [ 0 ] | ( enc [ 1 ] < < 8 ) ;
val = ( int16_t ) v ;
2015-07-27 03:41:48 -04:00
} else if ( enctype = = RDB_ENC_INT32 ) {
2011-05-13 17:24:19 -04:00
uint32_t v ;
if ( rioRead ( rdb , enc , 4 ) = = 0 ) return NULL ;
v = enc [ 0 ] | ( enc [ 1 ] < < 8 ) | ( enc [ 2 ] < < 16 ) | ( enc [ 3 ] < < 24 ) ;
val = ( int32_t ) v ;
} else {
val = 0 ; /* anti-warning */
2016-07-01 09:26:55 -04:00
rdbExitReportCorruptRDB ( " Unknown RDB integer encoding type %d " , enctype ) ;
2011-05-13 17:24:19 -04:00
}
2015-07-31 12:01:23 -04:00
if ( plain | | sds ) {
2015-07-27 03:41:48 -04:00
char buf [ LONG_STR_SIZE ] , * p ;
2014-12-23 13:26:34 -05:00
int len = ll2string ( buf , sizeof ( buf ) , val ) ;
2016-05-18 05:45:40 -04:00
if ( lenptr ) * lenptr = len ;
2015-07-31 12:01:23 -04:00
p = plain ? zmalloc ( len ) : sdsnewlen ( NULL , len ) ;
2014-12-23 13:26:34 -05:00
memcpy ( p , buf , len ) ;
return p ;
} else if ( encode ) {
2011-05-13 17:24:19 -04:00
return createStringObjectFromLongLong ( val ) ;
2014-12-23 13:26:34 -05:00
} else {
2015-07-26 09:28:00 -04:00
return createObject ( OBJ_STRING , sdsfromlonglong ( val ) ) ;
2014-12-23 13:26:34 -05:00
}
2011-05-13 17:24:19 -04:00
}
2010-06-21 18:07:48 -04:00
/* String objects in the form "2391" "-100" without any space and with a
 * range of values that can fit in an 8, 16 or 32 bit signed value can be
 * encoded as integers to save space. */
int rdbTryIntegerEncoding(char *s, size_t len, unsigned char *enc) {
    long long value;
    char *endptr, buf[32];

    /* Check if it's possible to encode this value as a number. */
    value = strtoll(s,&endptr,10);
    if (endptr[0] != '\0') return 0;
    ll2string(buf,32,value);

    /* If the number converted back into a string is not identical
     * then it's not possible to encode the string as integer. */
    if (strlen(buf) != len || memcmp(buf,s,len)) return 0;

    return rdbEncodeInteger(value,enc);
}
2015-01-18 15:54:30 -05:00
/* Emit an already LZF-compressed blob in RDB format:
 * [LZF marker byte][compressed len][original len][compressed payload].
 * Returns the number of bytes written, or -1 on write error. */
ssize_t rdbSaveLzfBlob(rio *rdb, void *data, size_t compress_len,
                       size_t original_len) {
    unsigned char byte;
    ssize_t n, nwritten = 0;

    /* Data compressed! Let's save it on disk. */
    byte = (RDB_ENCVAL<<6)|RDB_ENC_LZF;
    if ((n = rdbWriteRaw(rdb,&byte,1)) == -1) goto writeerr;
    nwritten += n;

    if ((n = rdbSaveLen(rdb,compress_len)) == -1) goto writeerr;
    nwritten += n;

    if ((n = rdbSaveLen(rdb,original_len)) == -1) goto writeerr;
    nwritten += n;

    if ((n = rdbWriteRaw(rdb,data,compress_len)) == -1) goto writeerr;
    nwritten += n;

    return nwritten;

writeerr:
    return -1;
}
2015-01-18 15:54:30 -05:00
/* Try to save the string 's' of length 'len' LZF-compressed.
 * Returns the bytes written on success, 0 when compression is not
 * worthwhile or fails (caller saves verbatim), -1 on write error. */
ssize_t rdbSaveLzfStringObject(rio *rdb, unsigned char *s, size_t len) {
    size_t comprlen, outlen;
    void *out;

    /* We require at least four bytes compression for this to be worth it. */
    if (len <= 4) return 0;
    outlen = len-4;
    if ((out = zmalloc(outlen+1)) == NULL) return 0;
    comprlen = lzf_compress(s,len,out,outlen);
    if (comprlen == 0) {
        zfree(out);
        return 0;
    }
    ssize_t nwritten = rdbSaveLzfBlob(rdb,out,comprlen,len);
    zfree(out);
    return nwritten;
}
2014-12-23 13:26:34 -05:00
/* Load an LZF compressed string in RDB format. The returned value
* changes according to ' flags ' . For more info check the
* rdbGenericLoadStringObject ( ) function . */
2016-05-18 05:45:40 -04:00
void * rdbLoadLzfStringObject ( rio * rdb , int flags , size_t * lenptr ) {
2014-12-23 13:26:34 -05:00
int plain = flags & RDB_LOAD_PLAIN ;
2015-07-31 12:01:23 -04:00
int sds = flags & RDB_LOAD_SDS ;
2016-06-01 14:18:28 -04:00
uint64_t len , clen ;
2011-05-13 17:24:19 -04:00
unsigned char * c = NULL ;
2015-07-31 12:01:23 -04:00
char * val = NULL ;
2011-05-13 17:24:19 -04:00
2015-07-27 03:41:48 -04:00
if ( ( clen = rdbLoadLen ( rdb , NULL ) ) = = RDB_LENERR ) return NULL ;
if ( ( len = rdbLoadLen ( rdb , NULL ) ) = = RDB_LENERR ) return NULL ;
2011-05-13 17:24:19 -04:00
if ( ( c = zmalloc ( clen ) ) = = NULL ) goto err ;
2014-12-23 13:26:34 -05:00
/* Allocate our target according to the uncompressed size. */
if ( plain ) {
val = zmalloc ( len ) ;
2016-05-18 05:45:40 -04:00
if ( lenptr ) * lenptr = len ;
2014-12-23 13:26:34 -05:00
} else {
2015-07-31 12:01:23 -04:00
val = sdsnewlen ( NULL , len ) ;
2014-12-23 13:26:34 -05:00
}
/* Load the compressed representation and uncompress it to target. */
2011-05-13 17:24:19 -04:00
if ( rioRead ( rdb , c , clen ) = = 0 ) goto err ;
2016-07-01 05:59:25 -04:00
if ( lzf_decompress ( c , clen , val , len ) = = 0 ) {
if ( rdbCheckMode ) rdbCheckSetError ( " Invalid LZF compressed string " ) ;
goto err ;
}
2011-05-13 17:24:19 -04:00
zfree ( c ) ;
2014-12-23 13:26:34 -05:00
2015-07-31 12:01:23 -04:00
if ( plain | | sds ) {
2014-12-23 13:26:34 -05:00
return val ;
2015-07-31 12:01:23 -04:00
} else {
2015-07-26 09:28:00 -04:00
return createObject ( OBJ_STRING , val ) ;
2015-07-31 12:01:23 -04:00
}
2011-05-13 17:24:19 -04:00
err :
zfree ( c ) ;
2014-12-23 13:26:34 -05:00
if ( plain )
zfree ( val ) ;
else
sdsfree ( val ) ;
2011-05-13 17:24:19 -04:00
return NULL ;
}
2013-01-16 12:00:20 -05:00
/* Save a string object as [len][data] on disk. If the object is a string
2011-02-28 03:56:48 -05:00
* representation of an integer value we try to save it in a special form */
2015-01-18 15:54:30 -05:00
ssize_t rdbSaveRawString ( rio * rdb , unsigned char * s , size_t len ) {
2010-06-21 18:07:48 -04:00
int enclen ;
2015-01-18 15:54:30 -05:00
ssize_t n , nwritten = 0 ;
2010-06-21 18:07:48 -04:00
/* Try integer encoding */
if ( len < = 11 ) {
unsigned char buf [ 5 ] ;
if ( ( enclen = rdbTryIntegerEncoding ( ( char * ) s , len , buf ) ) > 0 ) {
2011-05-13 11:31:00 -04:00
if ( rdbWriteRaw ( rdb , buf , enclen ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
return enclen ;
2010-06-21 18:07:48 -04:00
}
}
/* Try LZF compression - under 20 bytes it's unable to compress even
* aaaaaaaaaaaaaaaaaa so skip it */
2011-12-21 06:22:13 -05:00
if ( server . rdb_compression & & len > 20 ) {
2011-05-13 11:31:00 -04:00
n = rdbSaveLzfStringObject ( rdb , s , len ) ;
2010-11-21 09:39:34 -05:00
if ( n = = - 1 ) return - 1 ;
if ( n > 0 ) return n ;
/* Return value of 0 means data can't be compressed, save the old way */
2010-06-21 18:07:48 -04:00
}
/* Store verbatim */
2011-05-13 11:31:00 -04:00
if ( ( n = rdbSaveLen ( rdb , len ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
if ( len > 0 ) {
2011-05-13 11:31:00 -04:00
if ( rdbWriteRaw ( rdb , s , len ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = len ;
}
return nwritten ;
2010-06-21 18:07:48 -04:00
}
/* Save a long long value as either an encoded string or a string. */
2015-01-18 15:54:30 -05:00
ssize_t rdbSaveLongLongAsStringObject ( rio * rdb , long long value ) {
2010-06-21 18:07:48 -04:00
unsigned char buf [ 32 ] ;
2015-01-18 15:54:30 -05:00
ssize_t n , nwritten = 0 ;
2010-06-21 18:07:48 -04:00
int enclen = rdbEncodeInteger ( value , buf ) ;
if ( enclen > 0 ) {
2011-05-13 11:31:00 -04:00
return rdbWriteRaw ( rdb , buf , enclen ) ;
2010-06-21 18:07:48 -04:00
} else {
/* Encode as string */
enclen = ll2string ( ( char * ) buf , 32 , value ) ;
2015-07-26 09:29:53 -04:00
serverAssert ( enclen < 32 ) ;
2011-05-13 11:31:00 -04:00
if ( ( n = rdbSaveLen ( rdb , enclen ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2011-05-13 11:31:00 -04:00
if ( ( n = rdbWriteRaw ( rdb , buf , enclen ) ) = = - 1 ) return - 1 ;
2010-11-21 10:12:25 -05:00
nwritten + = n ;
2010-06-21 18:07:48 -04:00
}
2010-11-21 09:39:34 -05:00
return nwritten ;
2010-06-21 18:07:48 -04:00
}
2015-07-31 12:01:23 -04:00
/* Like rdbSaveRawString() gets a Redis object instead. */
2011-05-13 11:31:00 -04:00
int rdbSaveStringObject ( rio * rdb , robj * obj ) {
2010-06-21 18:07:48 -04:00
/* Avoid to decode the object, then encode it again, if the
2013-01-16 12:00:20 -05:00
* object is already integer encoded . */
2015-07-26 09:28:00 -04:00
if ( obj - > encoding = = OBJ_ENCODING_INT ) {
2011-05-13 11:31:00 -04:00
return rdbSaveLongLongAsStringObject ( rdb , ( long ) obj - > ptr ) ;
2010-06-21 18:07:48 -04:00
} else {
2015-07-26 09:29:53 -04:00
serverAssertWithInfo ( NULL , obj , sdsEncodedObject ( obj ) ) ;
2011-05-13 11:31:00 -04:00
return rdbSaveRawString ( rdb , obj - > ptr , sdslen ( obj - > ptr ) ) ;
2010-06-21 18:07:48 -04:00
}
}
2014-12-23 13:26:34 -05:00
/* Load a string object from an RDB file according to flags:
 *
 * RDB_LOAD_NONE (no flags): load an RDB object, unencoded.
 * RDB_LOAD_ENC: If the returned type is a Redis object, try to
 *               encode it in a special way to be more memory
 *               efficient. When this flag is passed the function
 *               no longer guarantees that obj->ptr is an SDS string.
 * RDB_LOAD_PLAIN: Return a plain string allocated with zmalloc()
 *                 instead of a Redis object with an sds in it.
 * RDB_LOAD_SDS: Return an SDS string instead of a Redis object.
 *
 * On I/O error NULL is returned. */
void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) {
    int encode = flags & RDB_LOAD_ENC;
    int plain = flags & RDB_LOAD_PLAIN;
    int as_sds = flags & RDB_LOAD_SDS;
    int isencoded;
    uint64_t len;

    len = rdbLoadLen(rdb,&isencoded);
    if (isencoded) {
        switch(len) {
        case RDB_ENC_INT8:
        case RDB_ENC_INT16:
        case RDB_ENC_INT32:
            return rdbLoadIntegerObject(rdb,len,flags,lenptr);
        case RDB_ENC_LZF:
            return rdbLoadLzfStringObject(rdb,flags,lenptr);
        default:
            rdbExitReportCorruptRDB("Unknown RDB string encoding type %d",len);
        }
    }

    if (len == RDB_LENERR) return NULL;

    if (plain || as_sds) {
        /* Raw buffer / SDS output path. */
        void *buf = plain ? zmalloc(len) : sdsnewlen(NULL,len);
        if (lenptr) *lenptr = len;
        if (len && rioRead(rdb,buf,len) == 0) {
            if (plain)
                zfree(buf);
            else
                sdsfree(buf);
            return NULL;
        }
        return buf;
    } else {
        /* Redis object output path. */
        robj *o = encode ? createStringObject(NULL,len) :
                           createRawStringObject(NULL,len);
        if (len && rioRead(rdb,o->ptr,len) == 0) {
            decrRefCount(o);
            return NULL;
        }
        return o;
    }
}
/* Convenience wrapper: load a plain (unencoded) string object. */
robj *rdbLoadStringObject(rio *rdb) {
    return rdbGenericLoadStringObject(rdb,RDB_LOAD_NONE,NULL);
}
/* Convenience wrapper: load a string object, allowing special memory
 * efficient encodings in the result. */
robj *rdbLoadEncodedStringObject(rio *rdb) {
    return rdbGenericLoadStringObject(rdb,RDB_LOAD_ENC,NULL);
}
2010-06-21 18:07:48 -04:00
/* Save a double value. Doubles are saved as strings prefixed by an unsigned
2013-01-16 12:00:20 -05:00
* 8 bit integer specifying the length of the representation .
2010-06-21 18:07:48 -04:00
* This 8 bit integer has special values in order to specify the following
* conditions :
* 253 : not a number
* 254 : + inf
* 255 : - inf
*/
2011-05-13 11:31:00 -04:00
int rdbSaveDoubleValue ( rio * rdb , double val ) {
2010-06-21 18:07:48 -04:00
unsigned char buf [ 128 ] ;
int len ;
if ( isnan ( val ) ) {
buf [ 0 ] = 253 ;
len = 1 ;
} else if ( ! isfinite ( val ) ) {
len = 1 ;
buf [ 0 ] = ( val < 0 ) ? 255 : 254 ;
} else {
# if (DBL_MANT_DIG >= 52) && (LLONG_MAX == 0x7fffffffffffffffLL)
/* Check if the float is in a safe range to be casted into a
* long long . We are assuming that long long is 64 bit here .
* Also we are assuming that there are no implementations around where
* double has precision < 52 bit .
*
* Under this assumptions we test if a double is inside an interval
* where casting to long long is safe . Then using two castings we
* make sure the decimal part is zero . If all this is true we use
* integer printing function that is much faster . */
double min = - 4503599627370495 ; /* (2^52)-1 */
double max = 4503599627370496 ; /* -(2^52) */
if ( val > min & & val < max & & val = = ( ( double ) ( ( long long ) val ) ) )
2014-05-12 05:05:18 -04:00
ll2string ( ( char * ) buf + 1 , sizeof ( buf ) - 1 , ( long long ) val ) ;
2010-06-21 18:07:48 -04:00
else
# endif
snprintf ( ( char * ) buf + 1 , sizeof ( buf ) - 1 , " %.17g " , val ) ;
buf [ 0 ] = strlen ( ( char * ) buf + 1 ) ;
len = buf [ 0 ] + 1 ;
}
2011-05-13 11:31:00 -04:00
return rdbWriteRaw ( rdb , buf , len ) ;
2010-06-21 18:07:48 -04:00
}
2011-05-13 17:24:19 -04:00
/* For information about double serialization check rdbSaveDoubleValue().
 * Returns 0 on success, -1 on short read. */
int rdbLoadDoubleValue(rio *rdb, double *val) {
    char buf[256];
    unsigned char len;

    if (rioRead(rdb,&len,1) == 0) return -1;
    switch(len) {
    case 255: *val = R_NegInf; return 0;
    case 254: *val = R_PosInf; return 0;
    case 253: *val = R_Nan; return 0;
    default:
        if (rioRead(rdb,buf,len) == 0) return -1;
        buf[len] = '\0';
        sscanf(buf, "%lg", val);
        return 0;
    }
}
2016-06-01 05:55:47 -04:00
/* Saves a double for RDB 8 or greater, where IE754 binary64 format is assumed.
* We just make sure the integer is always stored in little endian , otherwise
2016-05-18 05:45:40 -04:00
* the value is copied verbatim from memory to disk .
*
* Return - 1 on error , the size of the serialized value on success . */
2016-06-01 05:55:47 -04:00
int rdbSaveBinaryDoubleValue ( rio * rdb , double val ) {
memrev64ifbe ( & val ) ;
2016-10-02 18:08:35 -04:00
return rdbWriteRaw ( rdb , & val , sizeof ( val ) ) ;
2016-06-01 05:55:47 -04:00
}
/* Loads a double from RDB 8 or greater. See rdbSaveBinaryDoubleValue() for
2016-05-18 05:45:40 -04:00
* more info . On error - 1 is returned , otherwise 0. */
2016-06-01 05:55:47 -04:00
int rdbLoadBinaryDoubleValue ( rio * rdb , double * val ) {
2016-10-02 18:08:35 -04:00
if ( rioRead ( rdb , val , sizeof ( * val ) ) = = 0 ) return - 1 ;
2016-06-01 05:55:47 -04:00
memrev64ifbe ( val ) ;
return 0 ;
}
2016-10-02 18:08:35 -04:00
/* Like rdbSaveBinaryDoubleValue() but single precision. */
int rdbSaveBinaryFloatValue(rio *rdb, float val) {
    memrev32ifbe(&val);
    return rdbWriteRaw(rdb,&val,sizeof(val));
}
/* Like rdbLoadBinaryDoubleValue() but single precision. Returns 0 on
 * success, -1 on short read. */
int rdbLoadBinaryFloatValue(rio *rdb, float *val) {
    if (rioRead(rdb,val,sizeof(*val)) == 0) return -1;
    memrev32ifbe(val);
    return 0;
}
2011-05-13 17:24:19 -04:00
/* Save the object type of object "o".
 *
 * The RDB type byte depends both on the logical type and on the in-memory
 * encoding, since some encodings are serialized verbatim. Returns the
 * result of rdbSaveType() (1 on success, -1 on error); panics on an
 * unknown type/encoding combination.
 *
 * NOTE(review): the original text contained a pasted commit message (plain
 * prose) inside the OBJ_MODULE case, which is not valid C and would not
 * compile; it is condensed here into the comment below. */
int rdbSaveObjectType(rio *rdb, robj *o) {
    switch (o->type) {
    case OBJ_STRING:
        return rdbSaveType(rdb,RDB_TYPE_STRING);
    case OBJ_LIST:
        if (o->encoding == OBJ_ENCODING_QUICKLIST)
            return rdbSaveType(rdb,RDB_TYPE_LIST_QUICKLIST);
        else
            serverPanic("Unknown list encoding");
    case OBJ_SET:
        if (o->encoding == OBJ_ENCODING_INTSET)
            return rdbSaveType(rdb,RDB_TYPE_SET_INTSET);
        else if (o->encoding == OBJ_ENCODING_HT)
            return rdbSaveType(rdb,RDB_TYPE_SET);
        else
            serverPanic("Unknown set encoding");
    case OBJ_ZSET:
        if (o->encoding == OBJ_ENCODING_ZIPLIST)
            return rdbSaveType(rdb,RDB_TYPE_ZSET_ZIPLIST);
        else if (o->encoding == OBJ_ENCODING_SKIPLIST)
            return rdbSaveType(rdb,RDB_TYPE_ZSET_2);
        else
            serverPanic("Unknown sorted set encoding");
    case OBJ_HASH:
        if (o->encoding == OBJ_ENCODING_ZIPLIST)
            return rdbSaveType(rdb,RDB_TYPE_HASH_ZIPLIST);
        else if (o->encoding == OBJ_ENCODING_HT)
            return rdbSaveType(rdb,RDB_TYPE_HASH);
        else
            serverPanic("Unknown hash encoding");
    case OBJ_MODULE:
        /* MODULE_2 serialization format: every part emitted by the module
         * type API (integers, doubles, strings, ...) carries a one byte
         * prefix and the value ends with an EOF byte, so the stream can be
         * parsed, checked and skipped even without the module loaded. */
        return rdbSaveType(rdb,RDB_TYPE_MODULE_2);
    default:
        serverPanic("Unknown object type");
    }
    return -1; /* avoid warning */
}
2012-06-02 04:21:57 -04:00
/* Use rdbLoadType() to load a TYPE in RDB format, but returns -1 if the
* type is not specifically a valid Object Type . */
2011-05-13 17:24:19 -04:00
int rdbLoadObjectType ( rio * rdb ) {
int type ;
if ( ( type = rdbLoadType ( rdb ) ) = = - 1 ) return - 1 ;
if ( ! rdbIsObjectType ( type ) ) return - 1 ;
return type ;
2010-06-21 18:07:48 -04:00
}
2014-06-10 20:35:46 -04:00
/* Save a Redis object. Returns -1 on error, number of bytes written on success. */
2015-01-18 15:54:30 -05:00
ssize_t rdbSaveObject ( rio * rdb , robj * o ) {
ssize_t n = 0 , nwritten = 0 ;
2010-11-21 09:39:34 -05:00
2015-07-26 09:28:00 -04:00
if ( o - > type = = OBJ_STRING ) {
2010-06-21 18:07:48 -04:00
/* Save a string value */
2011-05-13 11:31:00 -04:00
if ( ( n = rdbSaveStringObject ( rdb , o ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2015-07-26 09:28:00 -04:00
} else if ( o - > type = = OBJ_LIST ) {
2010-06-21 18:07:48 -04:00
/* Save a list value */
2015-07-26 09:28:00 -04:00
if ( o - > encoding = = OBJ_ENCODING_QUICKLIST ) {
2014-12-10 13:53:12 -05:00
quicklist * ql = o - > ptr ;
quicklistNode * node = ql - > head ;
2010-06-21 18:07:48 -04:00
2014-12-10 13:53:12 -05:00
if ( ( n = rdbSaveLen ( rdb , ql - > len ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2014-12-10 13:53:12 -05:00
do {
2014-12-10 21:26:31 -05:00
if ( quicklistNodeIsCompressed ( node ) ) {
void * data ;
size_t compress_len = quicklistGetLzf ( node , & data ) ;
if ( ( n = rdbSaveLzfBlob ( rdb , data , compress_len , node - > sz ) ) = = - 1 ) return - 1 ;
nwritten + = n ;
} else {
if ( ( n = rdbSaveRawString ( rdb , node - > zl , node - > sz ) ) = = - 1 ) return - 1 ;
nwritten + = n ;
}
2014-12-10 13:53:12 -05:00
} while ( ( node = node - > next ) ) ;
2010-06-21 18:07:48 -04:00
} else {
2015-07-27 03:41:48 -04:00
serverPanic ( " Unknown list encoding " ) ;
2010-06-21 18:07:48 -04:00
}
2015-07-26 09:28:00 -04:00
} else if ( o - > type = = OBJ_SET ) {
2010-06-21 18:07:48 -04:00
/* Save a set value */
2015-07-26 09:28:00 -04:00
if ( o - > encoding = = OBJ_ENCODING_HT ) {
2010-07-02 13:57:12 -04:00
dict * set = o - > ptr ;
dictIterator * di = dictGetIterator ( set ) ;
dictEntry * de ;
2010-06-21 18:07:48 -04:00
2011-05-13 11:31:00 -04:00
if ( ( n = rdbSaveLen ( rdb , dictSize ( set ) ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2010-07-02 13:57:12 -04:00
while ( ( de = dictNext ( di ) ) ! = NULL ) {
2015-07-31 12:01:23 -04:00
sds ele = dictGetKey ( de ) ;
if ( ( n = rdbSaveRawString ( rdb , ( unsigned char * ) ele , sdslen ( ele ) ) )
= = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2010-07-02 13:57:12 -04:00
}
dictReleaseIterator ( di ) ;
2015-07-26 09:28:00 -04:00
} else if ( o - > encoding = = OBJ_ENCODING_INTSET ) {
2011-02-28 11:53:47 -05:00
size_t l = intsetBlobLen ( ( intset * ) o - > ptr ) ;
2010-07-02 13:57:12 -04:00
2011-05-13 11:31:00 -04:00
if ( ( n = rdbSaveRawString ( rdb , o - > ptr , l ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2010-07-02 13:57:12 -04:00
} else {
2015-07-27 03:41:48 -04:00
serverPanic ( " Unknown set encoding " ) ;
2010-06-21 18:07:48 -04:00
}
2015-07-26 09:28:00 -04:00
} else if ( o - > type = = OBJ_ZSET ) {
2011-03-09 07:16:38 -05:00
/* Save a sorted set value */
2015-07-26 09:28:00 -04:00
if ( o - > encoding = = OBJ_ENCODING_ZIPLIST ) {
2011-03-09 07:16:38 -05:00
size_t l = ziplistBlobLen ( ( unsigned char * ) o - > ptr ) ;
2010-06-21 18:07:48 -04:00
2011-05-13 11:31:00 -04:00
if ( ( n = rdbSaveRawString ( rdb , o - > ptr , l ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2015-07-26 09:28:00 -04:00
} else if ( o - > encoding = = OBJ_ENCODING_SKIPLIST ) {
2011-03-09 07:16:38 -05:00
zset * zs = o - > ptr ;
2017-03-31 09:45:00 -04:00
zskiplist * zsl = zs - > zsl ;
2011-03-09 07:16:38 -05:00
2017-03-31 09:45:00 -04:00
if ( ( n = rdbSaveLen ( rdb , zsl - > length ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2011-03-09 07:16:38 -05:00
2017-04-18 05:01:47 -04:00
/* We save the skiplist elements from the greatest to the smallest
* ( that ' s trivial since the elements are already ordered in the
* skiplist ) : this improves the load process , since the next loaded
* element will always be the smaller , so adding to the skiplist
* will always immediately stop at the head , making the insertion
* O ( 1 ) instead of O ( log ( N ) ) . */
2017-03-31 09:45:00 -04:00
zskiplistNode * zn = zsl - > tail ;
while ( zn ! = NULL ) {
2017-04-18 05:01:47 -04:00
if ( ( n = rdbSaveRawString ( rdb ,
( unsigned char * ) zn - > ele , sdslen ( zn - > ele ) ) ) = = - 1 )
{
return - 1 ;
}
2011-03-09 07:16:38 -05:00
nwritten + = n ;
2017-04-18 05:01:47 -04:00
if ( ( n = rdbSaveBinaryDoubleValue ( rdb , zn - > score ) ) = = - 1 )
return - 1 ;
2011-03-09 07:16:38 -05:00
nwritten + = n ;
2017-03-31 09:45:00 -04:00
zn = zn - > backward ;
2011-03-09 07:16:38 -05:00
}
} else {
2015-07-27 03:41:48 -04:00
serverPanic ( " Unknown sorted set encoding " ) ;
2010-06-21 18:07:48 -04:00
}
2015-07-26 09:28:00 -04:00
} else if ( o - > type = = OBJ_HASH ) {
2010-06-21 18:07:48 -04:00
/* Save a hash value */
2015-07-26 09:28:00 -04:00
if ( o - > encoding = = OBJ_ENCODING_ZIPLIST ) {
2012-01-03 01:14:10 -05:00
size_t l = ziplistBlobLen ( ( unsigned char * ) o - > ptr ) ;
2010-06-21 18:07:48 -04:00
2011-05-13 11:31:00 -04:00
if ( ( n = rdbSaveRawString ( rdb , o - > ptr , l ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2012-01-03 01:14:10 -05:00
2015-07-26 09:28:00 -04:00
} else if ( o - > encoding = = OBJ_ENCODING_HT ) {
2010-06-21 18:07:48 -04:00
dictIterator * di = dictGetIterator ( o - > ptr ) ;
dictEntry * de ;
2011-05-13 11:31:00 -04:00
if ( ( n = rdbSaveLen ( rdb , dictSize ( ( dict * ) o - > ptr ) ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2010-06-21 18:07:48 -04:00
while ( ( de = dictNext ( di ) ) ! = NULL ) {
2015-09-23 04:34:53 -04:00
sds field = dictGetKey ( de ) ;
sds value = dictGetVal ( de ) ;
2010-06-21 18:07:48 -04:00
2015-09-23 04:34:53 -04:00
if ( ( n = rdbSaveRawString ( rdb , ( unsigned char * ) field ,
sdslen ( field ) ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2015-09-23 04:34:53 -04:00
if ( ( n = rdbSaveRawString ( rdb , ( unsigned char * ) value ,
sdslen ( value ) ) ) = = - 1 ) return - 1 ;
2010-11-21 09:39:34 -05:00
nwritten + = n ;
2010-06-21 18:07:48 -04:00
}
dictReleaseIterator ( di ) ;
2012-01-03 01:14:10 -05:00
} else {
2015-07-27 03:41:48 -04:00
serverPanic ( " Unknown hash encoding " ) ;
2010-06-21 18:07:48 -04:00
}
2012-01-03 01:14:10 -05:00
2016-05-18 05:45:40 -04:00
} else if ( o - > type = = OBJ_MODULE ) {
/* Save a module-specific value. */
RedisModuleIO io ;
moduleValue * mv = o - > ptr ;
moduleType * mt = mv - > type ;
moduleInitIOContext ( io , mt , rdb ) ;
/* Write the "module" identifier as prefix, so that we'll be able
* to call the right module during loading . */
int retval = rdbSaveLen ( rdb , mt - > id ) ;
if ( retval = = - 1 ) return - 1 ;
io . bytes + = retval ;
RDB modules values serialization format version 2.
The original RDB serialization format was not parsable without the
module loaded, becuase the structure was managed only by the module
itself. Moreover RDB is a streaming protocol in the sense that it is
both produce di an append-only fashion, and is also sometimes directly
sent to the socket (in the case of diskless replication).
The fact that modules values cannot be parsed without the relevant
module loaded is a problem in many ways: RDB checking tools must have
loaded modules even for doing things not involving the value at all,
like splitting an RDB into N RDBs by key or alike, or just checking the
RDB for sanity.
In theory module values could be just a blob of data with a prefixed
length in order for us to be able to skip it. However prefixing the values
with a length would mean one of the following:
1. To be able to write some data at a previous offset. This breaks
stremaing.
2. To bufferize values before outputting them. This breaks performances.
3. To have some chunked RDB output format. This breaks simplicity.
Moreover, the above solution, still makes module values a totally opaque
matter, with the fowllowing problems:
1. The RDB check tool can just skip the value without being able to at
least check the general structure. For datasets composed mostly of
modules values this means to just check the outer level of the RDB not
actually doing any checko on most of the data itself.
2. It is not possible to do any recovering or processing of data for which a
module no longer exists in the future, or is unknown.
So this commit implements a different solution. The modules RDB
serialization API is composed if well defined calls to store integers,
floats, doubles or strings. After this commit, the parts generated by
the module API have a one-byte prefix for each of the above emitted
parts, and there is a final EOF byte as well. So even if we don't know
exactly how to interpret a module value, we can always parse it at an
high level, check the overall structure, understand the types used to
store the information, and easily skip the whole value.
The change is backward compatible: older RDB files can be still loaded
since the new encoding has a new RDB type: MODULE_2 (of value 7).
The commit also implements the ability to check RDB files for sanity
taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
/* Then write the module-specific representation + EOF marker. */
2016-05-18 05:45:40 -04:00
mt - > rdb_save ( & io , mv - > value ) ;
RDB modules values serialization format version 2.
The original RDB serialization format was not parsable without the
module loaded, becuase the structure was managed only by the module
itself. Moreover RDB is a streaming protocol in the sense that it is
both produce di an append-only fashion, and is also sometimes directly
sent to the socket (in the case of diskless replication).
The fact that modules values cannot be parsed without the relevant
module loaded is a problem in many ways: RDB checking tools must have
loaded modules even for doing things not involving the value at all,
like splitting an RDB into N RDBs by key or alike, or just checking the
RDB for sanity.
In theory module values could be just a blob of data with a prefixed
length in order for us to be able to skip it. However prefixing the values
with a length would mean one of the following:
1. To be able to write some data at a previous offset. This breaks
stremaing.
2. To bufferize values before outputting them. This breaks performances.
3. To have some chunked RDB output format. This breaks simplicity.
Moreover, the above solution, still makes module values a totally opaque
matter, with the fowllowing problems:
1. The RDB check tool can just skip the value without being able to at
least check the general structure. For datasets composed mostly of
modules values this means to just check the outer level of the RDB not
actually doing any checko on most of the data itself.
2. It is not possible to do any recovering or processing of data for which a
module no longer exists in the future, or is unknown.
So this commit implements a different solution. The modules RDB
serialization API is composed if well defined calls to store integers,
floats, doubles or strings. After this commit, the parts generated by
the module API have a one-byte prefix for each of the above emitted
parts, and there is a final EOF byte as well. So even if we don't know
exactly how to interpret a module value, we can always parse it at an
high level, check the overall structure, understand the types used to
store the information, and easily skip the whole value.
The change is backward compatible: older RDB files can be still loaded
since the new encoding has a new RDB type: MODULE_2 (of value 7).
The commit also implements the ability to check RDB files for sanity
taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
retval = rdbSaveLen ( rdb , RDB_MODULE_OPCODE_EOF ) ;
if ( retval = = - 1 ) return - 1 ;
io . bytes + = retval ;
2016-10-06 11:05:38 -04:00
if ( io . ctx ) {
moduleFreeContext ( io . ctx ) ;
zfree ( io . ctx ) ;
}
2016-06-05 09:34:43 -04:00
return io . error ? - 1 : ( ssize_t ) io . bytes ;
2010-06-21 18:07:48 -04:00
} else {
2015-07-27 03:41:48 -04:00
serverPanic ( " Unknown object type " ) ;
2010-06-21 18:07:48 -04:00
}
2010-11-21 09:39:34 -05:00
return nwritten ;
2010-06-21 18:07:48 -04:00
}
/* Return the length the object will have on disk if saved with
* the rdbSaveObject ( ) function . Currently we use a trick to get
* this length with very little changes to the code . In the future
* we could switch to a faster solution . */
2015-01-18 15:54:30 -05:00
size_t rdbSavedObjectLen ( robj * o ) {
ssize_t len = rdbSaveObject ( NULL , o ) ;
2015-07-26 09:29:53 -04:00
serverAssertWithInfo ( NULL , o , len ! = - 1 ) ;
2010-11-21 10:27:47 -05:00
return len ;
2010-06-21 18:07:48 -04:00
}
2010-12-30 10:41:36 -05:00
/* Save a key-value pair, with expire time, type, key, value.
* On error - 1 is returned .
2013-01-16 12:00:20 -05:00
* On success if the key was actually saved 1 is returned , otherwise 0
2010-12-30 10:41:36 -05:00
* is returned ( the key was already expired ) . */
2011-05-13 11:31:00 -04:00
int rdbSaveKeyValuePair ( rio * rdb , robj * key , robj * val ,
2011-11-09 10:51:19 -05:00
long long expiretime , long long now )
2010-12-30 10:41:36 -05:00
{
/* Save the expire time */
if ( expiretime ! = - 1 ) {
/* If this key is already expired skip it */
if ( expiretime < now ) return 0 ;
2015-07-27 03:41:48 -04:00
if ( rdbSaveType ( rdb , RDB_OPCODE_EXPIRETIME_MS ) = = - 1 ) return - 1 ;
2011-11-09 10:51:19 -05:00
if ( rdbSaveMillisecondTime ( rdb , expiretime ) = = - 1 ) return - 1 ;
2010-12-30 10:41:36 -05:00
}
2011-05-13 16:14:39 -04:00
2010-12-30 10:41:36 -05:00
/* Save type, key, value */
2011-05-13 16:14:39 -04:00
if ( rdbSaveObjectType ( rdb , val ) = = - 1 ) return - 1 ;
2011-05-13 11:31:00 -04:00
if ( rdbSaveStringObject ( rdb , key ) = = - 1 ) return - 1 ;
if ( rdbSaveObject ( rdb , val ) = = - 1 ) return - 1 ;
2010-12-30 10:41:36 -05:00
return 1 ;
}
2015-01-08 02:56:35 -05:00
/* Save an AUX field: the AUX opcode followed by the key and value as
 * raw strings. Returns 1 on success, -1 on I/O error. */
int rdbSaveAuxField(rio *rdb, void *key, size_t keylen, void *val, size_t vallen) {
    if (rdbSaveType(rdb,RDB_OPCODE_AUX) == -1 ||
        rdbSaveRawString(rdb,key,keylen) == -1 ||
        rdbSaveRawString(rdb,val,vallen) == -1) return -1;
    return 1;
}
/* Wrapper for rdbSaveAuxField() used when key/val length can be obtained
 * with strlen(). */
int rdbSaveAuxFieldStrStr(rio *rdb, char *key, char *val) {
    size_t keylen = strlen(key);
    size_t vallen = strlen(val);
    return rdbSaveAuxField(rdb,key,keylen,val,vallen);
}
/* Wrapper for strlen(key) + integer type (up to long long range): the
 * integer is converted to its decimal string representation before saving. */
int rdbSaveAuxFieldStrInt(rio *rdb, char *key, long long val) {
    char numbuf[LONG_STR_SIZE];
    int numlen = ll2string(numbuf,sizeof(numbuf),val);
    return rdbSaveAuxField(rdb,key,strlen(key),numbuf,numlen);
}
/* Save a few default AUX fields with information about the RDB generated. */
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
int rdbSaveInfoAuxFields ( rio * rdb , int flags , rdbSaveInfo * rsi ) {
2015-01-08 03:08:55 -05:00
int redis_bits = ( sizeof ( void * ) = = 8 ) ? 64 : 32 ;
2016-08-09 05:07:32 -04:00
int aof_preamble = ( flags & RDB_SAVE_AOF_PREAMBLE ) ! = 0 ;
2015-01-08 03:08:55 -05:00
2015-01-08 06:06:17 -05:00
/* Add a few fields about the state when the RDB was created. */
2015-01-08 02:56:35 -05:00
if ( rdbSaveAuxFieldStrStr ( rdb , " redis-ver " , REDIS_VERSION ) = = - 1 ) return - 1 ;
2015-01-08 03:08:55 -05:00
if ( rdbSaveAuxFieldStrInt ( rdb , " redis-bits " , redis_bits ) = = - 1 ) return - 1 ;
2015-01-08 02:56:35 -05:00
if ( rdbSaveAuxFieldStrInt ( rdb , " ctime " , time ( NULL ) ) = = - 1 ) return - 1 ;
2015-01-08 03:08:55 -05:00
if ( rdbSaveAuxFieldStrInt ( rdb , " used-mem " , zmalloc_used_memory ( ) ) = = - 1 ) return - 1 ;
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
/* Handle saving options that generate aux fields. */
if ( rsi ) {
2017-09-19 17:03:39 -04:00
if ( rdbSaveAuxFieldStrInt ( rdb , " repl-stream-db " , rsi - > repl_stream_db )
= = - 1 ) return - 1 ;
if ( rdbSaveAuxFieldStrStr ( rdb , " repl-id " , server . replid )
= = - 1 ) return - 1 ;
if ( rdbSaveAuxFieldStrInt ( rdb , " repl-offset " , server . master_repl_offset )
= = - 1 ) return - 1 ;
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
}
2016-08-09 10:41:40 -04:00
if ( rdbSaveAuxFieldStrInt ( rdb , " aof-preamble " , aof_preamble ) = = - 1 ) return - 1 ;
2015-01-08 02:56:35 -05:00
return 1 ;
}
2014-10-07 06:56:23 -04:00
/* Produces a dump of the database in RDB format sending it to the specified
2015-07-26 17:17:55 -04:00
* Redis I / O channel . On success C_OK is returned , otherwise C_ERR
2014-10-07 06:56:23 -04:00
* is returned and part of the output , or all the output , can be
* missing because of I / O errors .
*
2015-07-26 17:17:55 -04:00
* When the function returns C_ERR and if ' error ' is not NULL , the
2014-10-07 06:56:23 -04:00
* integer pointed by ' error ' is set to the value of errno just after the I / O
* error . */
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
int rdbSaveRio ( rio * rdb , int * error , int flags , rdbSaveInfo * rsi ) {
2010-06-21 18:07:48 -04:00
dictIterator * di = NULL ;
dictEntry * de ;
2012-03-31 11:08:40 -04:00
char magic [ 10 ] ;
2010-06-21 18:07:48 -04:00
int j ;
2011-11-11 19:04:27 -05:00
long long now = mstime ( ) ;
2012-04-09 16:40:41 -04:00
uint64_t cksum ;
2016-08-09 05:07:32 -04:00
size_t processed = 0 ;
2010-06-21 18:07:48 -04:00
2012-04-10 09:47:10 -04:00
if ( server . rdb_checksum )
2014-10-07 06:56:23 -04:00
rdb - > update_cksum = rioGenericUpdateChecksum ;
2015-07-27 03:41:48 -04:00
snprintf ( magic , sizeof ( magic ) , " REDIS%04d " , RDB_VERSION ) ;
2014-10-07 06:56:23 -04:00
if ( rdbWriteRaw ( rdb , magic , 9 ) = = - 1 ) goto werr ;
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
if ( rdbSaveInfoAuxFields ( rdb , flags , rsi ) = = - 1 ) goto werr ;
2011-05-13 11:31:00 -04:00
2010-06-21 18:07:48 -04:00
for ( j = 0 ; j < server . dbnum ; j + + ) {
redisDb * db = server . db + j ;
dict * d = db - > dict ;
if ( dictSize ( d ) = = 0 ) continue ;
2011-06-17 09:40:55 -04:00
di = dictGetSafeIterator ( d ) ;
2015-07-26 17:17:55 -04:00
if ( ! di ) return C_ERR ;
2010-06-21 18:07:48 -04:00
/* Write the SELECT DB opcode */
2015-07-27 03:41:48 -04:00
if ( rdbSaveType ( rdb , RDB_OPCODE_SELECTDB ) = = - 1 ) goto werr ;
2014-10-07 06:56:23 -04:00
if ( rdbSaveLen ( rdb , j ) = = - 1 ) goto werr ;
2010-06-21 18:07:48 -04:00
2015-01-07 05:08:41 -05:00
/* Write the RESIZE DB opcode. We trim the size to UINT32_MAX, which
* is currently the largest type we are able to represent in RDB sizes .
* However this does not limit the actual size of the DB to load since
* these sizes are just hints to resize the hash tables . */
uint32_t db_size , expires_size ;
db_size = ( dictSize ( db - > dict ) < = UINT32_MAX ) ?
dictSize ( db - > dict ) :
UINT32_MAX ;
2016-04-25 07:19:28 -04:00
expires_size = ( dictSize ( db - > expires ) < = UINT32_MAX ) ?
2015-01-07 05:08:41 -05:00
dictSize ( db - > expires ) :
UINT32_MAX ;
2015-07-27 03:41:48 -04:00
if ( rdbSaveType ( rdb , RDB_OPCODE_RESIZEDB ) = = - 1 ) goto werr ;
2015-01-07 05:08:41 -05:00
if ( rdbSaveLen ( rdb , db_size ) = = - 1 ) goto werr ;
if ( rdbSaveLen ( rdb , expires_size ) = = - 1 ) goto werr ;
2010-06-21 18:07:48 -04:00
/* Iterate this DB writing every entry */
while ( ( de = dictNext ( di ) ) ! = NULL ) {
2011-11-08 11:07:55 -05:00
sds keystr = dictGetKey ( de ) ;
robj key , * o = dictGetVal ( de ) ;
2011-11-09 10:51:19 -05:00
long long expire ;
2014-06-26 12:48:40 -04:00
2010-06-21 18:07:48 -04:00
initStaticStringObject ( key , keystr ) ;
2011-02-11 05:16:15 -05:00
expire = getExpire ( db , & key ) ;
2014-10-07 06:56:23 -04:00
if ( rdbSaveKeyValuePair ( rdb , & key , o , expire , now ) = = - 1 ) goto werr ;
2016-08-09 05:07:32 -04:00
/* When this RDB is produced as part of an AOF rewrite, move
* accumulated diff from parent to child while rewriting in
* order to have a smaller final write . */
if ( flags & RDB_SAVE_AOF_PREAMBLE & &
2016-08-09 10:41:40 -04:00
rdb - > processed_bytes > processed + AOF_READ_DIFF_INTERVAL_BYTES )
2016-08-09 05:07:32 -04:00
{
2016-08-09 10:41:40 -04:00
processed = rdb - > processed_bytes ;
2016-08-09 05:07:32 -04:00
aofReadDiffFromParent ( ) ;
}
2010-06-21 18:07:48 -04:00
}
dictReleaseIterator ( di ) ;
}
2012-04-09 16:40:41 -04:00
di = NULL ; /* So that we don't release it again on error. */
2010-06-21 18:07:48 -04:00
/* EOF opcode */
2015-07-27 03:41:48 -04:00
if ( rdbSaveType ( rdb , RDB_OPCODE_EOF ) = = - 1 ) goto werr ;
2010-06-21 18:07:48 -04:00
2012-04-10 09:47:10 -04:00
/* CRC64 checksum. It will be zero if checksum computation is disabled, the
* loading code skips the check in this case . */
2014-10-07 06:56:23 -04:00
cksum = rdb - > cksum ;
2012-04-09 16:40:41 -04:00
memrev64ifbe ( & cksum ) ;
2014-10-07 06:56:23 -04:00
if ( rioWrite ( rdb , & cksum , 8 ) = = 0 ) goto werr ;
2015-07-26 17:17:55 -04:00
return C_OK ;
2014-10-07 06:56:23 -04:00
werr :
if ( error ) * error = errno ;
if ( di ) dictReleaseIterator ( di ) ;
2015-07-26 17:17:55 -04:00
return C_ERR ;
2014-10-07 06:56:23 -04:00
}
2014-10-14 04:11:26 -04:00
/* This is just a wrapper to rdbSaveRio() that additionally adds a prefix
* and a suffix to the generated RDB dump . The prefix is :
*
* $ EOF : < 40 bytes unguessable hex string > \ r \ n
*
* While the suffix is the 40 bytes hex string we announced in the prefix .
* This way processes receiving the payload can understand when it ends
* without doing any processing of the content . */
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to master, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slaves (slaves of slaves) can be moved to replicate to other
slaves or the master itself, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlog, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order not to interfere with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
int rdbSaveRioWithEOFMark ( rio * rdb , int * error , rdbSaveInfo * rsi ) {
2015-07-27 03:41:48 -04:00
char eofmark [ RDB_EOF_MARK_SIZE ] ;
2014-10-14 04:11:26 -04:00
2015-07-27 03:41:48 -04:00
getRandomHexChars ( eofmark , RDB_EOF_MARK_SIZE ) ;
2014-10-14 04:11:26 -04:00
if ( error ) * error = 0 ;
if ( rioWrite ( rdb , " $EOF: " , 5 ) = = 0 ) goto werr ;
2015-07-27 03:41:48 -04:00
if ( rioWrite ( rdb , eofmark , RDB_EOF_MARK_SIZE ) = = 0 ) goto werr ;
2014-10-14 04:11:26 -04:00
if ( rioWrite ( rdb , " \r \n " , 2 ) = = 0 ) goto werr ;
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
if ( rdbSaveRio ( rdb , error , RDB_SAVE_NONE , rsi ) = = C_ERR ) goto werr ;
2015-07-27 03:41:48 -04:00
if ( rioWrite ( rdb , eofmark , RDB_EOF_MARK_SIZE ) = = 0 ) goto werr ;
2015-07-26 17:17:55 -04:00
return C_OK ;
2014-10-14 04:11:26 -04:00
werr : /* Write error. */
/* Set 'error' only if not already set by rdbSaveRio() call. */
if ( error & & * error = = 0 ) * error = errno ;
2015-07-26 17:17:55 -04:00
return C_ERR ;
2014-10-14 04:11:26 -04:00
}
2015-07-26 17:17:55 -04:00
/* Save the DB on disk. Return C_ERR on error, C_OK on success. */
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to master, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slaves (slaves of slaves) can be moved to replicate to other
slaves or the master itself, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlog, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order not to interfere with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
/* Save the DB on disk to 'filename'. The dump is first written to a
 * pid-unique temporary file and then atomically rename(2)d into place.
 * Return C_ERR on error, C_OK on success. */
int rdbSave(char *filename, rdbSaveInfo *rsi) {
    char tmpfile[256];
    char cwd[MAXPATHLEN]; /* Current working dir path for error messages. */
    FILE *fp;
    rio rdb;
    int error = 0;

    snprintf(tmpfile,sizeof(tmpfile),"temp-%d.rdb", (int) getpid());
    fp = fopen(tmpfile,"w");
    if (!fp) {
        char *cwdp = getcwd(cwd,MAXPATHLEN);
        serverLog(LL_WARNING,
            "Failed opening the RDB file %s (in server root dir %s) "
            "for saving: %s",
            filename,
            cwdp ? cwdp : "unknown",
            strerror(errno));
        return C_ERR;
    }

    rioInitWithFile(&rdb,fp);
    if (rdbSaveRio(&rdb,&error,RDB_SAVE_NONE,rsi) == C_ERR) {
        errno = error;
        goto werr;
    }

    /* Make sure data will not remain on the OS's output buffers. */
    if (fflush(fp) == EOF) goto werr;
    if (fsync(fileno(fp)) == -1) goto werr;
    /* fclose() releases the stream even when it fails: clear 'fp' so the
     * error path below does not close it a second time (undefined behavior). */
    if (fclose(fp) == EOF) { fp = NULL; goto werr; }
    fp = NULL;

    /* Use RENAME to make sure the DB file is changed atomically only
     * if the generate DB file is ok. */
    if (rename(tmpfile,filename) == -1) {
        char *cwdp = getcwd(cwd,MAXPATHLEN);
        serverLog(LL_WARNING,
            "Error moving temp DB file %s on the final "
            "destination %s (in server root dir %s): %s",
            tmpfile,
            filename,
            cwdp ? cwdp : "unknown",
            strerror(errno));
        unlink(tmpfile);
        return C_ERR;
    }

    serverLog(LL_NOTICE,"DB saved on disk");
    server.dirty = 0;
    server.lastsave = time(NULL);
    server.lastbgsave_status = C_OK;
    return C_OK;

werr:
    serverLog(LL_WARNING,"Write error saving DB on disk: %s", strerror(errno));
    if (fp) fclose(fp); /* NULL if already closed above. */
    unlink(tmpfile);
    return C_ERR;
}
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to master, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slaves (slaves of slaves) can be moved to replicate to other
slaves or the master itself, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlog, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order not to interfere with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
/* Fork a child that saves the dataset on disk with rdbSave(). The parent
 * returns immediately with C_OK, or C_ERR if a persistence child is already
 * running or fork(2) fails. */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
    pid_t pid;
    long long fork_start_us;

    /* Refuse to start if another persistence child is already active. */
    if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) return C_ERR;

    server.dirty_before_bgsave = server.dirty;
    server.lastbgsave_try = time(NULL);
    openChildInfoPipe();

    fork_start_us = ustime();
    pid = fork();
    if (pid == 0) {
        /* Child: perform the save and report the copy-on-write size
         * to the parent via the child info pipe. */
        int retval;

        closeListeningSockets(0);
        redisSetProcTitle("redis-rdb-bgsave");
        retval = rdbSave(filename,rsi);
        if (retval == C_OK) {
            size_t private_dirty = zmalloc_get_private_dirty(-1);

            if (private_dirty) {
                serverLog(LL_NOTICE,
                    "RDB: %zu MB of memory used by copy-on-write",
                    private_dirty/(1024*1024));
            }

            server.child_info_data.cow_size = private_dirty;
            sendChildInfo(CHILD_INFO_TYPE_RDB);
        }
        exitFromChild((retval == C_OK) ? 0 : 1);
    } else {
        /* Parent: record fork latency, then either fail or track the
         * background child. */
        server.stat_fork_time = ustime()-fork_start_us;
        server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_time / (1024*1024*1024); /* GB per second. */
        latencyAddSampleIfNeeded("fork",server.stat_fork_time/1000);
        if (pid == -1) {
            closeChildInfoPipe();
            server.lastbgsave_status = C_ERR;
            serverLog(LL_WARNING,"Can't save in background: fork: %s",
                strerror(errno));
            return C_ERR;
        }
        serverLog(LL_NOTICE,"Background saving started by pid %d", pid);
        server.rdb_save_time_start = time(NULL);
        server.rdb_child_pid = pid;
        server.rdb_child_type = RDB_CHILD_TYPE_DISK;
        updateDictResizePolicy();
        return C_OK;
    }
    return C_OK; /* unreached */
}
/* Unlink the temporary RDB file ("temp-<pid>.rdb" in the current directory)
 * that a saving child with the given pid would have created. */
void rdbRemoveTempFile(pid_t childpid) {
    char path[256];

    snprintf(path,sizeof(path),"temp-%d.rdb", (int) childpid);
    unlink(path);
}
RDB modules values serialization format version 2.
The original RDB serialization format was not parsable without the
module loaded, because the structure was managed only by the module
itself. Moreover RDB is a streaming protocol in the sense that it is
both produced in an append-only fashion, and is also sometimes directly
sent to the socket (in the case of diskless replication).
The fact that modules values cannot be parsed without the relevant
module loaded is a problem in many ways: RDB checking tools must have
loaded modules even for doing things not involving the value at all,
like splitting an RDB into N RDBs by key or alike, or just checking the
RDB for sanity.
In theory module values could be just a blob of data with a prefixed
length in order for us to be able to skip it. However prefixing the values
with a length would mean one of the following:
1. To be able to write some data at a previous offset. This breaks
streaming.
2. To bufferize values before outputting them. This breaks performances.
3. To have some chunked RDB output format. This breaks simplicity.
Moreover, the above solution, still makes module values a totally opaque
matter, with the following problems:
1. The RDB check tool can just skip the value without being able to at
least check the general structure. For datasets composed mostly of
modules values this means to just check the outer level of the RDB not
actually doing any check on most of the data itself.
2. It is not possible to do any recovering or processing of data for which a
module no longer exists in the future, or is unknown.
So this commit implements a different solution. The modules RDB
serialization API is composed of well defined calls to store integers,
floats, doubles or strings. After this commit, the parts generated by
the module API have a one-byte prefix for each of the above emitted
parts, and there is a final EOF byte as well. So even if we don't know
exactly how to interpret a module value, we can always parse it at an
high level, check the overall structure, understand the types used to
store the information, and easily skip the whole value.
The change is backward compatible: older RDB files can be still loaded
since the new encoding has a new RDB type: MODULE_2 (of value 7).
The commit also implements the ability to check RDB files for sanity
taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
/* Called by rdbLoadObject() when the loader runs in RDB-check mode and a
 * module value serialized in format 2 is found: such values can be walked
 * without the actual module being loaded, since every emitted part carries
 * a one-byte opcode prefix. The function consumes opcode/payload pairs
 * until the EOF opcode, aborting via rdbExitReportCorruptRDB() on any
 * malformed payload. A dummy string object is returned just to conform to
 * the loader API. */
robj *rdbLoadCheckModuleValue(rio *rdb, char *modulename) {
    uint64_t opcode;

    while ((opcode = rdbLoadLen(rdb,NULL)) != RDB_MODULE_OPCODE_EOF) {
        switch (opcode) {
        case RDB_MODULE_OPCODE_SINT:
        case RDB_MODULE_OPCODE_UINT: {
            uint64_t len;
            if (rdbLoadLenByRef(rdb,NULL,&len) == -1) {
                rdbExitReportCorruptRDB(
                    "Error reading integer from module %s value", modulename);
            }
            break;
        }
        case RDB_MODULE_OPCODE_STRING: {
            robj *o = rdbGenericLoadStringObject(rdb,RDB_LOAD_NONE,NULL);
            if (o == NULL) {
                rdbExitReportCorruptRDB(
                    "Error reading string from module %s value", modulename);
            }
            decrRefCount(o);
            break;
        }
        case RDB_MODULE_OPCODE_FLOAT: {
            float val;
            if (rdbLoadBinaryFloatValue(rdb,&val) == -1) {
                rdbExitReportCorruptRDB(
                    "Error reading float from module %s value", modulename);
            }
            break;
        }
        case RDB_MODULE_OPCODE_DOUBLE: {
            double val;
            if (rdbLoadBinaryDoubleValue(rdb,&val) == -1) {
                rdbExitReportCorruptRDB(
                    "Error reading double from module %s value", modulename);
            }
            break;
        }
        /* Unknown opcodes are silently skipped, as in the original
         * if/else chain: the loop just reads the next opcode. */
        }
    }
    return createStringObject("module-dummy-value",18);
}
2010-06-21 18:07:48 -04:00
/* Load a Redis object of the specified type from the specified file.
* On success a newly allocated object is returned , otherwise NULL . */
2011-05-13 16:14:39 -04:00
robj * rdbLoadObject ( int rdbtype , rio * rdb ) {
2014-05-12 11:44:37 -04:00
robj * o = NULL , * ele , * dec ;
2016-09-01 05:08:44 -04:00
uint64_t len ;
2010-07-02 13:57:12 -04:00
unsigned int i ;
2010-06-21 18:07:48 -04:00
2015-07-27 03:41:48 -04:00
if ( rdbtype = = RDB_TYPE_STRING ) {
2010-06-21 18:07:48 -04:00
/* Read string value */
2011-05-13 11:31:00 -04:00
if ( ( o = rdbLoadEncodedStringObject ( rdb ) ) = = NULL ) return NULL ;
2010-06-21 18:07:48 -04:00
o = tryObjectEncoding ( o ) ;
2015-07-27 03:41:48 -04:00
} else if ( rdbtype = = RDB_TYPE_LIST ) {
2010-06-21 18:07:48 -04:00
/* Read list value */
2015-07-27 03:41:48 -04:00
if ( ( len = rdbLoadLen ( rdb , NULL ) ) = = RDB_LENERR ) return NULL ;
2010-06-21 18:07:48 -04:00
2014-11-13 14:11:47 -05:00
o = createQuicklistObject ( ) ;
2014-12-16 00:49:14 -05:00
quicklistSetOptions ( o - > ptr , server . list_max_ziplist_size ,
server . list_compress_depth ) ;
2010-06-21 18:07:48 -04:00
/* Load every single element of the list */
while ( len - - ) {
2011-05-13 11:31:00 -04:00
if ( ( ele = rdbLoadEncodedStringObject ( rdb ) ) = = NULL ) return NULL ;
2014-11-13 14:11:47 -05:00
dec = getDecodedObject ( ele ) ;
size_t len = sdslen ( dec - > ptr ) ;
2014-12-16 00:49:14 -05:00
quicklistPushTail ( o - > ptr , dec - > ptr , len ) ;
2014-11-13 14:11:47 -05:00
decrRefCount ( dec ) ;
decrRefCount ( ele ) ;
2010-06-21 18:07:48 -04:00
}
2015-07-27 03:41:48 -04:00
} else if ( rdbtype = = RDB_TYPE_SET ) {
2015-07-31 12:01:23 -04:00
/* Read Set value */
2015-07-27 03:41:48 -04:00
if ( ( len = rdbLoadLen ( rdb , NULL ) ) = = RDB_LENERR ) return NULL ;
2010-07-02 13:57:12 -04:00
/* Use a regular set when there are too many entries. */
if ( len > server . set_max_intset_entries ) {
o = createSetObject ( ) ;
/* It's faster to expand the dict to the right size asap in order
* to avoid rehashing */
if ( len > DICT_HT_INITIAL_SIZE )
dictExpand ( o - > ptr , len ) ;
} else {
o = createIntsetObject ( ) ;
}
2015-07-31 12:01:23 -04:00
/* Load every single element of the set */
2010-07-02 13:57:12 -04:00
for ( i = 0 ; i < len ; i + + ) {
long long llval ;
2015-07-31 12:01:23 -04:00
sds sdsele ;
2016-05-18 05:45:40 -04:00
if ( ( sdsele = rdbGenericLoadStringObject ( rdb , RDB_LOAD_SDS , NULL ) )
= = NULL ) return NULL ;
2010-07-02 13:57:12 -04:00
2015-07-26 09:28:00 -04:00
if ( o - > encoding = = OBJ_ENCODING_INTSET ) {
2015-08-04 03:20:55 -04:00
/* Fetch integer value from element. */
2015-07-31 12:01:23 -04:00
if ( isSdsRepresentableAsLongLong ( sdsele , & llval ) = = C_OK ) {
2010-07-02 13:57:12 -04:00
o - > ptr = intsetAdd ( o - > ptr , llval , NULL ) ;
} else {
2015-07-26 09:28:00 -04:00
setTypeConvert ( o , OBJ_ENCODING_HT ) ;
2010-07-02 13:57:12 -04:00
dictExpand ( o - > ptr , len ) ;
}
}
/* This will also be called when the set was just converted
2015-08-04 03:20:55 -04:00
* to a regular hash table encoded set . */
2015-07-26 09:28:00 -04:00
if ( o - > encoding = = OBJ_ENCODING_HT ) {
2015-07-31 12:01:23 -04:00
dictAdd ( ( dict * ) o - > ptr , sdsele , NULL ) ;
2010-08-26 07:18:24 -04:00
} else {
2015-07-31 12:01:23 -04:00
sdsfree ( sdsele ) ;
2010-07-02 13:57:12 -04:00
}
2010-06-21 18:07:48 -04:00
}
2016-06-01 05:55:47 -04:00
} else if ( rdbtype = = RDB_TYPE_ZSET_2 | | rdbtype = = RDB_TYPE_ZSET ) {
2015-08-04 03:20:55 -04:00
/* Read list/set value. */
2016-09-01 05:08:44 -04:00
uint64_t zsetlen ;
2011-03-10 11:50:13 -05:00
size_t maxelelen = 0 ;
2010-06-21 18:07:48 -04:00
zset * zs ;
2015-07-27 03:41:48 -04:00
if ( ( zsetlen = rdbLoadLen ( rdb , NULL ) ) = = RDB_LENERR ) return NULL ;
2010-06-21 18:07:48 -04:00
o = createZsetObject ( ) ;
zs = o - > ptr ;
2011-03-10 11:50:13 -05:00
2015-08-04 03:20:55 -04:00
/* Load every single element of the sorted set. */
2010-06-21 18:07:48 -04:00
while ( zsetlen - - ) {
2015-08-04 03:20:55 -04:00
sds sdsele ;
2010-09-22 12:07:52 -04:00
double score ;
zskiplistNode * znode ;
2010-06-21 18:07:48 -04:00
2016-05-18 05:45:40 -04:00
if ( ( sdsele = rdbGenericLoadStringObject ( rdb , RDB_LOAD_SDS , NULL ) )
= = NULL ) return NULL ;
2016-06-01 05:55:47 -04:00
if ( rdbtype = = RDB_TYPE_ZSET_2 ) {
if ( rdbLoadBinaryDoubleValue ( rdb , & score ) = = - 1 ) return NULL ;
} else {
if ( rdbLoadDoubleValue ( rdb , & score ) = = - 1 ) return NULL ;
}
2011-03-10 11:50:13 -05:00
/* Don't care about integer-encoded strings. */
2015-08-04 03:20:55 -04:00
if ( sdslen ( sdsele ) > maxelelen ) maxelelen = sdslen ( sdsele ) ;
2011-03-10 11:50:13 -05:00
2015-08-04 03:20:55 -04:00
znode = zslInsert ( zs - > zsl , score , sdsele ) ;
dictAdd ( zs - > dict , sdsele , & znode - > score ) ;
2010-06-21 18:07:48 -04:00
}
2011-03-10 11:50:13 -05:00
/* Convert *after* loading, since sorted sets are not stored ordered. */
if ( zsetLength ( o ) < = server . zset_max_ziplist_entries & &
maxelelen < = server . zset_max_ziplist_value )
2015-07-26 09:28:00 -04:00
zsetConvert ( o , OBJ_ENCODING_ZIPLIST ) ;
2015-07-27 03:41:48 -04:00
} else if ( rdbtype = = RDB_TYPE_HASH ) {
2016-09-01 05:08:44 -04:00
uint64_t len ;
2012-01-03 01:14:10 -05:00
int ret ;
2015-09-23 04:34:53 -04:00
sds field , value ;
2012-01-03 01:14:10 -05:00
len = rdbLoadLen ( rdb , NULL ) ;
2015-07-27 03:41:48 -04:00
if ( len = = RDB_LENERR ) return NULL ;
2010-06-21 18:07:48 -04:00
o = createHashObject ( ) ;
2012-01-03 01:14:10 -05:00
2013-12-05 10:35:32 -05:00
/* Too many entries? Use a hash table. */
2012-01-03 01:14:10 -05:00
if ( len > server . hash_max_ziplist_entries )
2015-07-26 09:28:00 -04:00
hashTypeConvert ( o , OBJ_ENCODING_HT ) ;
2012-01-03 01:14:10 -05:00
/* Load every field and value into the ziplist */
2015-07-26 09:28:00 -04:00
while ( o - > encoding = = OBJ_ENCODING_ZIPLIST & & len > 0 ) {
2012-03-13 04:49:11 -04:00
len - - ;
2012-01-03 01:14:10 -05:00
/* Load raw strings */
2016-05-18 05:45:40 -04:00
if ( ( field = rdbGenericLoadStringObject ( rdb , RDB_LOAD_SDS , NULL ) )
= = NULL ) return NULL ;
if ( ( value = rdbGenericLoadStringObject ( rdb , RDB_LOAD_SDS , NULL ) )
= = NULL ) return NULL ;
2012-01-03 01:14:10 -05:00
2012-03-13 05:59:29 -04:00
/* Add pair to ziplist */
2015-09-23 04:34:53 -04:00
o - > ptr = ziplistPush ( o - > ptr , ( unsigned char * ) field ,
sdslen ( field ) , ZIPLIST_TAIL ) ;
o - > ptr = ziplistPush ( o - > ptr , ( unsigned char * ) value ,
sdslen ( value ) , ZIPLIST_TAIL ) ;
2012-01-03 01:14:10 -05:00
/* Convert to hash table if size threshold is exceeded */
2015-09-23 04:34:53 -04:00
if ( sdslen ( field ) > server . hash_max_ziplist_value | |
sdslen ( value ) > server . hash_max_ziplist_value )
2010-06-21 18:07:48 -04:00
{
2015-09-23 04:34:53 -04:00
sdsfree ( field ) ;
sdsfree ( value ) ;
2015-07-26 09:28:00 -04:00
hashTypeConvert ( o , OBJ_ENCODING_HT ) ;
2012-01-03 01:14:10 -05:00
break ;
2010-06-21 18:07:48 -04:00
}
2015-09-23 04:34:53 -04:00
sdsfree ( field ) ;
sdsfree ( value ) ;
2010-06-21 18:07:48 -04:00
}
2012-01-03 01:14:10 -05:00
/* Load remaining fields and values into the hash table */
2015-07-26 09:28:00 -04:00
while ( o - > encoding = = OBJ_ENCODING_HT & & len > 0 ) {
2012-03-13 04:49:11 -04:00
len - - ;
2012-01-03 01:14:10 -05:00
/* Load encoded strings */
2016-05-18 05:45:40 -04:00
if ( ( field = rdbGenericLoadStringObject ( rdb , RDB_LOAD_SDS , NULL ) )
= = NULL ) return NULL ;
if ( ( value = rdbGenericLoadStringObject ( rdb , RDB_LOAD_SDS , NULL ) )
= = NULL ) return NULL ;
2012-01-03 01:14:10 -05:00
/* Add pair to hash table */
ret = dictAdd ( ( dict * ) o - > ptr , field , value ) ;
2014-05-12 11:44:37 -04:00
if ( ret = = DICT_ERR ) {
rdbExitReportCorruptRDB ( " Duplicate keys detected " ) ;
}
2012-01-03 01:14:10 -05:00
}
/* All pairs should be read by now */
2015-07-26 09:29:53 -04:00
serverAssert ( len = = 0 ) ;
2015-07-27 03:41:48 -04:00
} else if ( rdbtype = = RDB_TYPE_LIST_QUICKLIST ) {
if ( ( len = rdbLoadLen ( rdb , NULL ) ) = = RDB_LENERR ) return NULL ;
2014-12-10 13:53:12 -05:00
o = createQuicklistObject ( ) ;
2014-12-16 00:49:14 -05:00
quicklistSetOptions ( o - > ptr , server . list_max_ziplist_size ,
server . list_compress_depth ) ;
2012-01-03 01:14:10 -05:00
2014-12-10 13:53:12 -05:00
while ( len - - ) {
2016-05-18 05:45:40 -04:00
unsigned char * zl =
rdbGenericLoadStringObject ( rdb , RDB_LOAD_PLAIN , NULL ) ;
2015-01-07 04:20:55 -05:00
if ( zl = = NULL ) return NULL ;
2014-12-10 13:53:12 -05:00
quicklistAppendZiplist ( o - > ptr , zl ) ;
}
2015-07-27 03:41:48 -04:00
} else if ( rdbtype = = RDB_TYPE_HASH_ZIPMAP | |
rdbtype = = RDB_TYPE_LIST_ZIPLIST | |
rdbtype = = RDB_TYPE_SET_INTSET | |
rdbtype = = RDB_TYPE_ZSET_ZIPLIST | |
rdbtype = = RDB_TYPE_HASH_ZIPLIST )
2011-02-28 11:53:47 -05:00
{
2016-05-18 05:45:40 -04:00
unsigned char * encoded =
rdbGenericLoadStringObject ( rdb , RDB_LOAD_PLAIN , NULL ) ;
2015-01-07 04:20:55 -05:00
if ( encoded = = NULL ) return NULL ;
2015-07-26 09:28:00 -04:00
o = createObject ( OBJ_STRING , encoded ) ; /* Obj type fixed below. */
2011-02-28 11:53:47 -05:00
/* Fix the object encoding, and make sure to convert the encoded
* data type into the base type if accordingly to the current
* configuration there are too many elements in the encoded data
* type . Note that we only check the length and not max element
* size as this is an O ( N ) scan . Eventually everything will get
* converted . */
2011-05-13 16:14:39 -04:00
switch ( rdbtype ) {
2015-07-27 03:41:48 -04:00
case RDB_TYPE_HASH_ZIPMAP :
2012-01-03 01:14:10 -05:00
/* Convert to ziplist encoded hash. This must be deprecated
* when loading dumps created by Redis 2.4 gets deprecated . */
{
unsigned char * zl = ziplistNew ( ) ;
unsigned char * zi = zipmapRewind ( o - > ptr ) ;
2012-01-25 16:26:25 -05:00
unsigned char * fstr , * vstr ;
unsigned int flen , vlen ;
unsigned int maxlen = 0 ;
2012-01-03 01:14:10 -05:00
2012-01-25 16:26:25 -05:00
while ( ( zi = zipmapNext ( zi , & fstr , & flen , & vstr , & vlen ) ) ! = NULL ) {
if ( flen > maxlen ) maxlen = flen ;
if ( vlen > maxlen ) maxlen = vlen ;
2012-01-03 01:14:10 -05:00
zl = ziplistPush ( zl , fstr , flen , ZIPLIST_TAIL ) ;
zl = ziplistPush ( zl , vstr , vlen , ZIPLIST_TAIL ) ;
}
zfree ( o - > ptr ) ;
o - > ptr = zl ;
2015-07-26 09:28:00 -04:00
o - > type = OBJ_HASH ;
o - > encoding = OBJ_ENCODING_ZIPLIST ;
2012-01-03 01:14:10 -05:00
2012-01-25 16:26:25 -05:00
if ( hashTypeLength ( o ) > server . hash_max_ziplist_entries | |
maxlen > server . hash_max_ziplist_value )
{
2015-07-26 09:28:00 -04:00
hashTypeConvert ( o , OBJ_ENCODING_HT ) ;
2012-01-25 16:26:25 -05:00
}
2012-01-03 01:14:10 -05:00
}
2011-02-28 11:53:47 -05:00
break ;
2015-07-27 03:41:48 -04:00
case RDB_TYPE_LIST_ZIPLIST :
2015-07-26 09:28:00 -04:00
o - > type = OBJ_LIST ;
o - > encoding = OBJ_ENCODING_ZIPLIST ;
listTypeConvert ( o , OBJ_ENCODING_QUICKLIST ) ;
2011-02-28 11:53:47 -05:00
break ;
2015-07-27 03:41:48 -04:00
case RDB_TYPE_SET_INTSET :
2015-07-26 09:28:00 -04:00
o - > type = OBJ_SET ;
o - > encoding = OBJ_ENCODING_INTSET ;
2011-02-28 11:53:47 -05:00
if ( intsetLen ( o - > ptr ) > server . set_max_intset_entries )
2015-07-26 09:28:00 -04:00
setTypeConvert ( o , OBJ_ENCODING_HT ) ;
2011-02-28 11:53:47 -05:00
break ;
2015-07-27 03:41:48 -04:00
case RDB_TYPE_ZSET_ZIPLIST :
2015-07-26 09:28:00 -04:00
o - > type = OBJ_ZSET ;
o - > encoding = OBJ_ENCODING_ZIPLIST ;
2011-03-10 11:50:13 -05:00
if ( zsetLength ( o ) > server . zset_max_ziplist_entries )
2015-07-26 09:28:00 -04:00
zsetConvert ( o , OBJ_ENCODING_SKIPLIST ) ;
2011-03-09 07:16:38 -05:00
break ;
2015-07-27 03:41:48 -04:00
case RDB_TYPE_HASH_ZIPLIST :
2015-07-26 09:28:00 -04:00
o - > type = OBJ_HASH ;
o - > encoding = OBJ_ENCODING_ZIPLIST ;
2012-01-03 01:14:10 -05:00
if ( hashTypeLength ( o ) > server . hash_max_ziplist_entries )
2015-07-26 09:28:00 -04:00
hashTypeConvert ( o , OBJ_ENCODING_HT ) ;
2012-01-03 01:14:10 -05:00
break ;
2011-02-28 11:53:47 -05:00
default :
2016-07-01 09:26:55 -04:00
rdbExitReportCorruptRDB ( " Unknown RDB encoding type %d " , rdbtype ) ;
2011-02-28 11:53:47 -05:00
break ;
2011-02-28 10:55:34 -05:00
}
RDB modules values serialization format version 2.
The original RDB serialization format was not parsable without the
module loaded, becuase the structure was managed only by the module
itself. Moreover RDB is a streaming protocol in the sense that it is
both produce di an append-only fashion, and is also sometimes directly
sent to the socket (in the case of diskless replication).
The fact that modules values cannot be parsed without the relevant
module loaded is a problem in many ways: RDB checking tools must have
loaded modules even for doing things not involving the value at all,
like splitting an RDB into N RDBs by key or alike, or just checking the
RDB for sanity.
In theory module values could be just a blob of data with a prefixed
length in order for us to be able to skip it. However prefixing the values
with a length would mean one of the following:
1. To be able to write some data at a previous offset. This breaks
stremaing.
2. To bufferize values before outputting them. This breaks performances.
3. To have some chunked RDB output format. This breaks simplicity.
Moreover, the above solution, still makes module values a totally opaque
matter, with the fowllowing problems:
1. The RDB check tool can just skip the value without being able to at
least check the general structure. For datasets composed mostly of
modules values this means to just check the outer level of the RDB not
actually doing any checko on most of the data itself.
2. It is not possible to do any recovering or processing of data for which a
module no longer exists in the future, or is unknown.
So this commit implements a different solution. The modules RDB
serialization API is composed if well defined calls to store integers,
floats, doubles or strings. After this commit, the parts generated by
the module API have a one-byte prefix for each of the above emitted
parts, and there is a final EOF byte as well. So even if we don't know
exactly how to interpret a module value, we can always parse it at an
high level, check the overall structure, understand the types used to
store the information, and easily skip the whole value.
The change is backward compatible: older RDB files can be still loaded
since the new encoding has a new RDB type: MODULE_2 (of value 7).
The commit also implements the ability to check RDB files for sanity
taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
} else if ( rdbtype = = RDB_TYPE_MODULE | | rdbtype = = RDB_TYPE_MODULE_2 ) {
2016-05-18 05:45:40 -04:00
uint64_t moduleid = rdbLoadLen ( rdb , NULL ) ;
moduleType * mt = moduleTypeLookupModuleByID ( moduleid ) ;
char name [ 10 ] ;
RDB modules values serialization format version 2.
The original RDB serialization format was not parsable without the
module loaded, becuase the structure was managed only by the module
itself. Moreover RDB is a streaming protocol in the sense that it is
both produce di an append-only fashion, and is also sometimes directly
sent to the socket (in the case of diskless replication).
The fact that modules values cannot be parsed without the relevant
module loaded is a problem in many ways: RDB checking tools must have
loaded modules even for doing things not involving the value at all,
like splitting an RDB into N RDBs by key or alike, or just checking the
RDB for sanity.
In theory module values could be just a blob of data with a prefixed
length in order for us to be able to skip it. However prefixing the values
with a length would mean one of the following:
1. To be able to write some data at a previous offset. This breaks
stremaing.
2. To bufferize values before outputting them. This breaks performances.
3. To have some chunked RDB output format. This breaks simplicity.
Moreover, the above solution, still makes module values a totally opaque
matter, with the fowllowing problems:
1. The RDB check tool can just skip the value without being able to at
least check the general structure. For datasets composed mostly of
modules values this means to just check the outer level of the RDB not
actually doing any checko on most of the data itself.
2. It is not possible to do any recovering or processing of data for which a
module no longer exists in the future, or is unknown.
So this commit implements a different solution. The modules RDB
serialization API is composed if well defined calls to store integers,
floats, doubles or strings. After this commit, the parts generated by
the module API have a one-byte prefix for each of the above emitted
parts, and there is a final EOF byte as well. So even if we don't know
exactly how to interpret a module value, we can always parse it at an
high level, check the overall structure, understand the types used to
store the information, and easily skip the whole value.
The change is backward compatible: older RDB files can be still loaded
since the new encoding has a new RDB type: MODULE_2 (of value 7).
The commit also implements the ability to check RDB files for sanity
taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
if ( rdbCheckMode & & rdbtype = = RDB_TYPE_MODULE_2 )
return rdbLoadCheckModuleValue ( rdb , name ) ;
2016-05-18 05:45:40 -04:00
if ( mt = = NULL ) {
moduleTypeNameByID ( name , moduleid ) ;
serverLog ( LL_WARNING , " The RDB file contains module data I can't load: no matching module '%s' " , name ) ;
exit ( 1 ) ;
}
RedisModuleIO io ;
moduleInitIOContext ( io , mt , rdb ) ;
RDB modules values serialization format version 2.
The original RDB serialization format was not parsable without the
module loaded, becuase the structure was managed only by the module
itself. Moreover RDB is a streaming protocol in the sense that it is
both produce di an append-only fashion, and is also sometimes directly
sent to the socket (in the case of diskless replication).
The fact that modules values cannot be parsed without the relevant
module loaded is a problem in many ways: RDB checking tools must have
loaded modules even for doing things not involving the value at all,
like splitting an RDB into N RDBs by key or alike, or just checking the
RDB for sanity.
In theory module values could be just a blob of data with a prefixed
length in order for us to be able to skip it. However prefixing the values
with a length would mean one of the following:
1. To be able to write some data at a previous offset. This breaks
stremaing.
2. To bufferize values before outputting them. This breaks performances.
3. To have some chunked RDB output format. This breaks simplicity.
Moreover, the above solution, still makes module values a totally opaque
matter, with the fowllowing problems:
1. The RDB check tool can just skip the value without being able to at
least check the general structure. For datasets composed mostly of
modules values this means to just check the outer level of the RDB not
actually doing any checko on most of the data itself.
2. It is not possible to do any recovering or processing of data for which a
module no longer exists in the future, or is unknown.
So this commit implements a different solution. The modules RDB
serialization API is composed if well defined calls to store integers,
floats, doubles or strings. After this commit, the parts generated by
the module API have a one-byte prefix for each of the above emitted
parts, and there is a final EOF byte as well. So even if we don't know
exactly how to interpret a module value, we can always parse it at an
high level, check the overall structure, understand the types used to
store the information, and easily skip the whole value.
The change is backward compatible: older RDB files can be still loaded
since the new encoding has a new RDB type: MODULE_2 (of value 7).
The commit also implements the ability to check RDB files for sanity
taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
io . ver = ( rdbtype = = RDB_TYPE_MODULE ) ? 1 : 2 ;
2016-05-18 05:45:40 -04:00
/* Call the rdb_load method of the module providing the 10 bit
* encoding version in the lower 10 bits of the module ID . */
void * ptr = mt - > rdb_load ( & io , moduleid & 1023 ) ;
2017-07-06 05:20:49 -04:00
if ( io . ctx ) {
moduleFreeContext ( io . ctx ) ;
zfree ( io . ctx ) ;
}
RDB modules values serialization format version 2.
The original RDB serialization format was not parsable without the
module loaded, becuase the structure was managed only by the module
itself. Moreover RDB is a streaming protocol in the sense that it is
both produce di an append-only fashion, and is also sometimes directly
sent to the socket (in the case of diskless replication).
The fact that modules values cannot be parsed without the relevant
module loaded is a problem in many ways: RDB checking tools must have
loaded modules even for doing things not involving the value at all,
like splitting an RDB into N RDBs by key or alike, or just checking the
RDB for sanity.
In theory module values could be just a blob of data with a prefixed
length in order for us to be able to skip it. However prefixing the values
with a length would mean one of the following:
1. To be able to write some data at a previous offset. This breaks
stremaing.
2. To bufferize values before outputting them. This breaks performances.
3. To have some chunked RDB output format. This breaks simplicity.
Moreover, the above solution, still makes module values a totally opaque
matter, with the fowllowing problems:
1. The RDB check tool can just skip the value without being able to at
least check the general structure. For datasets composed mostly of
modules values this means to just check the outer level of the RDB not
actually doing any checko on most of the data itself.
2. It is not possible to do any recovering or processing of data for which a
module no longer exists in the future, or is unknown.
So this commit implements a different solution. The modules RDB
serialization API is composed if well defined calls to store integers,
floats, doubles or strings. After this commit, the parts generated by
the module API have a one-byte prefix for each of the above emitted
parts, and there is a final EOF byte as well. So even if we don't know
exactly how to interpret a module value, we can always parse it at an
high level, check the overall structure, understand the types used to
store the information, and easily skip the whole value.
The change is backward compatible: older RDB files can be still loaded
since the new encoding has a new RDB type: MODULE_2 (of value 7).
The commit also implements the ability to check RDB files for sanity
taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
/* Module v2 serialization has an EOF mark at the end. */
if ( io . ver = = 2 ) {
uint64_t eof = rdbLoadLen ( rdb , NULL ) ;
if ( eof ! = RDB_MODULE_OPCODE_EOF ) {
serverLog ( LL_WARNING , " The RDB file contains module data for the module '%s' that is not terminated by the proper module value EOF marker " , name ) ;
exit ( 1 ) ;
}
}
2016-05-18 05:45:40 -04:00
if ( ptr = = NULL ) {
moduleTypeNameByID ( name , moduleid ) ;
serverLog ( LL_WARNING , " The RDB file contains module data for the module type '%s', that the responsible module is not able to load. Check for modules log above for additional clues. " , name ) ;
exit ( 1 ) ;
}
o = createModuleObject ( mt , ptr ) ;
2010-06-21 18:07:48 -04:00
} else {
2016-07-01 09:26:55 -04:00
rdbExitReportCorruptRDB ( " Unknown RDB encoding type %d " , rdbtype ) ;
2010-06-21 18:07:48 -04:00
}
return o ;
}
2010-11-08 05:52:03 -05:00
/* Mark that we are loading in the global state and setup the fields
 * needed to provide loading stats: start time, bytes loaded so far, and
 * the total size of the RDB file (zero when fstat() fails, so progress
 * reporting degrades gracefully). */
void startLoading(FILE *fp) {
    struct stat sb;

    server.loading = 1;
    server.loading_start_time = time(NULL);
    server.loading_loaded_bytes = 0;
    server.loading_total_bytes =
        (fstat(fileno(fp), &sb) == -1) ? 0 : sb.st_size;
}
/* Refresh the loading progress info: record how far into the RDB we are
 * and keep the peak-memory statistic up to date while loading. */
void loadingProgress(off_t pos) {
    size_t used = zmalloc_used_memory();

    server.loading_loaded_bytes = pos;
    if (server.stat_peak_memory < used)
        server.stat_peak_memory = used;
}
/* Loading finished: clear the global loading flag so the server resumes
 * normal command serving. */
void stopLoading(void) {
    server.loading = 0;
}
2012-12-12 08:59:22 -05:00
/* rio read callback invoked while loading an RDB: updates the running
 * checksum (if enabled) and, every 'loading_process_events_interval_bytes'
 * of input consumed, refreshes the cached time, pings the master during a
 * replication transfer, publishes progress, and serves pending events so
 * clients are not starved during a long load. */
void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) {
    size_t interval = server.loading_process_events_interval_bytes;

    if (server.rdb_checksum)
        rioGenericUpdateChecksum(r, buf, len);

    /* Fire the housekeeping path only when this read crosses an interval
     * boundary (integer division changes between old and new offsets). */
    if (interval &&
        (r->processed_bytes + len) / interval >
         r->processed_bytes / interval)
    {
        /* The DB can take some non trivial amount of time to load. Update
         * our cached time since it is used to create and update the last
         * interaction time with clients and for other important things. */
        updateCachedTime();
        if (server.masterhost && server.repl_state == REPL_STATE_TRANSFER)
            replicationSendNewlineToMaster();
        loadingProgress(r->processed_bytes);
        processEventsWhileBlocked();
    }
}
2016-08-11 09:27:23 -04:00
/* Load an RDB file from the rio stream 'rdb'. On success C_OK is returned,
* otherwise C_ERR is returned and ' errno ' is set accordingly . */
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to master, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slaves (slaves of slaves) can be moved to replicate to other
slaves or the master itself, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlog, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to not interfere with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
int rdbLoadRio ( rio * rdb , rdbSaveInfo * rsi ) {
2016-06-01 14:18:28 -04:00
uint64_t dbid ;
2011-06-14 09:34:27 -04:00
int type , rdbver ;
2010-06-21 18:07:48 -04:00
redisDb * db = server . db + 0 ;
char buf [ 1024 ] ;
2011-11-09 10:51:19 -05:00
long long expiretime , now = mstime ( ) ;
2010-06-21 18:07:48 -04:00
2016-08-11 09:27:23 -04:00
rdb - > update_cksum = rdbLoadProgressCallback ;
rdb - > max_processing_chunk = server . loading_process_events_interval_bytes ;
if ( rioRead ( rdb , buf , 9 ) = = 0 ) goto eoferr ;
2010-06-21 18:07:48 -04:00
buf [ 9 ] = ' \0 ' ;
if ( memcmp ( buf , " REDIS " , 5 ) ! = 0 ) {
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING , " Wrong signature trying to load DB from file " ) ;
2011-10-14 10:59:32 -04:00
errno = EINVAL ;
2015-07-26 17:17:55 -04:00
return C_ERR ;
2010-06-21 18:07:48 -04:00
}
rdbver = atoi ( buf + 5 ) ;
2015-07-27 03:41:48 -04:00
if ( rdbver < 1 | | rdbver > RDB_VERSION ) {
serverLog ( LL_WARNING , " Can't handle RDB format version %d " , rdbver ) ;
2011-10-14 10:59:32 -04:00
errno = EINVAL ;
2015-07-26 17:17:55 -04:00
return C_ERR ;
2010-06-21 18:07:48 -04:00
}
2010-11-08 05:52:03 -05:00
2010-06-21 18:07:48 -04:00
while ( 1 ) {
robj * key , * val ;
expiretime = - 1 ;
2010-11-08 05:52:03 -05:00
2010-06-21 18:07:48 -04:00
/* Read type. */
2016-08-11 09:27:23 -04:00
if ( ( type = rdbLoadType ( rdb ) ) = = - 1 ) goto eoferr ;
2015-01-07 09:25:58 -05:00
/* Handle special types. */
2015-07-27 03:41:48 -04:00
if ( type = = RDB_OPCODE_EXPIRETIME ) {
2015-01-07 09:25:58 -05:00
/* EXPIRETIME: load an expire associated with the next key
* to load . Note that after loading an expire we need to
* load the actual type , and continue . */
2016-08-11 09:27:23 -04:00
if ( ( expiretime = rdbLoadTime ( rdb ) ) = = - 1 ) goto eoferr ;
2011-05-13 16:14:39 -04:00
/* We read the time so we need to read the object type again. */
2016-08-11 09:27:23 -04:00
if ( ( type = rdbLoadType ( rdb ) ) = = - 1 ) goto eoferr ;
2011-11-09 15:59:27 -05:00
/* the EXPIRETIME opcode specifies time in seconds, so convert
2013-01-16 12:00:20 -05:00
* into milliseconds . */
2011-11-09 10:51:19 -05:00
expiretime * = 1000 ;
2015-07-27 03:41:48 -04:00
} else if ( type = = RDB_OPCODE_EXPIRETIME_MS ) {
2015-01-07 09:25:58 -05:00
/* EXPIRETIME_MS: milliseconds precision expire times introduced
* with RDB v3 . Like EXPIRETIME but no with more precision . */
2016-08-11 09:27:23 -04:00
if ( ( expiretime = rdbLoadMillisecondTime ( rdb ) ) = = - 1 ) goto eoferr ;
2011-11-09 10:51:19 -05:00
/* We read the time so we need to read the object type again. */
2016-08-11 09:27:23 -04:00
if ( ( type = rdbLoadType ( rdb ) ) = = - 1 ) goto eoferr ;
2015-07-27 03:41:48 -04:00
} else if ( type = = RDB_OPCODE_EOF ) {
2015-01-07 09:25:58 -05:00
/* EOF: End of file, exit the main loop. */
2011-05-13 16:14:39 -04:00
break ;
2015-07-27 03:41:48 -04:00
} else if ( type = = RDB_OPCODE_SELECTDB ) {
2015-01-07 09:25:58 -05:00
/* SELECTDB: Select the specified database. */
2016-08-11 09:27:23 -04:00
if ( ( dbid = rdbLoadLen ( rdb , NULL ) ) = = RDB_LENERR )
2010-06-21 18:07:48 -04:00
goto eoferr ;
if ( dbid > = ( unsigned ) server . dbnum ) {
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING ,
2015-01-07 09:25:58 -05:00
" FATAL: Data file was created with a Redis "
" server configured to handle more than %d "
" databases. Exiting \n " , server . dbnum ) ;
2010-06-21 18:07:48 -04:00
exit ( 1 ) ;
}
db = server . db + dbid ;
2015-01-07 09:25:58 -05:00
continue ; /* Read type again. */
2015-07-27 03:41:48 -04:00
} else if ( type = = RDB_OPCODE_RESIZEDB ) {
2015-01-07 09:25:58 -05:00
/* RESIZEDB: Hint about the size of the keys in the currently
* selected data base , in order to avoid useless rehashing . */
2016-06-01 14:18:28 -04:00
uint64_t db_size , expires_size ;
2016-08-11 09:27:23 -04:00
if ( ( db_size = rdbLoadLen ( rdb , NULL ) ) = = RDB_LENERR )
2015-01-07 05:08:41 -05:00
goto eoferr ;
2016-08-11 09:27:23 -04:00
if ( ( expires_size = rdbLoadLen ( rdb , NULL ) ) = = RDB_LENERR )
2015-01-07 05:08:41 -05:00
goto eoferr ;
dictExpand ( db - > dict , db_size ) ;
dictExpand ( db - > expires , expires_size ) ;
2015-01-07 09:25:58 -05:00
continue ; /* Read type again. */
2015-07-27 03:41:48 -04:00
} else if ( type = = RDB_OPCODE_AUX ) {
2015-01-08 02:56:35 -05:00
/* AUX: generic string-string fields. Use to add state to RDB
* which is backward compatible . Implementations of RDB loading
* are requierd to skip AUX fields they don ' t understand .
*
* An AUX field is composed of two strings : key and value . */
robj * auxkey , * auxval ;
2016-08-11 09:27:23 -04:00
if ( ( auxkey = rdbLoadStringObject ( rdb ) ) = = NULL ) goto eoferr ;
if ( ( auxval = rdbLoadStringObject ( rdb ) ) = = NULL ) goto eoferr ;
2015-01-08 02:56:35 -05:00
if ( ( ( char * ) auxkey - > ptr ) [ 0 ] = = ' % ' ) {
/* All the fields with a name staring with '%' are considered
* information fields and are logged at startup with a log
* level of NOTICE . */
2015-07-27 03:41:48 -04:00
serverLog ( LL_NOTICE , " RDB '%s': %s " ,
2015-01-21 08:51:42 -05:00
( char * ) auxkey - > ptr ,
( char * ) auxval - > ptr ) ;
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
} else if ( ! strcasecmp ( auxkey - > ptr , " repl-stream-db " ) ) {
if ( rsi ) rsi - > repl_stream_db = atoi ( auxval - > ptr ) ;
2016-11-10 06:35:29 -05:00
} else if ( ! strcasecmp ( auxkey - > ptr , " repl-id " ) ) {
if ( rsi & & sdslen ( auxval - > ptr ) = = CONFIG_RUN_ID_SIZE ) {
memcpy ( rsi - > repl_id , auxval - > ptr , CONFIG_RUN_ID_SIZE + 1 ) ;
rsi - > repl_id_is_set = 1 ;
}
} else if ( ! strcasecmp ( auxkey - > ptr , " repl-offset " ) ) {
if ( rsi ) rsi - > repl_offset = strtoll ( auxval - > ptr , NULL , 10 ) ;
2015-01-08 02:56:35 -05:00
} else {
/* We ignore fields we don't understand, as by AUX field
* contract . */
2015-07-27 03:41:48 -04:00
serverLog ( LL_DEBUG , " Unrecognized RDB AUX field: '%s' " ,
2015-01-21 08:51:42 -05:00
( char * ) auxkey - > ptr ) ;
2015-01-08 02:56:35 -05:00
}
2015-01-08 16:23:48 -05:00
decrRefCount ( auxkey ) ;
decrRefCount ( auxval ) ;
2015-01-08 02:56:35 -05:00
continue ; /* Read type again. */
2010-06-21 18:07:48 -04:00
}
2015-01-07 09:25:58 -05:00
2010-06-21 18:07:48 -04:00
/* Read key */
2016-08-11 09:27:23 -04:00
if ( ( key = rdbLoadStringObject ( rdb ) ) = = NULL ) goto eoferr ;
2010-06-21 18:07:48 -04:00
/* Read value */
2016-08-11 09:27:23 -04:00
if ( ( val = rdbLoadObject ( type , rdb ) ) = = NULL ) goto eoferr ;
2012-01-13 20:49:16 -05:00
/* Check if the key already expired. This function is used when loading
* an RDB file from disk , either at startup , or when an RDB was
* received from the master . In the latter case , the master is
* responsible for key expiry . If we would expire keys here , the
* snapshot taken by the master may not be reflected on the slave . */
if ( server . masterhost = = NULL & & expiretime ! = - 1 & & expiretime < now ) {
2010-06-21 18:07:48 -04:00
decrRefCount ( key ) ;
decrRefCount ( val ) ;
continue ;
}
/* Add the new object in the hash table */
2011-06-14 09:34:27 -04:00
dbAdd ( db , key , val ) ;
2010-06-21 18:07:48 -04:00
/* Set the expire time if needed */
Replication: fix the infamous key leakage of writable slaves + EXPIRE.
BACKGROUND AND USE CASEj
Redis slaves are normally write only, however the supprot a "writable"
mode which is very handy when scaling reads on slaves, that actually
need write operations in order to access data. For instance imagine
having slaves replicating certain Sets keys from the master. When
accessing the data on the slave, we want to peform intersections between
such Sets values. However we don't want to intersect each time: to cache
the intersection for some time often is a good idea.
To do so, it is possible to setup a slave as a writable slave, and
perform the intersection on the slave side, perhaps setting a TTL on the
resulting key so that it will expire after some time.
THE BUG
Problem: in order to have a consistent replication, expiring of keys in
Redis replication is up to the master, that synthesize DEL operations to
send in the replication stream. However slaves logically expire keys
by hiding them from read attempts from clients so that if the master did
not promptly sent a DEL, the client still see logically expired keys
as non existing.
Because slaves don't actively expire keys by actually evicting them but
just masking from the POV of read operations, if a key is created in a
writable slave, and an expire is set, the key will be leaked forever:
1. No DEL will be received from the master, which does not know about
such a key at all.
2. No eviction will be performed by the slave, since it needs to disable
eviction because it's up to masters, otherwise consistency of data is
lost.
THE FIX
In order to fix the problem, the slave should be able to tag keys that
were created in the slave side and have an expire set in some way.
My solution involved using an unique additional dictionary created by
the writable slave only if needed. The dictionary is obviously keyed by
the key name that we need to track: all the keys that are set with an
expire directly by a client writing to the slave are tracked.
The value in the dictionary is a bitmap of all the DBs where such a key
name need to be tracked, so that we can use a single dictionary to track
keys in all the DBs used by the slave (actually this limits the solution
to the first 64 DBs, but the default with Redis is to use 16 DBs).
This solution allows to pay both a small complexity and CPU penalty,
which is zero when the feature is not used, actually. The slave-side
eviction is encapsulated in code which is not coupled with the rest of
the Redis core, if not for the hook to track the keys.
TODO
I'm doing the first smoke tests to see if the feature works as expected:
so far so good. Unit tests should be added before merging into the
4.0 branch.
2016-12-13 04:20:06 -05:00
if ( expiretime ! = - 1 ) setExpire ( NULL , db , key , expiretime ) ;
2010-06-21 18:07:48 -04:00
decrRefCount ( key ) ;
}
2012-04-09 16:40:41 -04:00
/* Verify the checksum if RDB version is >= 5 */
2012-04-10 09:47:10 -04:00
if ( rdbver > = 5 & & server . rdb_checksum ) {
2016-08-11 09:27:23 -04:00
uint64_t cksum , expected = rdb - > cksum ;
2012-04-09 16:40:41 -04:00
2016-08-11 09:27:23 -04:00
if ( rioRead ( rdb , & cksum , 8 ) = = 0 ) goto eoferr ;
2012-04-09 16:40:41 -04:00
memrev64ifbe ( & cksum ) ;
2012-04-10 09:47:10 -04:00
if ( cksum = = 0 ) {
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING , " RDB file was saved with checksum disabled: no check performed. " ) ;
2012-04-10 09:47:10 -04:00
} else if ( cksum ! = expected ) {
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING , " Wrong RDB checksum. Aborting now. " ) ;
2015-02-03 04:33:05 -05:00
rdbExitReportCorruptRDB ( " RDB CRC error " ) ;
2012-04-09 16:40:41 -04:00
}
}
2015-07-26 17:17:55 -04:00
return C_OK ;
2010-06-21 18:07:48 -04:00
eoferr : /* unexpected end of file is handled here with a fatal exit */
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING , " Short read or OOM loading DB. Unrecoverable error, aborting now. " ) ;
2015-02-03 04:33:05 -05:00
rdbExitReportCorruptRDB ( " Unexpected EOF reading RDB file " ) ;
2015-07-26 17:17:55 -04:00
return C_ERR ; /* Just to avoid warning */
2010-06-21 18:07:48 -04:00
}
2016-08-11 09:27:23 -04:00
/* Like rdbLoadRio() but takes a filename instead of a rio stream. The
* filename is open for reading and a rio stream object created in order
* to do the actual loading . Moreover the ETA displayed in the INFO
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
* output is initialized and finalized .
*
* If you pass an ' rsi ' structure initialied with RDB_SAVE_OPTION_INIT , the
* loading code will fiil the information fields in the structure . */
int rdbLoad ( char * filename , rdbSaveInfo * rsi ) {
2016-08-11 09:27:23 -04:00
FILE * fp ;
rio rdb ;
int retval ;
if ( ( fp = fopen ( filename , " r " ) ) = = NULL ) return C_ERR ;
startLoading ( fp ) ;
rioInitWithFile ( & rdb , fp ) ;
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
retval = rdbLoadRio ( & rdb , rsi ) ;
2016-08-11 09:27:23 -04:00
fclose ( fp ) ;
stopLoading ( ) ;
return retval ;
}
2014-10-14 04:11:26 -04:00
/* A background saving child (BGSAVE) terminated its work. Handle this.
* This function covers the case of actual BGSAVEs . */
void backgroundSaveDoneHandlerDisk ( int exitcode , int bysignal ) {
2010-06-21 18:07:48 -04:00
if ( ! bysignal & & exitcode = = 0 ) {
2015-07-27 03:41:48 -04:00
serverLog ( LL_NOTICE ,
2010-06-21 18:07:48 -04:00
" Background saving terminated with success " ) ;
2010-08-30 04:32:32 -04:00
server . dirty = server . dirty - server . dirty_before_bgsave ;
2010-06-21 18:07:48 -04:00
server . lastsave = time ( NULL ) ;
2015-07-26 17:17:55 -04:00
server . lastbgsave_status = C_OK ;
2010-06-21 18:07:48 -04:00
} else if ( ! bysignal & & exitcode ! = 0 ) {
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING , " Background saving error " ) ;
2015-07-26 17:17:55 -04:00
server . lastbgsave_status = C_ERR ;
2010-06-21 18:07:48 -04:00
} else {
2014-07-01 11:19:08 -04:00
mstime_t latency ;
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING ,
2011-01-07 12:15:14 -05:00
" Background saving terminated by signal %d " , bysignal ) ;
2014-07-01 11:19:08 -04:00
latencyStartMonitor ( latency ) ;
2011-12-21 06:22:13 -05:00
rdbRemoveTempFile ( server . rdb_child_pid ) ;
2014-07-01 11:19:08 -04:00
latencyEndMonitor ( latency ) ;
latencyAddSampleIfNeeded ( " rdb-unlink-temp-file " , latency ) ;
2013-01-14 04:29:14 -05:00
/* SIGUSR1 is whitelisted, so we have a way to kill a child without
* tirggering an error conditon . */
if ( bysignal ! = SIGUSR1 )
2015-07-26 17:17:55 -04:00
server . lastbgsave_status = C_ERR ;
2010-06-21 18:07:48 -04:00
}
2011-12-21 06:22:13 -05:00
server . rdb_child_pid = - 1 ;
2015-07-27 03:41:48 -04:00
server . rdb_child_type = RDB_CHILD_TYPE_NONE ;
2012-05-25 06:11:30 -04:00
server . rdb_save_time_last = time ( NULL ) - server . rdb_save_time_start ;
server . rdb_save_time_start = - 1 ;
2010-06-21 18:07:48 -04:00
/* Possibly there are slaves waiting for a BGSAVE in order to be served
* ( the first stage of SYNC is a bulk transfer of dump . rdb ) */
2015-07-27 03:41:48 -04:00
updateSlavesWaitingBgsave ( ( ! bysignal & & exitcode = = 0 ) ? C_OK : C_ERR , RDB_CHILD_TYPE_DISK ) ;
2014-10-14 04:11:26 -04:00
}
/* A background saving child (BGSAVE) terminated its work. Handle this.
* This function covers the case of RDB - > Salves socket transfers for
2014-10-15 05:35:00 -04:00
* diskless replication . */
2014-10-14 04:11:26 -04:00
void backgroundSaveDoneHandlerSocket ( int exitcode , int bysignal ) {
2014-10-15 05:35:00 -04:00
uint64_t * ok_slaves ;
2014-10-14 04:11:26 -04:00
if ( ! bysignal & & exitcode = = 0 ) {
2015-07-27 03:41:48 -04:00
serverLog ( LL_NOTICE ,
2014-10-14 04:11:26 -04:00
" Background RDB transfer terminated with success " ) ;
} else if ( ! bysignal & & exitcode ! = 0 ) {
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING , " Background transfer error " ) ;
2014-10-14 04:11:26 -04:00
} else {
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING ,
2014-10-14 04:11:26 -04:00
" Background transfer terminated by signal %d " , bysignal ) ;
}
server . rdb_child_pid = - 1 ;
2015-07-27 03:41:48 -04:00
server . rdb_child_type = RDB_CHILD_TYPE_NONE ;
2014-10-14 04:11:26 -04:00
server . rdb_save_time_start = - 1 ;
2014-10-14 09:29:07 -04:00
2014-10-15 05:35:00 -04:00
/* If the child returns an OK exit code, read the set of slave client
2014-10-23 17:10:33 -04:00
* IDs and the associated status code . We ' ll terminate all the slaves
* in error state .
2014-10-15 05:35:00 -04:00
*
* If the process returned an error , consider the list of slaves that
2014-10-23 17:10:33 -04:00
* can continue to be emtpy , so that it ' s just a special case of the
2014-10-15 05:35:00 -04:00
* normal code path . */
ok_slaves = zmalloc ( sizeof ( uint64_t ) ) ; /* Make space for the count. */
2014-10-17 04:43:56 -04:00
ok_slaves [ 0 ] = 0 ;
2014-10-15 05:35:00 -04:00
if ( ! bysignal & & exitcode = = 0 ) {
int readlen = sizeof ( uint64_t ) ;
2014-10-17 04:43:56 -04:00
if ( read ( server . rdb_pipe_read_result_from_child , ok_slaves , readlen ) = =
2014-10-15 05:35:00 -04:00
readlen )
{
2014-10-23 17:10:33 -04:00
readlen = ok_slaves [ 0 ] * sizeof ( uint64_t ) * 2 ;
2014-10-15 05:35:00 -04:00
/* Make space for enough elements as specified by the first
* uint64_t element in the array . */
ok_slaves = zrealloc ( ok_slaves , sizeof ( uint64_t ) + readlen ) ;
if ( readlen & &
read ( server . rdb_pipe_read_result_from_child , ok_slaves + 1 ,
readlen ) ! = readlen )
{
ok_slaves [ 0 ] = 0 ;
}
}
}
2014-10-14 09:29:07 -04:00
2014-10-15 05:35:00 -04:00
close ( server . rdb_pipe_read_result_from_child ) ;
close ( server . rdb_pipe_write_result_to_parent ) ;
2014-10-14 09:29:07 -04:00
/* We can continue the replication process with all the slaves that
2014-10-15 05:35:00 -04:00
* correctly received the full payload . Others are terminated . */
listNode * ln ;
listIter li ;
listRewind ( server . slaves , & li ) ;
while ( ( ln = listNext ( & li ) ) ) {
2015-07-26 09:20:46 -04:00
client * slave = ln - > value ;
2014-10-15 05:35:00 -04:00
2015-07-27 03:41:48 -04:00
if ( slave - > replstate = = SLAVE_STATE_WAIT_BGSAVE_END ) {
2014-10-15 05:35:00 -04:00
uint64_t j ;
2014-10-23 17:10:33 -04:00
int errorcode = 0 ;
2014-10-15 05:35:00 -04:00
2014-10-23 17:10:33 -04:00
/* Search for the slave ID in the reply. In order for a slave to
* continue the replication process , we need to find it in the list ,
* and it must have an error code set to 0 ( which means success ) . */
2014-10-17 04:43:56 -04:00
for ( j = 0 ; j < ok_slaves [ 0 ] ; j + + ) {
2014-10-23 17:10:33 -04:00
if ( slave - > id = = ok_slaves [ 2 * j + 1 ] ) {
errorcode = ok_slaves [ 2 * j + 2 ] ;
break ; /* Found in slaves list. */
}
2014-10-15 05:35:00 -04:00
}
2014-10-23 17:10:33 -04:00
if ( j = = ok_slaves [ 0 ] | | errorcode ! = 0 ) {
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING ,
2014-10-27 07:23:03 -04:00
" Closing slave %s: child->slave RDB transfer failed: %s " ,
replicationGetSlaveName ( slave ) ,
2014-10-23 17:10:33 -04:00
( errorcode = = 0 ) ? " RDB transfer child aborted "
: strerror ( errorcode ) ) ;
2014-10-15 05:35:00 -04:00
freeClient ( slave ) ;
2014-10-17 04:43:56 -04:00
} else {
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING ,
2014-10-27 07:23:03 -04:00
" Slave %s correctly received the streamed RDB file. " ,
replicationGetSlaveName ( slave ) ) ;
2014-10-17 10:45:48 -04:00
/* Restore the socket as non-blocking. */
anetNonBlock ( NULL , slave - > fd ) ;
2014-10-22 09:53:45 -04:00
anetSendTimeout ( NULL , slave - > fd , 0 ) ;
2014-10-15 05:35:00 -04:00
}
}
}
zfree ( ok_slaves ) ;
2015-07-27 03:41:48 -04:00
updateSlavesWaitingBgsave ( ( ! bysignal & & exitcode = = 0 ) ? C_OK : C_ERR , RDB_CHILD_TYPE_SOCKET ) ;
2014-10-14 04:11:26 -04:00
}
/* When a background RDB saving/transfer terminates, call the right handler. */
void backgroundSaveDoneHandler ( int exitcode , int bysignal ) {
switch ( server . rdb_child_type ) {
2015-07-27 03:41:48 -04:00
case RDB_CHILD_TYPE_DISK :
2014-10-14 04:11:26 -04:00
backgroundSaveDoneHandlerDisk ( exitcode , bysignal ) ;
break ;
2015-07-27 03:41:48 -04:00
case RDB_CHILD_TYPE_SOCKET :
2014-10-14 04:11:26 -04:00
backgroundSaveDoneHandlerSocket ( exitcode , bysignal ) ;
break ;
default :
2015-07-27 03:41:48 -04:00
serverPanic ( " Unknown RDB child type. " ) ;
2014-10-14 04:11:26 -04:00
break ;
}
}
/* Spawn an RDB child that writes the RDB to the sockets of the slaves
2015-07-27 03:41:48 -04:00
* that are currently in SLAVE_STATE_WAIT_BGSAVE_START state . */
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
int rdbSaveToSlavesSockets ( rdbSaveInfo * rsi ) {
2014-10-14 04:11:26 -04:00
int * fds ;
2014-10-15 03:46:45 -04:00
uint64_t * clientids ;
2014-10-14 04:11:26 -04:00
int numfds ;
listNode * ln ;
listIter li ;
pid_t childpid ;
long long start ;
2014-10-14 09:29:07 -04:00
int pipefds [ 2 ] ;
2014-10-14 04:11:26 -04:00
2016-07-21 12:34:53 -04:00
if ( server . aof_child_pid ! = - 1 | | server . rdb_child_pid ! = - 1 ) return C_ERR ;
2014-10-14 04:11:26 -04:00
2014-10-14 09:29:07 -04:00
/* Before to fork, create a pipe that will be used in order to
* send back to the parent the IDs of the slaves that successfully
* received all the writes . */
2015-07-26 17:17:55 -04:00
if ( pipe ( pipefds ) = = - 1 ) return C_ERR ;
2014-10-14 09:29:07 -04:00
server . rdb_pipe_read_result_from_child = pipefds [ 0 ] ;
server . rdb_pipe_write_result_to_parent = pipefds [ 1 ] ;
/* Collect the file descriptors of the slaves we want to transfer
* the RDB to , which are i WAIT_BGSAVE_START state . */
2014-10-14 04:11:26 -04:00
fds = zmalloc ( sizeof ( int ) * listLength ( server . slaves ) ) ;
2014-10-15 03:46:45 -04:00
/* We also allocate an array of corresponding client IDs. This will
* be useful for the child process in order to build the report
* ( sent via unix pipe ) that will be sent to the parent . */
clientids = zmalloc ( sizeof ( uint64_t ) * listLength ( server . slaves ) ) ;
2014-10-14 04:11:26 -04:00
numfds = 0 ;
listRewind ( server . slaves , & li ) ;
while ( ( ln = listNext ( & li ) ) ) {
2015-07-26 09:20:46 -04:00
client * slave = ln - > value ;
2014-10-14 04:11:26 -04:00
2015-07-27 03:41:48 -04:00
if ( slave - > replstate = = SLAVE_STATE_WAIT_BGSAVE_START ) {
2014-10-15 03:46:45 -04:00
clientids [ numfds ] = slave - > id ;
2014-10-14 04:11:26 -04:00
fds [ numfds + + ] = slave - > fd ;
2015-08-05 07:34:46 -04:00
replicationSetupSlaveForFullResync ( slave , getPsyncInitialOffset ( ) ) ;
2016-04-25 09:49:57 -04:00
/* Put the socket in blocking mode to simplify RDB transfer.
2014-10-17 10:45:48 -04:00
* We ' ll restore it when the children returns ( since duped socket
* will share the O_NONBLOCK attribute with the parent ) . */
anetBlock ( NULL , slave - > fd ) ;
2014-10-22 09:53:45 -04:00
anetSendTimeout ( NULL , slave - > fd , server . repl_timeout * 1000 ) ;
2014-10-14 04:11:26 -04:00
}
}
2014-10-14 09:29:07 -04:00
/* Create the child process. */
2016-09-19 07:45:20 -04:00
openChildInfoPipe ( ) ;
2014-10-14 04:11:26 -04:00
start = ustime ( ) ;
if ( ( childpid = fork ( ) ) = = 0 ) {
/* Child */
int retval ;
rio slave_sockets ;
rioInitWithFdset ( & slave_sockets , fds , numfds ) ;
zfree ( fds ) ;
closeListeningSockets ( 0 ) ;
redisSetProcTitle ( " redis-rdb-to-slaves " ) ;
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
retval = rdbSaveRioWithEOFMark ( & slave_sockets , NULL , rsi ) ;
2015-07-26 17:17:55 -04:00
if ( retval = = C_OK & & rioFlush ( & slave_sockets ) = = 0 )
retval = C_ERR ;
2014-10-17 05:36:12 -04:00
2015-07-26 17:17:55 -04:00
if ( retval = = C_OK ) {
2016-09-19 04:28:05 -04:00
size_t private_dirty = zmalloc_get_private_dirty ( - 1 ) ;
2014-10-14 04:11:26 -04:00
if ( private_dirty ) {
2015-07-27 03:41:48 -04:00
serverLog ( LL_NOTICE ,
2014-10-14 04:11:26 -04:00
" RDB: %zu MB of memory used by copy-on-write " ,
private_dirty / ( 1024 * 1024 ) ) ;
}
2014-10-15 03:46:45 -04:00
2016-09-19 07:45:20 -04:00
server . child_info_data . cow_size = private_dirty ;
sendChildInfo ( CHILD_INFO_TYPE_RDB ) ;
2014-10-15 03:46:45 -04:00
/* If we are returning OK, at least one slave was served
* with the RDB file as expected , so we need to send a report
* to the parent via the pipe . The format of the message is :
2014-10-23 17:10:33 -04:00
*
* < len > < slave [ 0 ] . id > < slave [ 0 ] . error > . . .
*
* len , slave IDs , and slave errors , are all uint64_t integers ,
* so basically the reply is composed of 64 bits for the len field
* plus 2 additional 64 bit integers for each entry , for a total
* of ' len ' entries .
*
* The ' id ' represents the slave ' s client ID , so that the master
* can match the report with a specific slave , and ' error ' is
* set to 0 if the replication process terminated with a success
* or the error code if an error occurred . */
void * msg = zmalloc ( sizeof ( uint64_t ) * ( 1 + 2 * numfds ) ) ;
2014-10-15 03:46:45 -04:00
uint64_t * len = msg ;
uint64_t * ids = len + 1 ;
int j , msglen ;
2014-10-23 17:10:33 -04:00
* len = numfds ;
2014-10-15 03:46:45 -04:00
for ( j = 0 ; j < numfds ; j + + ) {
2014-10-23 17:10:33 -04:00
* ids + + = clientids [ j ] ;
* ids + + = slave_sockets . io . fdset . state [ j ] ;
2014-10-15 03:46:45 -04:00
}
/* Write the message to the parent. If we have no good slaves or
* we are unable to transfer the message to the parent , we exit
* with an error so that the parent will abort the replication
* process with all the childre that were waiting . */
2014-10-23 17:10:33 -04:00
msglen = sizeof ( uint64_t ) * ( 1 + 2 * numfds ) ;
2014-10-15 03:46:45 -04:00
if ( * len = = 0 | |
write ( server . rdb_pipe_write_result_to_parent , msg , msglen )
! = msglen )
{
2015-07-26 17:17:55 -04:00
retval = C_ERR ;
2014-10-15 03:46:45 -04:00
}
2014-12-21 08:10:06 -05:00
zfree ( msg ) ;
2014-10-14 04:11:26 -04:00
}
2014-12-21 08:10:06 -05:00
zfree ( clientids ) ;
2016-04-25 09:49:57 -04:00
rioFreeFdset ( & slave_sockets ) ;
2015-07-26 17:17:55 -04:00
exitFromChild ( ( retval = = C_OK ) ? 0 : 1 ) ;
2014-10-14 04:11:26 -04:00
} else {
/* Parent */
if ( childpid = = - 1 ) {
2015-07-27 03:41:48 -04:00
serverLog ( LL_WARNING , " Can't save in background: fork: %s " ,
2014-10-14 04:11:26 -04:00
strerror ( errno ) ) ;
2015-09-07 10:09:23 -04:00
/* Undo the state change. The caller will perform cleanup on
* all the slaves in BGSAVE_START state , but an early call to
* replicationSetupSlaveForFullResync ( ) turned it into BGSAVE_END */
listRewind ( server . slaves , & li ) ;
while ( ( ln = listNext ( & li ) ) ) {
client * slave = ln - > value ;
int j ;
for ( j = 0 ; j < numfds ; j + + ) {
if ( slave - > id = = clientids [ j ] ) {
slave - > replstate = SLAVE_STATE_WAIT_BGSAVE_START ;
break ;
}
}
}
2014-10-14 09:29:07 -04:00
close ( pipefds [ 0 ] ) ;
close ( pipefds [ 1 ] ) ;
2016-09-19 07:45:20 -04:00
closeChildInfoPipe ( ) ;
2015-09-07 10:09:23 -04:00
} else {
2017-05-19 05:10:36 -04:00
server . stat_fork_time = ustime ( ) - start ;
server . stat_fork_rate = ( double ) zmalloc_used_memory ( ) * 1000000 / server . stat_fork_time / ( 1024 * 1024 * 1024 ) ; /* GB per second. */
latencyAddSampleIfNeeded ( " fork " , server . stat_fork_time / 1000 ) ;
2015-09-07 10:09:23 -04:00
serverLog ( LL_NOTICE , " Background RDB transfer started by pid %d " ,
childpid ) ;
server . rdb_save_time_start = time ( NULL ) ;
server . rdb_child_pid = childpid ;
server . rdb_child_type = RDB_CHILD_TYPE_SOCKET ;
updateDictResizePolicy ( ) ;
2014-10-14 04:11:26 -04:00
}
2015-09-07 10:09:23 -04:00
zfree ( clientids ) ;
2014-10-14 04:11:26 -04:00
zfree ( fds ) ;
2015-09-07 10:09:23 -04:00
return ( childpid = = - 1 ) ? C_ERR : C_OK ;
2014-10-14 04:11:26 -04:00
}
2015-09-07 10:09:23 -04:00
return C_OK ; /* Unreached. */
2010-06-21 18:07:48 -04:00
}
2011-01-07 12:15:14 -05:00
2015-07-26 09:20:46 -04:00
void saveCommand ( client * c ) {
2011-12-21 06:22:13 -05:00
if ( server . rdb_child_pid ! = - 1 ) {
2011-01-07 12:15:14 -05:00
addReplyError ( c , " Background save already in progress " ) ;
return ;
}
2017-09-20 01:47:42 -04:00
rdbSaveInfo rsi , * rsiptr ;
rsiptr = rdbPopulateSaveInfo ( & rsi ) ;
if ( rdbSave ( server . rdb_filename , rsiptr ) = = C_OK ) {
2011-01-07 12:15:14 -05:00
addReply ( c , shared . ok ) ;
} else {
addReply ( c , shared . err ) ;
}
}
2016-07-21 12:34:53 -04:00
/* BGSAVE [SCHEDULE] */
2015-07-26 09:20:46 -04:00
void bgsaveCommand ( client * c ) {
2016-07-21 12:34:53 -04:00
int schedule = 0 ;
/* The SCHEDULE option changes the behavior of BGSAVE when an AOF rewrite
* is in progress . Instead of returning an error a BGSAVE gets scheduled . */
if ( c - > argc > 1 ) {
if ( c - > argc = = 2 & & ! strcasecmp ( c - > argv [ 1 ] - > ptr , " schedule " ) ) {
schedule = 1 ;
} else {
addReply ( c , shared . syntaxerr ) ;
return ;
}
}
2011-12-21 06:22:13 -05:00
if ( server . rdb_child_pid ! = - 1 ) {
2011-01-07 12:15:14 -05:00
addReplyError ( c , " Background save already in progress " ) ;
2011-12-21 06:17:02 -05:00
} else if ( server . aof_child_pid ! = - 1 ) {
2016-07-21 12:34:53 -04:00
if ( schedule ) {
server . rdb_bgsave_scheduled = 1 ;
addReplyStatus ( c , " Background saving scheduled " ) ;
} else {
addReplyError ( c ,
" An AOF log rewriting in progress: can't BGSAVE right now. "
2016-12-01 07:10:14 -05:00
" Use BGSAVE SCHEDULE in order to schedule a BGSAVE whenever "
2016-07-21 12:34:53 -04:00
" possible. " ) ;
}
PSYNC2: different improvements to Redis replication.
The gist of the changes is that now, partial resynchronizations between
slaves and masters (without the need of a full resync with RDB transfer
and so forth), work in a number of cases when it was impossible
in the past. For instance:
1. When a slave is promoted to mastrer, the slaves of the old master can
partially resynchronize with the new master.
2. Chained slalves (slaves of slaves) can be moved to replicate to other
slaves or the master itsef, without requiring a full resync.
3. The master itself, after being turned into a slave, is able to
partially resynchronize with the new master, when it joins replication
again.
In order to obtain this, the following main changes were operated:
* Slaves also take a replication backlog, not just masters.
* Same stream replication for all the slaves and sub slaves. The
replication stream is identical from the top level master to its slaves
and is also the same from the slaves to their sub-slaves and so forth.
This means that if a slave is later promoted to master, it has the
same replication backlong, and can partially resynchronize with its
slaves (that were previously slaves of the old master).
* A given replication history is no longer identified by the `runid` of
a Redis node. There is instead a `replication ID` which changes every
time the instance has a new history no longer coherent with the past
one. So, for example, slaves publish the same replication history of
their master, however when they are turned into masters, they publish
a new replication ID, but still remember the old ID, so that they are
able to partially resynchronize with slaves of the old master (up to a
given offset).
* The replication protocol was slightly modified so that a new extended
+CONTINUE reply from the master is able to inform the slave of a
replication ID change.
* REPLCONF CAPA is used in order to notify masters that a slave is able
to understand the new +CONTINUE reply.
* The RDB file was extended with an auxiliary field that is able to
select a given DB after loading in the slave, so that the slave can
continue receiving the replication stream from the point it was
disconnected without requiring the master to insert "SELECT" statements.
This is useful in order to guarantee the "same stream" property, because
the slave must be able to accumulate an identical backlog.
* Slave pings to sub-slaves are now sent in a special form, when the
top-level master is disconnected, in order to don't interfer with the
replication stream. We just use out of band "\n" bytes as in other parts
of the Redis protocol.
An old design document is available here:
https://gist.github.com/antirez/ae068f95c0d084891305
However the implementation is not identical to the description because
during the work to implement it, different changes were needed in order
to make things working well.
2016-11-09 05:31:06 -05:00
} else if ( rdbSaveBackground ( server . rdb_filename , NULL ) = = C_OK ) {
2011-01-07 12:15:14 -05:00
addReplyStatus ( c , " Background saving started " ) ;
} else {
addReply ( c , shared . err ) ;
}
}
2017-09-19 17:03:39 -04:00
/* Populate the rdbSaveInfo structure used to persist the replication
2017-09-20 01:47:42 -04:00
* information inside the RDB file .
* For master , if server . repl_backlog is not NULL , fill rdbSaveInfo with
* server . slaveseldb , otherwise just use init - 1.
* Don ' t worry , master will send SELECT command to replication stream ,
* because if server . repl_backlog is NULL , that will trigger full synchronization ,
* function replicationSetupSlaveForFullResync ( ) sets server . slaveseldb be - 1 ,
* then replicationFeedSlaves ( ) will send SELECT command when server . slaveseldb is - 1.
* For slave , currently the structure explicitly
2017-09-19 17:03:39 -04:00
* contains just the currently selected DB from the master stream , however
* if the rdbSave * ( ) family functions receive a NULL rsi structure also
* the Replication ID / offset is not saved . The function popultes ' rsi '
* that is normally stack - allocated in the caller , returns the populated
* pointer if the instance has a valid master client , otherwise NULL
2017-09-20 01:47:42 -04:00
* is returned , and the RDB saving will not persist any replication related
2017-09-19 17:03:39 -04:00
* information . */
rdbSaveInfo * rdbPopulateSaveInfo ( rdbSaveInfo * rsi ) {
rdbSaveInfo rsi_init = RDB_SAVE_INFO_INIT ;
* rsi = rsi_init ;
2017-09-20 01:47:42 -04:00
if ( ! server . masterhost ) {
if ( server . repl_backlog ) rsi - > repl_stream_db = server . slaveseldb ;
return rsi ;
}
2017-09-19 17:03:39 -04:00
if ( server . master ) {
rsi - > repl_stream_db = server . master - > db - > id ;
return rsi ;
} else {
2017-09-20 01:47:42 -04:00
serverLog ( LL_WARNING , " As a slave there is no valid master, can not persist replication information " ) ;
2017-09-19 17:03:39 -04:00
return NULL ;
}
}