redict/src/rdb.c

2839 lines
107 KiB
C
Raw Normal View History

/*
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "server.h"
#include "lzf.h" /* LZF compression library */
2012-01-03 01:14:10 -05:00
#include "zipmap.h"
#include "endianconv.h"
2017-09-05 07:14:13 -04:00
#include "stream.h"
#include <math.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <arpa/inet.h>
#include <sys/stat.h>
#include <sys/param.h>
2020-10-27 05:27:27 -04:00
/* This macro is called when the internal RDB structure is corrupt */
#define rdbExitReportCorruptRDB(...) rdbReportError(1, __LINE__,__VA_ARGS__)
/* This macro is called when RDB read failed (possibly a short read) */
#define rdbReportReadError(...) rdbReportError(0, __LINE__,__VA_ARGS__)
char* rdbFileBeingLoaded = NULL; /* used for rdb checking on read error */
extern int rdbCheckMode;
void rdbCheckError(const char *fmt, ...);
void rdbCheckSetError(const char *fmt, ...);
#ifdef __GNUC__
void rdbReportError(int corruption_error, int linenum, char *reason, ...) __attribute__ ((format (printf, 3, 4)));
#endif
void rdbReportError(int corruption_error, int linenum, char *reason, ...) {
va_list ap;
char msg[1024];
int len;
len = snprintf(msg,sizeof(msg),
"Internal error in RDB reading offset %llu, function at rdb.c:%d -> ",
(unsigned long long)server.loading_loaded_bytes, linenum);
va_start(ap,reason);
vsnprintf(msg+len,sizeof(msg)-len,reason,ap);
va_end(ap);
if (!server.loading) {
/* If we're in the context of a RESTORE command, just propagate the error. */
/* log in VERBOSE, and return (don't exit). */
serverLog(LL_VERBOSE, "%s", msg);
return;
} else if (rdbCheckMode) {
/* If we're inside the rdb checker, let it handle the error. */
rdbCheckError("%s",msg);
} else if (rdbFileBeingLoaded) {
/* If we're loading an rdb file form disk, run rdb check (and exit) */
serverLog(LL_WARNING, "%s", msg);
char *argv[2] = {"",rdbFileBeingLoaded};
redis_check_rdb_main(2,argv,NULL);
} else if (corruption_error) {
/* In diskless loading, in case of corrupt file, log and exit. */
serverLog(LL_WARNING, "%s. Failure loading rdb format", msg);
} else {
/* In diskless loading, in case of a short read (not a corrupt
* file), log and proceed (don't exit). */
serverLog(LL_WARNING, "%s. Failure loading rdb format from socket, assuming connection error, resuming operation.", msg);
return;
}
serverLog(LL_WARNING, "Terminating server after rdb file reading failure.");
exit(1);
}
static int rdbWriteRaw(rio *rdb, void *p, size_t len) {
if (rdb && rioWrite(rdb,p,len) == 0)
return -1;
return len;
}
int rdbSaveType(rio *rdb, unsigned char type) {
return rdbWriteRaw(rdb,&type,1);
}
/* Load a "type" in RDB format, that is a one byte unsigned integer.
* This function is not only used to load object types, but also special
* "types" like the end-of-file type, the EXPIRE type, and so forth. */
2011-05-13 17:24:19 -04:00
int rdbLoadType(rio *rdb) {
unsigned char type;
if (rioRead(rdb,&type,1) == 0) return -1;
return type;
}
/* This is only used to load old databases stored with the RDB_OPCODE_EXPIRETIME
* opcode. New versions of Redis store using the RDB_OPCODE_EXPIRETIME_MS
* opcode. On error -1 is returned, however this could be a valid time, so
* to check for loading errors the caller should call rioGetReadError() after
* calling this function. */
2011-05-13 17:24:19 -04:00
time_t rdbLoadTime(rio *rdb) {
int32_t t32;
if (rioRead(rdb,&t32,4) == 0) return -1;
return (time_t)t32;
}
int rdbSaveMillisecondTime(rio *rdb, long long t) {
int64_t t64 = (int64_t) t;
memrev64ifbe(&t64); /* Store in little endian. */
return rdbWriteRaw(rdb,&t64,8);
}
/* This function loads a time from the RDB file. It gets the version of the
* RDB because, unfortunately, before Redis 5 (RDB version 9), the function
* failed to convert data to/from little endian, so RDB files with keys having
* expires could not be shared between big endian and little endian systems
* (because the expire time will be totally wrong). The fix for this is just
* to call memrev64ifbe(), however if we fix this for all the RDB versions,
* this call will introduce an incompatibility for big endian systems:
* after upgrading to Redis version 5 they will no longer be able to load their
* own old RDB files. Because of that, we instead fix the function only for new
* RDB versions, and load older RDB versions as we used to do in the past,
* allowing big endian systems to load their own old RDB files.
*
* On I/O error the function returns LLONG_MAX, however if this is also a
* valid stored value, the caller should use rioGetReadError() to check for
* errors after calling this function. */
long long rdbLoadMillisecondTime(rio *rdb, int rdbver) {
int64_t t64;
if (rioRead(rdb,&t64,8) == 0) return LLONG_MAX;
if (rdbver >= 9) /* Check the top comment of this function. */
memrev64ifbe(&t64); /* Convert in big endian if the system is BE. */
return (long long)t64;
}
2011-05-13 17:24:19 -04:00
/* Saves an encoded length. The first two bits in the first byte are used to
2015-07-27 03:41:48 -04:00
* hold the encoding type. See the RDB_* definitions for more information
2011-05-13 17:24:19 -04:00
* on the types of encoding. */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* Save a 6 bit len */
2015-07-27 03:41:48 -04:00
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* Save a 14 bit len */
2015-07-27 03:41:48 -04:00
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* Save a 32 bit len */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* Save a 64 bit len */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
}
2016-06-01 14:18:28 -04:00
/* Load an encoded length. If the loaded length is a normal length as stored
* with rdbSaveLen(), the read length is set to '*lenptr'. If instead the
* loaded length describes a special encoding that follows, then '*isencoded'
* is set to 1 and the encoding format is stored at '*lenptr'.
*
* See the RDB_ENC_* definitions in rdb.h for more information on special
* encodings.
*
* The function returns -1 on error, 0 on success. */
int rdbLoadLenByRef(rio *rdb, int *isencoded, uint64_t *lenptr) {
2011-05-13 17:24:19 -04:00
unsigned char buf[2];
int type;
if (isencoded) *isencoded = 0;
2016-06-01 14:18:28 -04:00
if (rioRead(rdb,buf,1) == 0) return -1;
2011-05-13 17:24:19 -04:00
type = (buf[0]&0xC0)>>6;
2015-07-27 03:41:48 -04:00
if (type == RDB_ENCVAL) {
2011-05-13 17:24:19 -04:00
/* Read a 6 bit encoding type. */
if (isencoded) *isencoded = 1;
2016-06-01 14:18:28 -04:00
*lenptr = buf[0]&0x3F;
2015-07-27 03:41:48 -04:00
} else if (type == RDB_6BITLEN) {
2011-05-13 17:24:19 -04:00
/* Read a 6 bit len. */
2016-06-01 14:18:28 -04:00
*lenptr = buf[0]&0x3F;
2015-07-27 03:41:48 -04:00
} else if (type == RDB_14BITLEN) {
2011-05-13 17:24:19 -04:00
/* Read a 14 bit len. */
2016-06-01 14:18:28 -04:00
if (rioRead(rdb,buf+1,1) == 0) return -1;
*lenptr = ((buf[0]&0x3F)<<8)|buf[1];
} else if (buf[0] == RDB_32BITLEN) {
2011-05-13 17:24:19 -04:00
/* Read a 32 bit len. */
uint32_t len;
2016-06-01 14:18:28 -04:00
if (rioRead(rdb,&len,4) == 0) return -1;
*lenptr = ntohl(len);
} else if (buf[0] == RDB_64BITLEN) {
/* Read a 64 bit len. */
uint64_t len;
2016-06-01 14:18:28 -04:00
if (rioRead(rdb,&len,8) == 0) return -1;
*lenptr = ntohu64(len);
} else {
rdbExitReportCorruptRDB(
"Unknown length encoding %d in rdbLoadLen()",type);
2016-06-01 14:18:28 -04:00
return -1; /* Never reached. */
2011-05-13 17:24:19 -04:00
}
2016-06-01 14:18:28 -04:00
return 0;
}
/* This is like rdbLoadLenByRef() but directly returns the value read
* from the RDB stream, signaling an error by returning RDB_LENERR
* (since it is a too large count to be applicable in any Redis data
* structure). */
uint64_t rdbLoadLen(rio *rdb, int *isencoded) {
uint64_t len;
if (rdbLoadLenByRef(rdb,isencoded,&len) == -1) return RDB_LENERR;
return len;
2011-05-13 17:24:19 -04:00
}
/* Encodes the "value" argument as integer when it fits in the supported ranges
* for encoded types. If the function successfully encodes the integer, the
* representation is stored in the buffer pointer to by "enc" and the string
* length is returned. Otherwise 0 is returned. */
int rdbEncodeInteger(long long value, unsigned char *enc) {
if (value >= -(1<<7) && value <= (1<<7)-1) {
2015-07-27 03:41:48 -04:00
enc[0] = (RDB_ENCVAL<<6)|RDB_ENC_INT8;
enc[1] = value&0xFF;
return 2;
} else if (value >= -(1<<15) && value <= (1<<15)-1) {
2015-07-27 03:41:48 -04:00
enc[0] = (RDB_ENCVAL<<6)|RDB_ENC_INT16;
enc[1] = value&0xFF;
enc[2] = (value>>8)&0xFF;
return 3;
} else if (value >= -((long long)1<<31) && value <= ((long long)1<<31)-1) {
2015-07-27 03:41:48 -04:00
enc[0] = (RDB_ENCVAL<<6)|RDB_ENC_INT32;
enc[1] = value&0xFF;
enc[2] = (value>>8)&0xFF;
enc[3] = (value>>16)&0xFF;
enc[4] = (value>>24)&0xFF;
return 5;
} else {
return 0;
}
}
2011-05-13 17:24:19 -04:00
/* Loads an integer-encoded object with the specified encoding type "enctype".
* The returned value changes according to the flags, see
* rdbGenericLoadStringObject() for more info. */
void *rdbLoadIntegerObject(rio *rdb, int enctype, int flags, size_t *lenptr) {
int plain = flags & RDB_LOAD_PLAIN;
int sds = flags & RDB_LOAD_SDS;
int encode = flags & RDB_LOAD_ENC;
2011-05-13 17:24:19 -04:00
unsigned char enc[4];
long long val;
2015-07-27 03:41:48 -04:00
if (enctype == RDB_ENC_INT8) {
2011-05-13 17:24:19 -04:00
if (rioRead(rdb,enc,1) == 0) return NULL;
val = (signed char)enc[0];
2015-07-27 03:41:48 -04:00
} else if (enctype == RDB_ENC_INT16) {
2011-05-13 17:24:19 -04:00
uint16_t v;
if (rioRead(rdb,enc,2) == 0) return NULL;
v = enc[0]|(enc[1]<<8);
val = (int16_t)v;
2015-07-27 03:41:48 -04:00
} else if (enctype == RDB_ENC_INT32) {
2011-05-13 17:24:19 -04:00
uint32_t v;
if (rioRead(rdb,enc,4) == 0) return NULL;
v = enc[0]|(enc[1]<<8)|(enc[2]<<16)|(enc[3]<<24);
val = (int32_t)v;
} else {
rdbExitReportCorruptRDB("Unknown RDB integer encoding type %d",enctype);
return NULL; /* Never reached. */
2011-05-13 17:24:19 -04:00
}
if (plain || sds) {
2015-07-27 03:41:48 -04:00
char buf[LONG_STR_SIZE], *p;
int len = ll2string(buf,sizeof(buf),val);
if (lenptr) *lenptr = len;
p = plain ? zmalloc(len) : sdsnewlen(SDS_NOINIT,len);
memcpy(p,buf,len);
return p;
} else if (encode) {
return createStringObjectFromLongLongForValue(val);
} else {
return createObject(OBJ_STRING,sdsfromlonglong(val));
}
2011-05-13 17:24:19 -04:00
}
/* String objects in the form "2391" "-100" without any space and with a
* range of values that can fit in an 8, 16 or 32 bit signed value can be
* encoded as integers to save space */
int rdbTryIntegerEncoding(char *s, size_t len, unsigned char *enc) {
long long value;
char *endptr, buf[32];
/* Check if it's possible to encode this value as a number */
value = strtoll(s, &endptr, 10);
if (endptr[0] != '\0') return 0;
ll2string(buf,32,value);
/* If the number converted back into a string is not identical
* then it's not possible to encode the string as integer */
if (strlen(buf) != len || memcmp(buf,s,len)) return 0;
return rdbEncodeInteger(value,enc);
}
ssize_t rdbSaveLzfBlob(rio *rdb, void *data, size_t compress_len,
size_t original_len) {
unsigned char byte;
ssize_t n, nwritten = 0;
/* Data compressed! Let's save it on disk */
2015-07-27 03:41:48 -04:00
byte = (RDB_ENCVAL<<6)|RDB_ENC_LZF;
if ((n = rdbWriteRaw(rdb,&byte,1)) == -1) goto writeerr;
nwritten += n;
if ((n = rdbSaveLen(rdb,compress_len)) == -1) goto writeerr;
nwritten += n;
if ((n = rdbSaveLen(rdb,original_len)) == -1) goto writeerr;
nwritten += n;
if ((n = rdbWriteRaw(rdb,data,compress_len)) == -1) goto writeerr;
nwritten += n;
return nwritten;
writeerr:
return -1;
}
ssize_t rdbSaveLzfStringObject(rio *rdb, unsigned char *s, size_t len) {
size_t comprlen, outlen;
void *out;
/* We require at least four bytes compression for this to be worth it */
if (len <= 4) return 0;
outlen = len-4;
if ((out = zmalloc(outlen+1)) == NULL) return 0;
comprlen = lzf_compress(s, len, out, outlen);
if (comprlen == 0) {
zfree(out);
return 0;
}
ssize_t nwritten = rdbSaveLzfBlob(rdb, out, comprlen, len);
zfree(out);
return nwritten;
}
/* Load an LZF compressed string in RDB format. The returned value
* changes according to 'flags'. For more info check the
* rdbGenericLoadStringObject() function. */
void *rdbLoadLzfStringObject(rio *rdb, int flags, size_t *lenptr) {
int plain = flags & RDB_LOAD_PLAIN;
int sds = flags & RDB_LOAD_SDS;
2016-06-01 14:18:28 -04:00
uint64_t len, clen;
2011-05-13 17:24:19 -04:00
unsigned char *c = NULL;
char *val = NULL;
2011-05-13 17:24:19 -04:00
2015-07-27 03:41:48 -04:00
if ((clen = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
2011-05-13 17:24:19 -04:00
if ((c = zmalloc(clen)) == NULL) goto err;
/* Allocate our target according to the uncompressed size. */
if (plain) {
val = zmalloc(len);
} else {
val = sdsnewlen(SDS_NOINIT,len);
}
if (lenptr) *lenptr = len;
/* Load the compressed representation and uncompress it to target. */
2011-05-13 17:24:19 -04:00
if (rioRead(rdb,c,clen) == 0) goto err;
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
if (lzf_decompress(c,clen,val,len) != len) {
rdbExitReportCorruptRDB("Invalid LZF compressed string");
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
goto err;
}
2011-05-13 17:24:19 -04:00
zfree(c);
if (plain || sds) {
return val;
} else {
return createObject(OBJ_STRING,val);
}
2011-05-13 17:24:19 -04:00
err:
zfree(c);
if (plain)
zfree(val);
else
sdsfree(val);
2011-05-13 17:24:19 -04:00
return NULL;
}
2013-01-16 12:00:20 -05:00
/* Save a string object as [len][data] on disk. If the object is a string
* representation of an integer value we try to save it in a special form */
ssize_t rdbSaveRawString(rio *rdb, unsigned char *s, size_t len) {
int enclen;
ssize_t n, nwritten = 0;
/* Try integer encoding */
if (len <= 11) {
unsigned char buf[5];
if ((enclen = rdbTryIntegerEncoding((char*)s,len,buf)) > 0) {
if (rdbWriteRaw(rdb,buf,enclen) == -1) return -1;
return enclen;
}
}
/* Try LZF compression - under 20 bytes it's unable to compress even
* aaaaaaaaaaaaaaaaaa so skip it */
2011-12-21 06:22:13 -05:00
if (server.rdb_compression && len > 20) {
n = rdbSaveLzfStringObject(rdb,s,len);
if (n == -1) return -1;
if (n > 0) return n;
/* Return value of 0 means data can't be compressed, save the old way */
}
/* Store verbatim */
if ((n = rdbSaveLen(rdb,len)) == -1) return -1;
nwritten += n;
if (len > 0) {
if (rdbWriteRaw(rdb,s,len) == -1) return -1;
nwritten += len;
}
return nwritten;
}
/* Save a long long value as either an encoded string or a string. */
ssize_t rdbSaveLongLongAsStringObject(rio *rdb, long long value) {
unsigned char buf[32];
ssize_t n, nwritten = 0;
int enclen = rdbEncodeInteger(value,buf);
if (enclen > 0) {
return rdbWriteRaw(rdb,buf,enclen);
} else {
/* Encode as string */
enclen = ll2string((char*)buf,32,value);
2015-07-26 09:29:53 -04:00
serverAssert(enclen < 32);
if ((n = rdbSaveLen(rdb,enclen)) == -1) return -1;
nwritten += n;
if ((n = rdbWriteRaw(rdb,buf,enclen)) == -1) return -1;
nwritten += n;
}
return nwritten;
}
/* Like rdbSaveRawString() gets a Redis object instead. */
ssize_t rdbSaveStringObject(rio *rdb, robj *obj) {
/* Avoid to decode the object, then encode it again, if the
2013-01-16 12:00:20 -05:00
* object is already integer encoded. */
if (obj->encoding == OBJ_ENCODING_INT) {
return rdbSaveLongLongAsStringObject(rdb,(long)obj->ptr);
} else {
2015-07-26 09:29:53 -04:00
serverAssertWithInfo(NULL,obj,sdsEncodedObject(obj));
return rdbSaveRawString(rdb,obj->ptr,sdslen(obj->ptr));
}
}
/* Load a string object from an RDB file according to flags:
*
* RDB_LOAD_NONE (no flags): load an RDB object, unencoded.
* RDB_LOAD_ENC: If the returned type is a Redis object, try to
* encode it in a special way to be more memory
* efficient. When this flag is passed the function
* no longer guarantees that obj->ptr is an SDS string.
* RDB_LOAD_PLAIN: Return a plain string allocated with zmalloc()
2016-04-25 09:49:57 -04:00
* instead of a Redis object with an sds in it.
* RDB_LOAD_SDS: Return an SDS string instead of a Redis object.
*
* On I/O error NULL is returned.
*/
void *rdbGenericLoadStringObject(rio *rdb, int flags, size_t *lenptr) {
int encode = flags & RDB_LOAD_ENC;
int plain = flags & RDB_LOAD_PLAIN;
int sds = flags & RDB_LOAD_SDS;
2011-05-13 17:24:19 -04:00
int isencoded;
unsigned long long len;
2011-05-13 17:24:19 -04:00
len = rdbLoadLen(rdb,&isencoded);
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
if (len == RDB_LENERR) return NULL;
2011-05-13 17:24:19 -04:00
if (isencoded) {
switch(len) {
2015-07-27 03:41:48 -04:00
case RDB_ENC_INT8:
case RDB_ENC_INT16:
case RDB_ENC_INT32:
return rdbLoadIntegerObject(rdb,len,flags,lenptr);
2015-07-27 03:41:48 -04:00
case RDB_ENC_LZF:
return rdbLoadLzfStringObject(rdb,flags,lenptr);
2011-05-13 17:24:19 -04:00
default:
rdbExitReportCorruptRDB("Unknown RDB string encoding type %llu",len);
return NULL;
2011-05-13 17:24:19 -04:00
}
}
if (plain || sds) {
void *buf = plain ? zmalloc(len) : sdsnewlen(SDS_NOINIT,len);
if (lenptr) *lenptr = len;
if (len && rioRead(rdb,buf,len) == 0) {
if (plain)
zfree(buf);
else
sdsfree(buf);
return NULL;
}
return buf;
} else {
robj *o = encode ? createStringObject(SDS_NOINIT,len) :
createRawStringObject(SDS_NOINIT,len);
if (len && rioRead(rdb,o->ptr,len) == 0) {
decrRefCount(o);
return NULL;
}
return o;
2011-05-13 17:24:19 -04:00
}
}
robj *rdbLoadStringObject(rio *rdb) {
return rdbGenericLoadStringObject(rdb,RDB_LOAD_NONE,NULL);
2011-05-13 17:24:19 -04:00
}
robj *rdbLoadEncodedStringObject(rio *rdb) {
return rdbGenericLoadStringObject(rdb,RDB_LOAD_ENC,NULL);
2011-05-13 17:24:19 -04:00
}
/* Save a double value. Doubles are saved as strings prefixed by an unsigned
2013-01-16 12:00:20 -05:00
* 8 bit integer specifying the length of the representation.
* This 8 bit integer has special values in order to specify the following
* conditions:
* 253: not a number
* 254: + inf
* 255: - inf
*/
int rdbSaveDoubleValue(rio *rdb, double val) {
unsigned char buf[128];
int len;
if (isnan(val)) {
buf[0] = 253;
len = 1;
} else if (!isfinite(val)) {
len = 1;
buf[0] = (val < 0) ? 255 : 254;
} else {
#if (DBL_MANT_DIG >= 52) && (LLONG_MAX == 0x7fffffffffffffffLL)
/* Check if the float is in a safe range to be casted into a
* long long. We are assuming that long long is 64 bit here.
* Also we are assuming that there are no implementations around where
* double has precision < 52 bit.
*
* Under this assumptions we test if a double is inside an interval
* where casting to long long is safe. Then using two castings we
* make sure the decimal part is zero. If all this is true we use
* integer printing function that is much faster. */
double min = -4503599627370495; /* (2^52)-1 */
double max = 4503599627370496; /* -(2^52) */
if (val > min && val < max && val == ((double)((long long)val)))
2014-05-12 05:05:18 -04:00
ll2string((char*)buf+1,sizeof(buf)-1,(long long)val);
else
#endif
snprintf((char*)buf+1,sizeof(buf)-1,"%.17g",val);
buf[0] = strlen((char*)buf+1);
len = buf[0]+1;
}
return rdbWriteRaw(rdb,buf,len);
}
2011-05-13 17:24:19 -04:00
/* For information about double serialization check rdbSaveDoubleValue() */
int rdbLoadDoubleValue(rio *rdb, double *val) {
char buf[256];
2011-05-13 17:24:19 -04:00
unsigned char len;
if (rioRead(rdb,&len,1) == 0) return -1;
switch(len) {
case 255: *val = R_NegInf; return 0;
case 254: *val = R_PosInf; return 0;
case 253: *val = R_Nan; return 0;
default:
if (rioRead(rdb,buf,len) == 0) return -1;
buf[len] = '\0';
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
if (sscanf(buf, "%lg", val)!=1) return -1;
2011-05-13 17:24:19 -04:00
return 0;
}
}
/* Saves a double for RDB 8 or greater, where IE754 binary64 format is assumed.
* We just make sure the integer is always stored in little endian, otherwise
* the value is copied verbatim from memory to disk.
*
* Return -1 on error, the size of the serialized value on success. */
int rdbSaveBinaryDoubleValue(rio *rdb, double val) {
memrev64ifbe(&val);
return rdbWriteRaw(rdb,&val,sizeof(val));
}
/* Loads a double from RDB 8 or greater. See rdbSaveBinaryDoubleValue() for
* more info. On error -1 is returned, otherwise 0. */
int rdbLoadBinaryDoubleValue(rio *rdb, double *val) {
if (rioRead(rdb,val,sizeof(*val)) == 0) return -1;
memrev64ifbe(val);
return 0;
}
/* Like rdbSaveBinaryDoubleValue() but single precision. */
int rdbSaveBinaryFloatValue(rio *rdb, float val) {
memrev32ifbe(&val);
return rdbWriteRaw(rdb,&val,sizeof(val));
}
/* Like rdbLoadBinaryDoubleValue() but single precision. */
int rdbLoadBinaryFloatValue(rio *rdb, float *val) {
if (rioRead(rdb,val,sizeof(*val)) == 0) return -1;
memrev32ifbe(val);
return 0;
}
2011-05-13 17:24:19 -04:00
/* Save the object type of object "o". */
int rdbSaveObjectType(rio *rdb, robj *o) {
switch (o->type) {
case OBJ_STRING:
2015-07-27 03:41:48 -04:00
return rdbSaveType(rdb,RDB_TYPE_STRING);
case OBJ_LIST:
if (o->encoding == OBJ_ENCODING_QUICKLIST)
2015-07-27 03:41:48 -04:00
return rdbSaveType(rdb,RDB_TYPE_LIST_QUICKLIST);
2011-05-13 17:24:19 -04:00
else
2015-07-27 03:41:48 -04:00
serverPanic("Unknown list encoding");
case OBJ_SET:
if (o->encoding == OBJ_ENCODING_INTSET)
2015-07-27 03:41:48 -04:00
return rdbSaveType(rdb,RDB_TYPE_SET_INTSET);
else if (o->encoding == OBJ_ENCODING_HT)
2015-07-27 03:41:48 -04:00
return rdbSaveType(rdb,RDB_TYPE_SET);
2011-05-13 17:24:19 -04:00
else
2015-07-27 03:41:48 -04:00
serverPanic("Unknown set encoding");
case OBJ_ZSET:
if (o->encoding == OBJ_ENCODING_ZIPLIST)
2015-07-27 03:41:48 -04:00
return rdbSaveType(rdb,RDB_TYPE_ZSET_ZIPLIST);
else if (o->encoding == OBJ_ENCODING_SKIPLIST)
return rdbSaveType(rdb,RDB_TYPE_ZSET_2);
2011-05-13 17:24:19 -04:00
else
2015-07-27 03:41:48 -04:00
serverPanic("Unknown sorted set encoding");
case OBJ_HASH:
if (o->encoding == OBJ_ENCODING_ZIPLIST)
2015-07-27 03:41:48 -04:00
return rdbSaveType(rdb,RDB_TYPE_HASH_ZIPLIST);
else if (o->encoding == OBJ_ENCODING_HT)
2015-07-27 03:41:48 -04:00
return rdbSaveType(rdb,RDB_TYPE_HASH);
2011-05-13 17:24:19 -04:00
else
2015-07-27 03:41:48 -04:00
serverPanic("Unknown hash encoding");
2017-09-05 07:14:13 -04:00
case OBJ_STREAM:
return rdbSaveType(rdb,RDB_TYPE_STREAM_LISTPACKS);
case OBJ_MODULE:
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
return rdbSaveType(rdb,RDB_TYPE_MODULE_2);
2011-05-13 17:24:19 -04:00
default:
2015-07-27 03:41:48 -04:00
serverPanic("Unknown object type");
2011-05-13 17:24:19 -04:00
}
return -1; /* avoid warning */
}
/* Use rdbLoadType() to load a TYPE in RDB format, but returns -1 if the
* type is not specifically a valid Object Type. */
2011-05-13 17:24:19 -04:00
int rdbLoadObjectType(rio *rdb) {
int type;
if ((type = rdbLoadType(rdb)) == -1) return -1;
if (!rdbIsObjectType(type)) return -1;
return type;
}
/* This helper function serializes a consumer group Pending Entries List (PEL)
* into the RDB file. The 'nacks' argument tells the function if also persist
* the informations about the not acknowledged message, or if to persist
* just the IDs: this is useful because for the global consumer group PEL
* we serialized the NACKs as well, but when serializing the local consumer
* PELs we just add the ID, that will be resolved inside the global PEL to
* put a reference to the same structure. */
ssize_t rdbSaveStreamPEL(rio *rdb, rax *pel, int nacks) {
ssize_t n, nwritten = 0;
/* Number of entries in the PEL. */
if ((n = rdbSaveLen(rdb,raxSize(pel))) == -1) return -1;
nwritten += n;
/* Save each entry. */
raxIterator ri;
raxStart(&ri,pel);
raxSeek(&ri,"^",NULL,0);
while(raxNext(&ri)) {
/* We store IDs in raw form as 128 big big endian numbers, like
* they are inside the radix tree key. */
if ((n = rdbWriteRaw(rdb,ri.key,sizeof(streamID))) == -1) {
raxStop(&ri);
return -1;
}
nwritten += n;
if (nacks) {
streamNACK *nack = ri.data;
if ((n = rdbSaveMillisecondTime(rdb,nack->delivery_time)) == -1) {
raxStop(&ri);
return -1;
}
nwritten += n;
if ((n = rdbSaveLen(rdb,nack->delivery_count)) == -1) {
raxStop(&ri);
return -1;
}
nwritten += n;
/* We don't save the consumer name: we'll save the pending IDs
* for each consumer in the consumer PEL, and resolve the consumer
* at loading time. */
}
}
raxStop(&ri);
return nwritten;
}
2018-01-31 11:06:32 -05:00
/* Serialize the consumers of a stream consumer group into the RDB. Helper
* function for the stream data type serialization. What we do here is to
* persist the consumer metadata, and it's PEL, for each consumer. */
size_t rdbSaveStreamConsumers(rio *rdb, streamCG *cg) {
ssize_t n, nwritten = 0;
/* Number of consumers in this consumer group. */
if ((n = rdbSaveLen(rdb,raxSize(cg->consumers))) == -1) return -1;
nwritten += n;
/* Save each consumer. */
raxIterator ri;
raxStart(&ri,cg->consumers);
raxSeek(&ri,"^",NULL,0);
while(raxNext(&ri)) {
streamConsumer *consumer = ri.data;
/* Consumer name. */
if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) {
raxStop(&ri);
return -1;
}
2018-01-31 11:06:32 -05:00
nwritten += n;
/* Last seen time. */
if ((n = rdbSaveMillisecondTime(rdb,consumer->seen_time)) == -1) {
raxStop(&ri);
2018-01-31 11:06:32 -05:00
return -1;
}
2018-01-31 11:06:32 -05:00
nwritten += n;
/* Consumer PEL, without the ACKs (see last parameter of the function
* passed with value of 0), at loading time we'll lookup the ID
* in the consumer group global PEL and will put a reference in the
* consumer local PEL. */
if ((n = rdbSaveStreamPEL(rdb,consumer->pel,0)) == -1) {
raxStop(&ri);
2018-01-31 11:06:32 -05:00
return -1;
}
2018-01-31 11:06:32 -05:00
nwritten += n;
}
raxStop(&ri);
return nwritten;
}
/* Save a Redis object.
* Returns -1 on error, number of bytes written on success. */
2016-11-30 14:47:02 -05:00
ssize_t rdbSaveObject(rio *rdb, robj *o, robj *key) {
ssize_t n = 0, nwritten = 0;
if (o->type == OBJ_STRING) {
/* Save a string value */
if ((n = rdbSaveStringObject(rdb,o)) == -1) return -1;
nwritten += n;
} else if (o->type == OBJ_LIST) {
/* Save a list value */
if (o->encoding == OBJ_ENCODING_QUICKLIST) {
quicklist *ql = o->ptr;
quicklistNode *node = ql->head;
if ((n = rdbSaveLen(rdb,ql->len)) == -1) return -1;
nwritten += n;
while(node) {
if (quicklistNodeIsCompressed(node)) {
void *data;
size_t compress_len = quicklistGetLzf(node, &data);
if ((n = rdbSaveLzfBlob(rdb,data,compress_len,node->sz)) == -1) return -1;
nwritten += n;
} else {
if ((n = rdbSaveRawString(rdb,node->zl,node->sz)) == -1) return -1;
nwritten += n;
}
node = node->next;
}
} else {
2015-07-27 03:41:48 -04:00
serverPanic("Unknown list encoding");
}
} else if (o->type == OBJ_SET) {
/* Save a set value */
if (o->encoding == OBJ_ENCODING_HT) {
dict *set = o->ptr;
dictIterator *di = dictGetIterator(set);
dictEntry *de;
if ((n = rdbSaveLen(rdb,dictSize(set))) == -1) {
dictReleaseIterator(di);
return -1;
}
nwritten += n;
while((de = dictNext(di)) != NULL) {
sds ele = dictGetKey(de);
if ((n = rdbSaveRawString(rdb,(unsigned char*)ele,sdslen(ele)))
== -1)
{
dictReleaseIterator(di);
return -1;
}
nwritten += n;
}
dictReleaseIterator(di);
} else if (o->encoding == OBJ_ENCODING_INTSET) {
size_t l = intsetBlobLen((intset*)o->ptr);
if ((n = rdbSaveRawString(rdb,o->ptr,l)) == -1) return -1;
nwritten += n;
} else {
2015-07-27 03:41:48 -04:00
serverPanic("Unknown set encoding");
}
} else if (o->type == OBJ_ZSET) {
/* Save a sorted set value */
if (o->encoding == OBJ_ENCODING_ZIPLIST) {
size_t l = ziplistBlobLen((unsigned char*)o->ptr);
if ((n = rdbSaveRawString(rdb,o->ptr,l)) == -1) return -1;
nwritten += n;
} else if (o->encoding == OBJ_ENCODING_SKIPLIST) {
zset *zs = o->ptr;
zskiplist *zsl = zs->zsl;
if ((n = rdbSaveLen(rdb,zsl->length)) == -1) return -1;
nwritten += n;
/* We save the skiplist elements from the greatest to the smallest
* (that's trivial since the elements are already ordered in the
* skiplist): this improves the load process, since the next loaded
* element will always be the smaller, so adding to the skiplist
* will always immediately stop at the head, making the insertion
* O(1) instead of O(log(N)). */
zskiplistNode *zn = zsl->tail;
while (zn != NULL) {
if ((n = rdbSaveRawString(rdb,
(unsigned char*)zn->ele,sdslen(zn->ele))) == -1)
{
return -1;
}
nwritten += n;
if ((n = rdbSaveBinaryDoubleValue(rdb,zn->score)) == -1)
return -1;
nwritten += n;
zn = zn->backward;
}
} else {
2015-07-27 03:41:48 -04:00
serverPanic("Unknown sorted set encoding");
}
} else if (o->type == OBJ_HASH) {
/* Save a hash value */
if (o->encoding == OBJ_ENCODING_ZIPLIST) {
2012-01-03 01:14:10 -05:00
size_t l = ziplistBlobLen((unsigned char*)o->ptr);
if ((n = rdbSaveRawString(rdb,o->ptr,l)) == -1) return -1;
nwritten += n;
2012-01-03 01:14:10 -05:00
} else if (o->encoding == OBJ_ENCODING_HT) {
dictIterator *di = dictGetIterator(o->ptr);
dictEntry *de;
if ((n = rdbSaveLen(rdb,dictSize((dict*)o->ptr))) == -1) {
dictReleaseIterator(di);
return -1;
}
nwritten += n;
while((de = dictNext(di)) != NULL) {
sds field = dictGetKey(de);
sds value = dictGetVal(de);
if ((n = rdbSaveRawString(rdb,(unsigned char*)field,
sdslen(field))) == -1)
{
dictReleaseIterator(di);
return -1;
}
nwritten += n;
if ((n = rdbSaveRawString(rdb,(unsigned char*)value,
sdslen(value))) == -1)
{
dictReleaseIterator(di);
return -1;
}
nwritten += n;
}
dictReleaseIterator(di);
2012-01-03 01:14:10 -05:00
} else {
2015-07-27 03:41:48 -04:00
serverPanic("Unknown hash encoding");
}
2017-09-05 07:14:13 -04:00
} else if (o->type == OBJ_STREAM) {
/* Store how many listpacks we have inside the radix tree. */
stream *s = o->ptr;
rax *rax = s->rax;
if ((n = rdbSaveLen(rdb,raxSize(rax))) == -1) return -1;
nwritten += n;
2012-01-03 01:14:10 -05:00
2017-09-05 07:14:13 -04:00
/* Serialize all the listpacks inside the radix tree as they are,
* when loading back, we'll use the first entry of each listpack
* to insert it back into the radix tree. */
raxIterator ri;
raxStart(&ri,rax);
raxSeek(&ri,"^",NULL,0);
while (raxNext(&ri)) {
unsigned char *lp = ri.data;
size_t lp_bytes = lpBytes(lp);
if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) {
raxStop(&ri);
return -1;
}
nwritten += n;
if ((n = rdbSaveRawString(rdb,lp,lp_bytes)) == -1) {
raxStop(&ri);
return -1;
}
2017-09-05 07:14:13 -04:00
nwritten += n;
}
raxStop(&ri);
2017-09-06 06:00:18 -04:00
/* Save the number of elements inside the stream. We cannot obtain
* this easily later, since our macro nodes should be checked for
* number of items: not a great CPU / space tradeoff. */
if ((n = rdbSaveLen(rdb,s->length)) == -1) return -1;
nwritten += n;
/* Save the last entry ID. */
if ((n = rdbSaveLen(rdb,s->last_id.ms)) == -1) return -1;
nwritten += n;
if ((n = rdbSaveLen(rdb,s->last_id.seq)) == -1) return -1;
nwritten += n;
/* The consumer groups and their clients are part of the stream
* type, so serialize every consumer group. */
/* Save the number of groups. */
size_t num_cgroups = s->cgroups ? raxSize(s->cgroups) : 0;
if ((n = rdbSaveLen(rdb,num_cgroups)) == -1) return -1;
nwritten += n;
if (num_cgroups) {
/* Serialize each consumer group. */
raxStart(&ri,s->cgroups);
raxSeek(&ri,"^",NULL,0);
while(raxNext(&ri)) {
streamCG *cg = ri.data;
/* Save the group name. */
if ((n = rdbSaveRawString(rdb,ri.key,ri.key_len)) == -1) {
raxStop(&ri);
return -1;
}
nwritten += n;
/* Last ID. */
if ((n = rdbSaveLen(rdb,cg->last_id.ms)) == -1) {
raxStop(&ri);
return -1;
}
nwritten += n;
if ((n = rdbSaveLen(rdb,cg->last_id.seq)) == -1) {
raxStop(&ri);
return -1;
}
nwritten += n;
/* Save the global PEL. */
if ((n = rdbSaveStreamPEL(rdb,cg->pel,1)) == -1) {
raxStop(&ri);
return -1;
}
nwritten += n;
/* Save the consumers of this group. */
if ((n = rdbSaveStreamConsumers(rdb,cg)) == -1) {
raxStop(&ri);
return -1;
}
nwritten += n;
}
raxStop(&ri);
}
} else if (o->type == OBJ_MODULE) {
/* Save a module-specific value. */
RedisModuleIO io;
moduleValue *mv = o->ptr;
moduleType *mt = mv->type;
/* Write the "module" identifier as prefix, so that we'll be able
* to call the right module during loading. */
int retval = rdbSaveLen(rdb,mt->id);
if (retval == -1) return -1;
moduleInitIOContext(io,mt,rdb,key);
io.bytes += retval;
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
/* Then write the module-specific representation + EOF marker. */
mt->rdb_save(&io,mv->value);
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
retval = rdbSaveLen(rdb,RDB_MODULE_OPCODE_EOF);
if (retval == -1)
io.error = 1;
else
io.bytes += retval;
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
if (io.ctx) {
moduleFreeContext(io.ctx);
zfree(io.ctx);
}
2016-06-05 09:34:43 -04:00
return io.error ? -1 : (ssize_t)io.bytes;
} else {
2015-07-27 03:41:48 -04:00
serverPanic("Unknown object type");
}
return nwritten;
}
/* Return the length the object will have on disk if saved with
* the rdbSaveObject() function. Currently we use a trick to get
* this length with very little changes to the code. In the future
* we could switch to a faster solution. */
size_t rdbSavedObjectLen(robj *o, robj *key) {
ssize_t len = rdbSaveObject(NULL,o,key);
2015-07-26 09:29:53 -04:00
serverAssertWithInfo(NULL,o,len != -1);
return len;
}
/* Save a key-value pair, with expire time, type, key, value.
* On error -1 is returned.
2013-01-16 12:00:20 -05:00
* On success if the key was actually saved 1 is returned, otherwise 0
* is returned (the key was already expired). */
int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime) {
int savelru = server.maxmemory_policy & MAXMEMORY_FLAG_LRU;
int savelfu = server.maxmemory_policy & MAXMEMORY_FLAG_LFU;
/* Save the expire time */
if (expiretime != -1) {
2015-07-27 03:41:48 -04:00
if (rdbSaveType(rdb,RDB_OPCODE_EXPIRETIME_MS) == -1) return -1;
if (rdbSaveMillisecondTime(rdb,expiretime) == -1) return -1;
}
/* Save the LRU info. */
if (savelru) {
uint64_t idletime = estimateObjectIdleTime(val);
idletime /= 1000; /* Using seconds is enough and requires less space.*/
if (rdbSaveType(rdb,RDB_OPCODE_IDLE) == -1) return -1;
if (rdbSaveLen(rdb,idletime) == -1) return -1;
}
/* Save the LFU info. */
if (savelfu) {
uint8_t buf[1];
buf[0] = LFUDecrAndReturn(val);
/* We can encode this in exactly two bytes: the opcode and an 8
* bit counter, since the frequency is logarithmic with a 0-255 range.
* Note that we do not store the halving time because to reset it
* a single time when loading does not affect the frequency much. */
if (rdbSaveType(rdb,RDB_OPCODE_FREQ) == -1) return -1;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
}
/* Save type, key, value */
if (rdbSaveObjectType(rdb,val) == -1) return -1;
if (rdbSaveStringObject(rdb,key) == -1) return -1;
2016-11-30 14:47:02 -05:00
if (rdbSaveObject(rdb,val,key) == -1) return -1;
/* Delay return if required (for testing) */
if (server.rdb_key_save_delay)
debugDelay(server.rdb_key_save_delay);
return 1;
}
/* Save an AUX field. */
ssize_t rdbSaveAuxField(rio *rdb, void *key, size_t keylen, void *val, size_t vallen) {
ssize_t ret, len = 0;
if ((ret = rdbSaveType(rdb,RDB_OPCODE_AUX)) == -1) return -1;
len += ret;
if ((ret = rdbSaveRawString(rdb,key,keylen)) == -1) return -1;
len += ret;
if ((ret = rdbSaveRawString(rdb,val,vallen)) == -1) return -1;
len += ret;
return len;
}
/* Wrapper for rdbSaveAuxField() used when key/val length can be obtained
* with strlen(). */
ssize_t rdbSaveAuxFieldStrStr(rio *rdb, char *key, char *val) {
return rdbSaveAuxField(rdb,key,strlen(key),val,strlen(val));
}
/* Wrapper for strlen(key) + integer type (up to long long range). */
ssize_t rdbSaveAuxFieldStrInt(rio *rdb, char *key, long long val) {
2015-07-27 03:41:48 -04:00
char buf[LONG_STR_SIZE];
int vlen = ll2string(buf,sizeof(buf),val);
return rdbSaveAuxField(rdb,key,strlen(key),buf,vlen);
}
/* Save a few default AUX fields with information about the RDB generated. */
int rdbSaveInfoAuxFields(rio *rdb, int rdbflags, rdbSaveInfo *rsi) {
int redis_bits = (sizeof(void*) == 8) ? 64 : 32;
int aof_preamble = (rdbflags & RDBFLAGS_AOF_PREAMBLE) != 0;
/* Add a few fields about the state when the RDB was created. */
if (rdbSaveAuxFieldStrStr(rdb,"redis-ver",REDIS_VERSION) == -1) return -1;
if (rdbSaveAuxFieldStrInt(rdb,"redis-bits",redis_bits) == -1) return -1;
if (rdbSaveAuxFieldStrInt(rdb,"ctime",time(NULL)) == -1) return -1;
if (rdbSaveAuxFieldStrInt(rdb,"used-mem",zmalloc_used_memory()) == -1) return -1;
PSYNC2: different improvements to Redis replication. The gist of the changes is that now, partial resynchronizations between slaves and masters (without the need of a full resync with RDB transfer and so forth), work in a number of cases when it was impossible in the past. For instance: 1. When a slave is promoted to mastrer, the slaves of the old master can partially resynchronize with the new master. 2. Chained slalves (slaves of slaves) can be moved to replicate to other slaves or the master itsef, without requiring a full resync. 3. The master itself, after being turned into a slave, is able to partially resynchronize with the new master, when it joins replication again. In order to obtain this, the following main changes were operated: * Slaves also take a replication backlog, not just masters. * Same stream replication for all the slaves and sub slaves. The replication stream is identical from the top level master to its slaves and is also the same from the slaves to their sub-slaves and so forth. This means that if a slave is later promoted to master, it has the same replication backlong, and can partially resynchronize with its slaves (that were previously slaves of the old master). * A given replication history is no longer identified by the `runid` of a Redis node. There is instead a `replication ID` which changes every time the instance has a new history no longer coherent with the past one. So, for example, slaves publish the same replication history of their master, however when they are turned into masters, they publish a new replication ID, but still remember the old ID, so that they are able to partially resynchronize with slaves of the old master (up to a given offset). * The replication protocol was slightly modified so that a new extended +CONTINUE reply from the master is able to inform the slave of a replication ID change. * REPLCONF CAPA is used in order to notify masters that a slave is able to understand the new +CONTINUE reply. * The RDB file was extended with an auxiliary field that is able to select a given DB after loading in the slave, so that the slave can continue receiving the replication stream from the point it was disconnected without requiring the master to insert "SELECT" statements. This is useful in order to guarantee the "same stream" property, because the slave must be able to accumulate an identical backlog. * Slave pings to sub-slaves are now sent in a special form, when the top-level master is disconnected, in order to don't interfer with the replication stream. We just use out of band "\n" bytes as in other parts of the Redis protocol. An old design document is available here: https://gist.github.com/antirez/ae068f95c0d084891305 However the implementation is not identical to the description because during the work to implement it, different changes were needed in order to make things working well.
2016-11-09 05:31:06 -05:00
/* Handle saving options that generate aux fields. */
if (rsi) {
if (rdbSaveAuxFieldStrInt(rdb,"repl-stream-db",rsi->repl_stream_db)
== -1) return -1;
if (rdbSaveAuxFieldStrStr(rdb,"repl-id",server.replid)
== -1) return -1;
if (rdbSaveAuxFieldStrInt(rdb,"repl-offset",server.master_repl_offset)
== -1) return -1;
PSYNC2: different improvements to Redis replication. The gist of the changes is that now, partial resynchronizations between slaves and masters (without the need of a full resync with RDB transfer and so forth), work in a number of cases when it was impossible in the past. For instance: 1. When a slave is promoted to mastrer, the slaves of the old master can partially resynchronize with the new master. 2. Chained slalves (slaves of slaves) can be moved to replicate to other slaves or the master itsef, without requiring a full resync. 3. The master itself, after being turned into a slave, is able to partially resynchronize with the new master, when it joins replication again. In order to obtain this, the following main changes were operated: * Slaves also take a replication backlog, not just masters. * Same stream replication for all the slaves and sub slaves. The replication stream is identical from the top level master to its slaves and is also the same from the slaves to their sub-slaves and so forth. This means that if a slave is later promoted to master, it has the same replication backlong, and can partially resynchronize with its slaves (that were previously slaves of the old master). * A given replication history is no longer identified by the `runid` of a Redis node. There is instead a `replication ID` which changes every time the instance has a new history no longer coherent with the past one. So, for example, slaves publish the same replication history of their master, however when they are turned into masters, they publish a new replication ID, but still remember the old ID, so that they are able to partially resynchronize with slaves of the old master (up to a given offset). * The replication protocol was slightly modified so that a new extended +CONTINUE reply from the master is able to inform the slave of a replication ID change. * REPLCONF CAPA is used in order to notify masters that a slave is able to understand the new +CONTINUE reply. * The RDB file was extended with an auxiliary field that is able to select a given DB after loading in the slave, so that the slave can continue receiving the replication stream from the point it was disconnected without requiring the master to insert "SELECT" statements. This is useful in order to guarantee the "same stream" property, because the slave must be able to accumulate an identical backlog. * Slave pings to sub-slaves are now sent in a special form, when the top-level master is disconnected, in order to don't interfer with the replication stream. We just use out of band "\n" bytes as in other parts of the Redis protocol. An old design document is available here: https://gist.github.com/antirez/ae068f95c0d084891305 However the implementation is not identical to the description because during the work to implement it, different changes were needed in order to make things working well.
2016-11-09 05:31:06 -05:00
}
2016-08-09 10:41:40 -04:00
if (rdbSaveAuxFieldStrInt(rdb,"aof-preamble",aof_preamble) == -1) return -1;
return 1;
}
ssize_t rdbSaveSingleModuleAux(rio *rdb, int when, moduleType *mt) {
/* Save a module-specific aux value. */
RedisModuleIO io;
int retval = rdbSaveType(rdb, RDB_OPCODE_MODULE_AUX);
if (retval == -1) return -1;
moduleInitIOContext(io,mt,rdb,NULL);
io.bytes += retval;
/* Write the "module" identifier as prefix, so that we'll be able
* to call the right module during loading. */
retval = rdbSaveLen(rdb,mt->id);
if (retval == -1) return -1;
io.bytes += retval;
/* write the 'when' so that we can provide it on loading. add a UINT opcode
* for backwards compatibility, everything after the MT needs to be prefixed
* by an opcode. */
retval = rdbSaveLen(rdb,RDB_MODULE_OPCODE_UINT);
if (retval == -1) return -1;
io.bytes += retval;
retval = rdbSaveLen(rdb,when);
if (retval == -1) return -1;
io.bytes += retval;
/* Then write the module-specific representation + EOF marker. */
mt->aux_save(&io,when);
retval = rdbSaveLen(rdb,RDB_MODULE_OPCODE_EOF);
if (retval == -1)
io.error = 1;
else
io.bytes += retval;
if (io.ctx) {
moduleFreeContext(io.ctx);
zfree(io.ctx);
}
if (io.error)
return -1;
return io.bytes;
}
/* Produces a dump of the database in RDB format sending it to the specified
* Redis I/O channel. On success C_OK is returned, otherwise C_ERR
* is returned and part of the output, or all the output, can be
* missing because of I/O errors.
*
* When the function returns C_ERR and if 'error' is not NULL, the
* integer pointed by 'error' is set to the value of errno just after the I/O
* error. */
int rdbSaveRio(rio *rdb, int *error, int rdbflags, rdbSaveInfo *rsi) {
dictIterator *di = NULL;
dictEntry *de;
char magic[10];
int j;
uint64_t cksum;
2016-08-09 05:07:32 -04:00
size_t processed = 0;
if (server.rdb_checksum)
rdb->update_cksum = rioGenericUpdateChecksum;
2015-07-27 03:41:48 -04:00
snprintf(magic,sizeof(magic),"REDIS%04d",RDB_VERSION);
if (rdbWriteRaw(rdb,magic,9) == -1) goto werr;
if (rdbSaveInfoAuxFields(rdb,rdbflags,rsi) == -1) goto werr;
if (rdbSaveModulesAux(rdb, REDISMODULE_AUX_BEFORE_RDB) == -1) goto werr;
for (j = 0; j < server.dbnum; j++) {
redisDb *db = server.db+j;
dict *d = db->dict;
if (dictSize(d) == 0) continue;
di = dictGetSafeIterator(d);
/* Write the SELECT DB opcode */
2015-07-27 03:41:48 -04:00
if (rdbSaveType(rdb,RDB_OPCODE_SELECTDB) == -1) goto werr;
if (rdbSaveLen(rdb,j) == -1) goto werr;
/* Write the RESIZE DB opcode. */
uint64_t db_size, expires_size;
db_size = dictSize(db->dict);
expires_size = dictSize(db->expires);
2015-07-27 03:41:48 -04:00
if (rdbSaveType(rdb,RDB_OPCODE_RESIZEDB) == -1) goto werr;
if (rdbSaveLen(rdb,db_size) == -1) goto werr;
if (rdbSaveLen(rdb,expires_size) == -1) goto werr;
/* Iterate this DB writing every entry */
while((de = dictNext(di)) != NULL) {
sds keystr = dictGetKey(de);
robj key, *o = dictGetVal(de);
long long expire;
initStaticStringObject(key,keystr);
expire = getExpire(db,&key);
if (rdbSaveKeyValuePair(rdb,&key,o,expire) == -1) goto werr;
2016-08-09 05:07:32 -04:00
/* When this RDB is produced as part of an AOF rewrite, move
* accumulated diff from parent to child while rewriting in
* order to have a smaller final write. */
if (rdbflags & RDBFLAGS_AOF_PREAMBLE &&
2016-08-09 10:41:40 -04:00
rdb->processed_bytes > processed+AOF_READ_DIFF_INTERVAL_BYTES)
2016-08-09 05:07:32 -04:00
{
2016-08-09 10:41:40 -04:00
processed = rdb->processed_bytes;
2016-08-09 05:07:32 -04:00
aofReadDiffFromParent();
}
}
dictReleaseIterator(di);
di = NULL; /* So that we don't release it again on error. */
}
/* If we are storing the replication information on disk, persist
* the script cache as well: on successful PSYNC after a restart, we need
* to be able to process any EVALSHA inside the replication backlog the
* master will send us. */
if (rsi && dictSize(server.lua_scripts)) {
di = dictGetIterator(server.lua_scripts);
while((de = dictNext(di)) != NULL) {
robj *body = dictGetVal(de);
if (rdbSaveAuxField(rdb,"lua",3,body->ptr,sdslen(body->ptr)) == -1)
goto werr;
}
dictReleaseIterator(di);
di = NULL; /* So that we don't release it again on error. */
}
if (rdbSaveModulesAux(rdb, REDISMODULE_AUX_AFTER_RDB) == -1) goto werr;
/* EOF opcode */
2015-07-27 03:41:48 -04:00
if (rdbSaveType(rdb,RDB_OPCODE_EOF) == -1) goto werr;
/* CRC64 checksum. It will be zero if checksum computation is disabled, the
* loading code skips the check in this case. */
cksum = rdb->cksum;
memrev64ifbe(&cksum);
if (rioWrite(rdb,&cksum,8) == 0) goto werr;
return C_OK;
werr:
if (error) *error = errno;
if (di) dictReleaseIterator(di);
return C_ERR;
}
/* This is just a wrapper to rdbSaveRio() that additionally adds a prefix
* and a suffix to the generated RDB dump. The prefix is:
*
* $EOF:<40 bytes unguessable hex string>\r\n
*
* While the suffix is the 40 bytes hex string we announced in the prefix.
* This way processes receiving the payload can understand when it ends
* without doing any processing of the content. */
PSYNC2: different improvements to Redis replication. The gist of the changes is that now, partial resynchronizations between slaves and masters (without the need of a full resync with RDB transfer and so forth), work in a number of cases when it was impossible in the past. For instance: 1. When a slave is promoted to mastrer, the slaves of the old master can partially resynchronize with the new master. 2. Chained slalves (slaves of slaves) can be moved to replicate to other slaves or the master itsef, without requiring a full resync. 3. The master itself, after being turned into a slave, is able to partially resynchronize with the new master, when it joins replication again. In order to obtain this, the following main changes were operated: * Slaves also take a replication backlog, not just masters. * Same stream replication for all the slaves and sub slaves. The replication stream is identical from the top level master to its slaves and is also the same from the slaves to their sub-slaves and so forth. This means that if a slave is later promoted to master, it has the same replication backlong, and can partially resynchronize with its slaves (that were previously slaves of the old master). * A given replication history is no longer identified by the `runid` of a Redis node. There is instead a `replication ID` which changes every time the instance has a new history no longer coherent with the past one. So, for example, slaves publish the same replication history of their master, however when they are turned into masters, they publish a new replication ID, but still remember the old ID, so that they are able to partially resynchronize with slaves of the old master (up to a given offset). * The replication protocol was slightly modified so that a new extended +CONTINUE reply from the master is able to inform the slave of a replication ID change. * REPLCONF CAPA is used in order to notify masters that a slave is able to understand the new +CONTINUE reply. * The RDB file was extended with an auxiliary field that is able to select a given DB after loading in the slave, so that the slave can continue receiving the replication stream from the point it was disconnected without requiring the master to insert "SELECT" statements. This is useful in order to guarantee the "same stream" property, because the slave must be able to accumulate an identical backlog. * Slave pings to sub-slaves are now sent in a special form, when the top-level master is disconnected, in order to don't interfer with the replication stream. We just use out of band "\n" bytes as in other parts of the Redis protocol. An old design document is available here: https://gist.github.com/antirez/ae068f95c0d084891305 However the implementation is not identical to the description because during the work to implement it, different changes were needed in order to make things working well.
2016-11-09 05:31:06 -05:00
int rdbSaveRioWithEOFMark(rio *rdb, int *error, rdbSaveInfo *rsi) {
2015-07-27 03:41:48 -04:00
char eofmark[RDB_EOF_MARK_SIZE];
startSaving(RDBFLAGS_REPLICATION);
2015-07-27 03:41:48 -04:00
getRandomHexChars(eofmark,RDB_EOF_MARK_SIZE);
if (error) *error = 0;
if (rioWrite(rdb,"$EOF:",5) == 0) goto werr;
2015-07-27 03:41:48 -04:00
if (rioWrite(rdb,eofmark,RDB_EOF_MARK_SIZE) == 0) goto werr;
if (rioWrite(rdb,"\r\n",2) == 0) goto werr;
if (rdbSaveRio(rdb,error,RDBFLAGS_NONE,rsi) == C_ERR) goto werr;
2015-07-27 03:41:48 -04:00
if (rioWrite(rdb,eofmark,RDB_EOF_MARK_SIZE) == 0) goto werr;
stopSaving(1);
return C_OK;
werr: /* Write error. */
/* Set 'error' only if not already set by rdbSaveRio() call. */
if (error && *error == 0) *error = errno;
stopSaving(0);
return C_ERR;
}
/* Save the DB on disk. Return C_ERR on error, C_OK on success. */
PSYNC2: different improvements to Redis replication. The gist of the changes is that now, partial resynchronizations between slaves and masters (without the need of a full resync with RDB transfer and so forth), work in a number of cases when it was impossible in the past. For instance: 1. When a slave is promoted to mastrer, the slaves of the old master can partially resynchronize with the new master. 2. Chained slalves (slaves of slaves) can be moved to replicate to other slaves or the master itsef, without requiring a full resync. 3. The master itself, after being turned into a slave, is able to partially resynchronize with the new master, when it joins replication again. In order to obtain this, the following main changes were operated: * Slaves also take a replication backlog, not just masters. * Same stream replication for all the slaves and sub slaves. The replication stream is identical from the top level master to its slaves and is also the same from the slaves to their sub-slaves and so forth. This means that if a slave is later promoted to master, it has the same replication backlong, and can partially resynchronize with its slaves (that were previously slaves of the old master). * A given replication history is no longer identified by the `runid` of a Redis node. There is instead a `replication ID` which changes every time the instance has a new history no longer coherent with the past one. So, for example, slaves publish the same replication history of their master, however when they are turned into masters, they publish a new replication ID, but still remember the old ID, so that they are able to partially resynchronize with slaves of the old master (up to a given offset). * The replication protocol was slightly modified so that a new extended +CONTINUE reply from the master is able to inform the slave of a replication ID change. * REPLCONF CAPA is used in order to notify masters that a slave is able to understand the new +CONTINUE reply. * The RDB file was extended with an auxiliary field that is able to select a given DB after loading in the slave, so that the slave can continue receiving the replication stream from the point it was disconnected without requiring the master to insert "SELECT" statements. This is useful in order to guarantee the "same stream" property, because the slave must be able to accumulate an identical backlog. * Slave pings to sub-slaves are now sent in a special form, when the top-level master is disconnected, in order to don't interfer with the replication stream. We just use out of band "\n" bytes as in other parts of the Redis protocol. An old design document is available here: https://gist.github.com/antirez/ae068f95c0d084891305 However the implementation is not identical to the description because during the work to implement it, different changes were needed in order to make things working well.
2016-11-09 05:31:06 -05:00
int rdbSave(char *filename, rdbSaveInfo *rsi) {
char tmpfile[256];
char cwd[MAXPATHLEN]; /* Current working dir path for error messages. */
FILE *fp = NULL;
rio rdb;
int error = 0;
snprintf(tmpfile,256,"temp-%d.rdb", (int) getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
char *cwdp = getcwd(cwd,MAXPATHLEN);
serverLog(LL_WARNING,
"Failed opening the RDB file %s (in server root dir %s) "
"for saving: %s",
filename,
cwdp ? cwdp : "unknown",
strerror(errno));
return C_ERR;
}
rioInitWithFile(&rdb,fp);
startSaving(RDBFLAGS_NONE);
if (server.rdb_save_incremental_fsync)
rioSetAutoSync(&rdb,REDIS_AUTOSYNC_BYTES);
if (rdbSaveRio(&rdb,&error,RDBFLAGS_NONE,rsi) == C_ERR) {
errno = error;
goto werr;
}
/* Make sure data will not remain on the OS's output buffers */
if (fflush(fp)) goto werr;
if (fsync(fileno(fp))) goto werr;
if (fclose(fp)) { fp = NULL; goto werr; }
fp = NULL;
/* Use RENAME to make sure the DB file is changed atomically only
* if the generate DB file is ok. */
if (rename(tmpfile,filename) == -1) {
char *cwdp = getcwd(cwd,MAXPATHLEN);
serverLog(LL_WARNING,
"Error moving temp DB file %s on the final "
"destination %s (in server root dir %s): %s",
tmpfile,
filename,
cwdp ? cwdp : "unknown",
strerror(errno));
unlink(tmpfile);
stopSaving(0);
return C_ERR;
}
2015-07-27 03:41:48 -04:00
serverLog(LL_NOTICE,"DB saved on disk");
server.dirty = 0;
server.lastsave = time(NULL);
server.lastbgsave_status = C_OK;
stopSaving(1);
return C_OK;
werr:
2015-07-27 03:41:48 -04:00
serverLog(LL_WARNING,"Write error saving DB on disk: %s", strerror(errno));
if (fp) fclose(fp);
unlink(tmpfile);
stopSaving(0);
return C_ERR;
}
PSYNC2: different improvements to Redis replication. The gist of the changes is that now, partial resynchronizations between slaves and masters (without the need of a full resync with RDB transfer and so forth), work in a number of cases when it was impossible in the past. For instance: 1. When a slave is promoted to mastrer, the slaves of the old master can partially resynchronize with the new master. 2. Chained slalves (slaves of slaves) can be moved to replicate to other slaves or the master itsef, without requiring a full resync. 3. The master itself, after being turned into a slave, is able to partially resynchronize with the new master, when it joins replication again. In order to obtain this, the following main changes were operated: * Slaves also take a replication backlog, not just masters. * Same stream replication for all the slaves and sub slaves. The replication stream is identical from the top level master to its slaves and is also the same from the slaves to their sub-slaves and so forth. This means that if a slave is later promoted to master, it has the same replication backlong, and can partially resynchronize with its slaves (that were previously slaves of the old master). * A given replication history is no longer identified by the `runid` of a Redis node. There is instead a `replication ID` which changes every time the instance has a new history no longer coherent with the past one. So, for example, slaves publish the same replication history of their master, however when they are turned into masters, they publish a new replication ID, but still remember the old ID, so that they are able to partially resynchronize with slaves of the old master (up to a given offset). * The replication protocol was slightly modified so that a new extended +CONTINUE reply from the master is able to inform the slave of a replication ID change. * REPLCONF CAPA is used in order to notify masters that a slave is able to understand the new +CONTINUE reply. * The RDB file was extended with an auxiliary field that is able to select a given DB after loading in the slave, so that the slave can continue receiving the replication stream from the point it was disconnected without requiring the master to insert "SELECT" statements. This is useful in order to guarantee the "same stream" property, because the slave must be able to accumulate an identical backlog. * Slave pings to sub-slaves are now sent in a special form, when the top-level master is disconnected, in order to don't interfer with the replication stream. We just use out of band "\n" bytes as in other parts of the Redis protocol. An old design document is available here: https://gist.github.com/antirez/ae068f95c0d084891305 However the implementation is not identical to the description because during the work to implement it, different changes were needed in order to make things working well.
2016-11-09 05:31:06 -05:00
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
if (hasActiveChildProcess()) return C_ERR;
2011-01-05 12:38:31 -05:00
server.dirty_before_bgsave = server.dirty;
server.lastbgsave_try = time(NULL);
openChildInfoPipe();
2011-01-05 12:38:31 -05:00
if ((childpid = redisFork(CHILD_TYPE_RDB)) == 0) {
2011-01-05 12:38:31 -05:00
int retval;
/* Child */
redisSetProcTitle("redis-rdb-bgsave");
redisSetCpuAffinity(server.bgsave_cpulist);
PSYNC2: different improvements to Redis replication. The gist of the changes is that now, partial resynchronizations between slaves and masters (without the need of a full resync with RDB transfer and so forth), work in a number of cases when it was impossible in the past. For instance: 1. When a slave is promoted to mastrer, the slaves of the old master can partially resynchronize with the new master. 2. Chained slalves (slaves of slaves) can be moved to replicate to other slaves or the master itsef, without requiring a full resync. 3. The master itself, after being turned into a slave, is able to partially resynchronize with the new master, when it joins replication again. In order to obtain this, the following main changes were operated: * Slaves also take a replication backlog, not just masters. * Same stream replication for all the slaves and sub slaves. The replication stream is identical from the top level master to its slaves and is also the same from the slaves to their sub-slaves and so forth. This means that if a slave is later promoted to master, it has the same replication backlong, and can partially resynchronize with its slaves (that were previously slaves of the old master). * A given replication history is no longer identified by the `runid` of a Redis node. There is instead a `replication ID` which changes every time the instance has a new history no longer coherent with the past one. So, for example, slaves publish the same replication history of their master, however when they are turned into masters, they publish a new replication ID, but still remember the old ID, so that they are able to partially resynchronize with slaves of the old master (up to a given offset). * The replication protocol was slightly modified so that a new extended +CONTINUE reply from the master is able to inform the slave of a replication ID change. * REPLCONF CAPA is used in order to notify masters that a slave is able to understand the new +CONTINUE reply. * The RDB file was extended with an auxiliary field that is able to select a given DB after loading in the slave, so that the slave can continue receiving the replication stream from the point it was disconnected without requiring the master to insert "SELECT" statements. This is useful in order to guarantee the "same stream" property, because the slave must be able to accumulate an identical backlog. * Slave pings to sub-slaves are now sent in a special form, when the top-level master is disconnected, in order to don't interfer with the replication stream. We just use out of band "\n" bytes as in other parts of the Redis protocol. An old design document is available here: https://gist.github.com/antirez/ae068f95c0d084891305 However the implementation is not identical to the description because during the work to implement it, different changes were needed in order to make things working well.
2016-11-09 05:31:06 -05:00
retval = rdbSave(filename,rsi);
if (retval == C_OK) {
sendChildCOWInfo(CHILD_TYPE_RDB, "RDB");
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* Parent */
if (childpid == -1) {
closeChildInfoPipe();
server.lastbgsave_status = C_ERR;
2015-07-27 03:41:48 -04:00
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
return C_ERR;
}
2015-07-27 03:41:48 -04:00
serverLog(LL_NOTICE,"Background saving started by pid %d",childpid);
server.rdb_save_time_start = time(NULL);
2011-12-21 06:22:13 -05:00
server.rdb_child_pid = childpid;
2015-07-27 03:41:48 -04:00
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
updateDictResizePolicy();
return C_OK;
}
return C_OK; /* unreached */
}
/* Note that we may call this function in signal handle 'sigShutdownHandler',
* so we need guarantee all functions we call are async-signal-safe.
* If we call this function from signal handle, we won't call bg_unlink that
* is not async-signal-safe. */
void rdbRemoveTempFile(pid_t childpid, int from_signal) {
char tmpfile[256];
char pid[32];
/* Generate temp rdb file name using aync-signal safe functions. */
int pid_len = ll2string(pid, sizeof(pid), childpid);
strcpy(tmpfile, "temp-");
strncpy(tmpfile+5, pid, pid_len);
strcpy(tmpfile+5+pid_len, ".rdb");
if (from_signal) {
/* bg_unlink is not async-signal-safe, but in this case we don't really
* need to close the fd, it'll be released when the process exists. */
int fd = open(tmpfile, O_RDONLY|O_NONBLOCK);
UNUSED(fd);
unlink(tmpfile);
} else {
bg_unlink(tmpfile);
}
}
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
/* This function is called by rdbLoadObject() when the code is in RDB-check
* mode and we find a module value of type 2 that can be parsed without
* the need of the actual module. The value is parsed for errors, finally
* a dummy redis object is returned just to conform to the API. */
robj *rdbLoadCheckModuleValue(rio *rdb, char *modulename) {
uint64_t opcode;
while((opcode = rdbLoadLen(rdb,NULL)) != RDB_MODULE_OPCODE_EOF) {
if (opcode == RDB_MODULE_OPCODE_SINT ||
opcode == RDB_MODULE_OPCODE_UINT)
{
uint64_t len;
if (rdbLoadLenByRef(rdb,NULL,&len) == -1) {
rdbExitReportCorruptRDB(
"Error reading integer from module %s value", modulename);
}
} else if (opcode == RDB_MODULE_OPCODE_STRING) {
robj *o = rdbGenericLoadStringObject(rdb,RDB_LOAD_NONE,NULL);
if (o == NULL) {
rdbExitReportCorruptRDB(
"Error reading string from module %s value", modulename);
}
decrRefCount(o);
} else if (opcode == RDB_MODULE_OPCODE_FLOAT) {
float val;
if (rdbLoadBinaryFloatValue(rdb,&val) == -1) {
rdbExitReportCorruptRDB(
"Error reading float from module %s value", modulename);
}
} else if (opcode == RDB_MODULE_OPCODE_DOUBLE) {
double val;
if (rdbLoadBinaryDoubleValue(rdb,&val) == -1) {
rdbExitReportCorruptRDB(
"Error reading double from module %s value", modulename);
}
}
}
return createStringObject("module-dummy-value",18);
}
/* Load a Redis object of the specified type from the specified file.
* On success a newly allocated object is returned, otherwise NULL. */
robj *rdbLoadObject(int rdbtype, rio *rdb, sds key) {
robj *o = NULL, *ele, *dec;
uint64_t len;
unsigned int i;
int deep_integrity_validation = server.sanitize_dump_payload == SANITIZE_DUMP_YES;
if (server.sanitize_dump_payload == SANITIZE_DUMP_CLIENTS) {
/* Skip sanitization when loading (an RDB), or getting a RESTORE command
* from either the master or a client using an ACL user with the skip-sanitize-payload flag. */
int skip = server.loading ||
(server.current_client && (server.current_client->flags & CLIENT_MASTER));
if (!skip && server.current_client && server.current_client->user)
skip = !!(server.current_client->user->flags & USER_FLAG_SANITIZE_PAYLOAD_SKIP);
deep_integrity_validation = !skip;
}
2015-07-27 03:41:48 -04:00
if (rdbtype == RDB_TYPE_STRING) {
/* Read string value */
if ((o = rdbLoadEncodedStringObject(rdb)) == NULL) return NULL;
o = tryObjectEncoding(o);
2015-07-27 03:41:48 -04:00
} else if (rdbtype == RDB_TYPE_LIST) {
/* Read list value */
2015-07-27 03:41:48 -04:00
if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
o = createQuicklistObject();
quicklistSetOptions(o->ptr, server.list_max_ziplist_size,
server.list_compress_depth);
/* Load every single element of the list */
while(len--) {
if ((ele = rdbLoadEncodedStringObject(rdb)) == NULL) {
decrRefCount(o);
return NULL;
}
dec = getDecodedObject(ele);
size_t len = sdslen(dec->ptr);
quicklistPushTail(o->ptr, dec->ptr, len);
decrRefCount(dec);
decrRefCount(ele);
}
2015-07-27 03:41:48 -04:00
} else if (rdbtype == RDB_TYPE_SET) {
/* Read Set value */
2015-07-27 03:41:48 -04:00
if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
/* Use a regular set when there are too many entries. */
if (len > server.set_max_intset_entries) {
o = createSetObject();
/* It's faster to expand the dict to the right size asap in order
* to avoid rehashing */
if (len > DICT_HT_INITIAL_SIZE)
dictExpand(o->ptr,len);
} else {
o = createIntsetObject();
}
/* Load every single element of the set */
for (i = 0; i < len; i++) {
long long llval;
sds sdsele;
if ((sdsele = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL) {
decrRefCount(o);
return NULL;
}
if (o->encoding == OBJ_ENCODING_INTSET) {
/* Fetch integer value from element. */
if (isSdsRepresentableAsLongLong(sdsele,&llval) == C_OK) {
o->ptr = intsetAdd(o->ptr,llval,NULL);
} else {
setTypeConvert(o,OBJ_ENCODING_HT);
dictExpand(o->ptr,len);
}
}
/* This will also be called when the set was just converted
* to a regular hash table encoded set. */
if (o->encoding == OBJ_ENCODING_HT) {
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
if (dictAdd((dict*)o->ptr,sdsele,NULL) != DICT_OK) {
rdbExitReportCorruptRDB("Duplicate set members detected");
decrRefCount(o);
sdsfree(sdsele);
return NULL;
}
} else {
sdsfree(sdsele);
}
}
} else if (rdbtype == RDB_TYPE_ZSET_2 || rdbtype == RDB_TYPE_ZSET) {
/* Read list/set value. */
uint64_t zsetlen;
size_t maxelelen = 0;
zset *zs;
2015-07-27 03:41:48 -04:00
if ((zsetlen = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
o = createZsetObject();
zs = o->ptr;
if (zsetlen > DICT_HT_INITIAL_SIZE)
dictExpand(zs->dict,zsetlen);
/* Load every single element of the sorted set. */
while(zsetlen--) {
sds sdsele;
double score;
zskiplistNode *znode;
if ((sdsele = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL) {
decrRefCount(o);
return NULL;
}
if (rdbtype == RDB_TYPE_ZSET_2) {
if (rdbLoadBinaryDoubleValue(rdb,&score) == -1) {
decrRefCount(o);
sdsfree(sdsele);
return NULL;
}
} else {
if (rdbLoadDoubleValue(rdb,&score) == -1) {
decrRefCount(o);
sdsfree(sdsele);
return NULL;
}
}
/* Don't care about integer-encoded strings. */
if (sdslen(sdsele) > maxelelen) maxelelen = sdslen(sdsele);
znode = zslInsert(zs->zsl,score,sdsele);
dictAdd(zs->dict,sdsele,&znode->score);
}
/* Convert *after* loading, since sorted sets are not stored ordered. */
if (zsetLength(o) <= server.zset_max_ziplist_entries &&
maxelelen <= server.zset_max_ziplist_value)
zsetConvert(o,OBJ_ENCODING_ZIPLIST);
2015-07-27 03:41:48 -04:00
} else if (rdbtype == RDB_TYPE_HASH) {
uint64_t len;
2012-01-03 01:14:10 -05:00
int ret;
sds field, value;
2012-01-03 01:14:10 -05:00
len = rdbLoadLen(rdb, NULL);
2015-07-27 03:41:48 -04:00
if (len == RDB_LENERR) return NULL;
o = createHashObject();
2012-01-03 01:14:10 -05:00
/* Too many entries? Use a hash table. */
2012-01-03 01:14:10 -05:00
if (len > server.hash_max_ziplist_entries)
hashTypeConvert(o, OBJ_ENCODING_HT);
2012-01-03 01:14:10 -05:00
/* Load every field and value into the ziplist */
while (o->encoding == OBJ_ENCODING_ZIPLIST && len > 0) {
len--;
2012-01-03 01:14:10 -05:00
/* Load raw strings */
if ((field = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL) {
decrRefCount(o);
return NULL;
}
if ((value = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL) {
sdsfree(field);
decrRefCount(o);
return NULL;
}
2012-01-03 01:14:10 -05:00
/* Add pair to ziplist */
o->ptr = ziplistPush(o->ptr, (unsigned char*)field,
sdslen(field), ZIPLIST_TAIL);
o->ptr = ziplistPush(o->ptr, (unsigned char*)value,
sdslen(value), ZIPLIST_TAIL);
2012-01-03 01:14:10 -05:00
/* Convert to hash table if size threshold is exceeded */
if (sdslen(field) > server.hash_max_ziplist_value ||
sdslen(value) > server.hash_max_ziplist_value)
{
sdsfree(field);
sdsfree(value);
hashTypeConvert(o, OBJ_ENCODING_HT);
2012-01-03 01:14:10 -05:00
break;
}
sdsfree(field);
sdsfree(value);
}
2012-01-03 01:14:10 -05:00
if (o->encoding == OBJ_ENCODING_HT && len > DICT_HT_INITIAL_SIZE)
dictExpand(o->ptr,len);
2012-01-03 01:14:10 -05:00
/* Load remaining fields and values into the hash table */
while (o->encoding == OBJ_ENCODING_HT && len > 0) {
len--;
2012-01-03 01:14:10 -05:00
/* Load encoded strings */
if ((field = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL) {
decrRefCount(o);
return NULL;
}
if ((value = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL) {
sdsfree(field);
decrRefCount(o);
return NULL;
}
2012-01-03 01:14:10 -05:00
/* Add pair to hash table */
ret = dictAdd((dict*)o->ptr, field, value);
if (ret == DICT_ERR) {
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
rdbExitReportCorruptRDB("Duplicate hash fields detected");
sdsfree(value);
sdsfree(field);
decrRefCount(o);
return NULL;
}
2012-01-03 01:14:10 -05:00
}
/* All pairs should be read by now */
2015-07-26 09:29:53 -04:00
serverAssert(len == 0);
2015-07-27 03:41:48 -04:00
} else if (rdbtype == RDB_TYPE_LIST_QUICKLIST) {
if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL;
o = createQuicklistObject();
quicklistSetOptions(o->ptr, server.list_max_ziplist_size,
server.list_compress_depth);
2012-01-03 01:14:10 -05:00
while (len--) {
size_t encoded_len;
unsigned char *zl =
rdbGenericLoadStringObject(rdb,RDB_LOAD_PLAIN,&encoded_len);
if (zl == NULL) {
decrRefCount(o);
return NULL;
}
if (deep_integrity_validation) server.stat_dump_payload_sanitizations++;
if (!ziplistValidateIntegrity(zl, encoded_len, deep_integrity_validation)) {
rdbExitReportCorruptRDB("Ziplist integrity check failed.");
decrRefCount(o);
zfree(zl);
return NULL;
}
quicklistAppendZiplist(o->ptr, zl);
}
2015-07-27 03:41:48 -04:00
} else if (rdbtype == RDB_TYPE_HASH_ZIPMAP ||
rdbtype == RDB_TYPE_LIST_ZIPLIST ||
rdbtype == RDB_TYPE_SET_INTSET ||
rdbtype == RDB_TYPE_ZSET_ZIPLIST ||
rdbtype == RDB_TYPE_HASH_ZIPLIST)
{
size_t encoded_len;
unsigned char *encoded =
rdbGenericLoadStringObject(rdb,RDB_LOAD_PLAIN,&encoded_len);
if (encoded == NULL) return NULL;
if (rdbtype == RDB_TYPE_HASH_ZIPMAP) {
/* Since we don't keep zipmaps anymore, the rdb loading for these
* is O(n) anyway, use `deep` validation. */
if (!zipmapValidateIntegrity(encoded, encoded_len, 1)) {
rdbExitReportCorruptRDB("Zipmap integrity check failed.");
zfree(encoded);
return NULL;
}
} else if (rdbtype == RDB_TYPE_SET_INTSET) {
if (!intsetValidateIntegrity(encoded, encoded_len)) {
rdbExitReportCorruptRDB("Intset integrity check failed.");
zfree(encoded);
return NULL;
}
} else { /* ziplist */
if (deep_integrity_validation) server.stat_dump_payload_sanitizations++;
if (!ziplistValidateIntegrity(encoded, encoded_len, deep_integrity_validation)) {
rdbExitReportCorruptRDB("Ziplist integrity check failed.");
zfree(encoded);
return NULL;
}
}
o = createObject(OBJ_STRING,encoded); /* Obj type fixed below. */
/* Fix the object encoding, and make sure to convert the encoded
* data type into the base type if accordingly to the current
* configuration there are too many elements in the encoded data
* type. Note that we only check the length and not max element
* size as this is an O(N) scan. Eventually everything will get
* converted. */
switch(rdbtype) {
2015-07-27 03:41:48 -04:00
case RDB_TYPE_HASH_ZIPMAP:
2012-01-03 01:14:10 -05:00
/* Convert to ziplist encoded hash. This must be deprecated
* when loading dumps created by Redis 2.4 gets deprecated. */
{
unsigned char *zl = ziplistNew();
unsigned char *zi = zipmapRewind(o->ptr);
unsigned char *fstr, *vstr;
unsigned int flen, vlen;
unsigned int maxlen = 0;
2012-01-03 01:14:10 -05:00
while ((zi = zipmapNext(zi, &fstr, &flen, &vstr, &vlen)) != NULL) {
if (flen > maxlen) maxlen = flen;
if (vlen > maxlen) maxlen = vlen;
2012-01-03 01:14:10 -05:00
zl = ziplistPush(zl, fstr, flen, ZIPLIST_TAIL);
zl = ziplistPush(zl, vstr, vlen, ZIPLIST_TAIL);
}
zfree(o->ptr);
o->ptr = zl;
o->type = OBJ_HASH;
o->encoding = OBJ_ENCODING_ZIPLIST;
2012-01-03 01:14:10 -05:00
if (hashTypeLength(o) > server.hash_max_ziplist_entries ||
maxlen > server.hash_max_ziplist_value)
{
hashTypeConvert(o, OBJ_ENCODING_HT);
}
2012-01-03 01:14:10 -05:00
}
break;
2015-07-27 03:41:48 -04:00
case RDB_TYPE_LIST_ZIPLIST:
o->type = OBJ_LIST;
o->encoding = OBJ_ENCODING_ZIPLIST;
listTypeConvert(o,OBJ_ENCODING_QUICKLIST);
break;
2015-07-27 03:41:48 -04:00
case RDB_TYPE_SET_INTSET:
o->type = OBJ_SET;
o->encoding = OBJ_ENCODING_INTSET;
if (intsetLen(o->ptr) > server.set_max_intset_entries)
setTypeConvert(o,OBJ_ENCODING_HT);
break;
2015-07-27 03:41:48 -04:00
case RDB_TYPE_ZSET_ZIPLIST:
o->type = OBJ_ZSET;
o->encoding = OBJ_ENCODING_ZIPLIST;
if (zsetLength(o) > server.zset_max_ziplist_entries)
zsetConvert(o,OBJ_ENCODING_SKIPLIST);
break;
2015-07-27 03:41:48 -04:00
case RDB_TYPE_HASH_ZIPLIST:
o->type = OBJ_HASH;
o->encoding = OBJ_ENCODING_ZIPLIST;
2012-01-03 01:14:10 -05:00
if (hashTypeLength(o) > server.hash_max_ziplist_entries)
hashTypeConvert(o, OBJ_ENCODING_HT);
2012-01-03 01:14:10 -05:00
break;
default:
/* totally unreachable */
rdbExitReportCorruptRDB("Unknown RDB encoding type %d",rdbtype);
break;
}
} else if (rdbtype == RDB_TYPE_STREAM_LISTPACKS) {
o = createStreamObject();
stream *s = o->ptr;
uint64_t listpacks = rdbLoadLen(rdb,NULL);
if (listpacks == RDB_LENERR) {
rdbReportReadError("Stream listpacks len loading failed.");
decrRefCount(o);
return NULL;
}
while(listpacks--) {
/* Get the master ID, the one we'll use as key of the radix tree
* node: the entries inside the listpack itself are delta-encoded
* relatively to this ID. */
sds nodekey = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL);
if (nodekey == NULL) {
rdbReportReadError("Stream master ID loading failed: invalid encoding or I/O error.");
decrRefCount(o);
return NULL;
}
if (sdslen(nodekey) != sizeof(streamID)) {
rdbExitReportCorruptRDB("Stream node key entry is not the "
"size of a stream ID");
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
sdsfree(nodekey);
decrRefCount(o);
return NULL;
}
/* Load the listpack. */
size_t lp_size;
unsigned char *lp =
rdbGenericLoadStringObject(rdb,RDB_LOAD_PLAIN,&lp_size);
if (lp == NULL) {
rdbReportReadError("Stream listpacks loading failed.");
sdsfree(nodekey);
decrRefCount(o);
return NULL;
}
if (deep_integrity_validation) server.stat_dump_payload_sanitizations++;
if (!streamValidateListpackIntegrity(lp, lp_size, deep_integrity_validation)) {
rdbExitReportCorruptRDB("Stream listpack integrity check failed.");
sdsfree(nodekey);
decrRefCount(o);
zfree(lp);
return NULL;
}
unsigned char *first = lpFirst(lp);
if (first == NULL) {
/* Serialized listpacks should never be empty, since on
* deletion we should remove the radix tree key if the
2018-07-01 01:24:50 -04:00
* resulting listpack is empty. */
rdbExitReportCorruptRDB("Empty listpack inside stream");
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
sdsfree(nodekey);
decrRefCount(o);
zfree(lp);
return NULL;
}
/* Insert the key in the radix tree. */
int retval = raxInsert(s->rax,
(unsigned char*)nodekey,sizeof(streamID),lp,NULL);
sdsfree(nodekey);
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
if (!retval) {
rdbExitReportCorruptRDB("Listpack re-added with existing key");
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
decrRefCount(o);
zfree(lp);
return NULL;
}
}
2017-09-06 06:00:18 -04:00
/* Load total number of items inside the stream. */
s->length = rdbLoadLen(rdb,NULL);
/* Load the last entry ID. */
s->last_id.ms = rdbLoadLen(rdb,NULL);
s->last_id.seq = rdbLoadLen(rdb,NULL);
if (rioGetReadError(rdb)) {
rdbReportReadError("Stream object metadata loading failed.");
decrRefCount(o);
return NULL;
}
2018-02-14 10:37:24 -05:00
/* Consumer groups loading */
uint64_t cgroups_count = rdbLoadLen(rdb,NULL);
if (cgroups_count == RDB_LENERR) {
rdbReportReadError("Stream cgroup count loading failed.");
decrRefCount(o);
return NULL;
}
2018-02-14 10:37:24 -05:00
while(cgroups_count--) {
/* Get the consumer group name and ID. We can then create the
* consumer group ASAP and populate its structure as
* we read more data. */
streamID cg_id;
sds cgname = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL);
if (cgname == NULL) {
rdbReportReadError(
"Error reading the consumer group name from Stream");
decrRefCount(o);
return NULL;
}
cg_id.ms = rdbLoadLen(rdb,NULL);
cg_id.seq = rdbLoadLen(rdb,NULL);
if (rioGetReadError(rdb)) {
rdbReportReadError("Stream cgroup ID loading failed.");
sdsfree(cgname);
decrRefCount(o);
return NULL;
}
2018-02-14 10:37:24 -05:00
streamCG *cgroup = streamCreateCG(s,cgname,sdslen(cgname),&cg_id);
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
if (cgroup == NULL) {
2018-02-14 10:37:24 -05:00
rdbExitReportCorruptRDB("Duplicated consumer group name %s",
cgname);
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
decrRefCount(o);
sdsfree(cgname);
return NULL;
}
2018-02-14 10:37:24 -05:00
sdsfree(cgname);
/* Load the global PEL for this consumer group, however we'll
* not yet populate the NACK structures with the message
* owner, since consumers for this group and their messages will
* be read as a next step. So for now leave them not resolved
* and later populate it. */
uint64_t pel_size = rdbLoadLen(rdb,NULL);
if (pel_size == RDB_LENERR) {
rdbReportReadError("Stream PEL size loading failed.");
decrRefCount(o);
return NULL;
}
2018-02-14 10:37:24 -05:00
while(pel_size--) {
unsigned char rawid[sizeof(streamID)];
if (rioRead(rdb,rawid,sizeof(rawid)) == 0) {
rdbReportReadError("Stream PEL ID loading failed.");
decrRefCount(o);
return NULL;
}
2018-02-14 10:37:24 -05:00
streamNACK *nack = streamCreateNACK(NULL);
nack->delivery_time = rdbLoadMillisecondTime(rdb,RDB_VERSION);
nack->delivery_count = rdbLoadLen(rdb,NULL);
if (rioGetReadError(rdb)) {
rdbReportReadError("Stream PEL NACK loading failed.");
decrRefCount(o);
streamFreeNACK(nack);
return NULL;
}
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
if (!raxInsert(cgroup->pel,rawid,sizeof(rawid),nack,NULL)) {
rdbExitReportCorruptRDB("Duplicated global PEL entry "
2018-02-14 10:37:24 -05:00
"loading stream consumer group");
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
decrRefCount(o);
streamFreeNACK(nack);
return NULL;
}
2018-02-14 10:37:24 -05:00
}
/* Now that we loaded our global PEL, we need to load the
* consumers and their local PELs. */
uint64_t consumers_num = rdbLoadLen(rdb,NULL);
if (consumers_num == RDB_LENERR) {
rdbReportReadError("Stream consumers num loading failed.");
decrRefCount(o);
return NULL;
}
2018-02-14 10:37:24 -05:00
while(consumers_num--) {
sds cname = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL);
if (cname == NULL) {
rdbReportReadError(
"Error reading the consumer name from Stream group.");
decrRefCount(o);
return NULL;
}
streamConsumer *consumer =
streamLookupConsumer(cgroup,cname,SLC_NONE,NULL);
2018-02-14 10:37:24 -05:00
sdsfree(cname);
consumer->seen_time = rdbLoadMillisecondTime(rdb,RDB_VERSION);
if (rioGetReadError(rdb)) {
rdbReportReadError("Stream short read reading seen time.");
decrRefCount(o);
return NULL;
}
2018-02-14 10:37:24 -05:00
/* Load the PEL about entries owned by this specific
* consumer. */
pel_size = rdbLoadLen(rdb,NULL);
if (pel_size == RDB_LENERR) {
rdbReportReadError(
"Stream consumer PEL num loading failed.");
decrRefCount(o);
return NULL;
}
2018-02-14 10:37:24 -05:00
while(pel_size--) {
unsigned char rawid[sizeof(streamID)];
if (rioRead(rdb,rawid,sizeof(rawid)) == 0) {
rdbReportReadError(
"Stream short read reading PEL streamID.");
decrRefCount(o);
return NULL;
}
2018-02-14 10:37:24 -05:00
streamNACK *nack = raxFind(cgroup->pel,rawid,sizeof(rawid));
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
if (nack == raxNotFound) {
2018-02-14 10:37:24 -05:00
rdbExitReportCorruptRDB("Consumer entry not found in "
"group global PEL");
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
decrRefCount(o);
return NULL;
}
2018-02-14 10:37:24 -05:00
/* Set the NACK consumer, that was left to NULL when
* loading the global PEL. Then set the same shared
* NACK structure also in the consumer-specific PEL. */
nack->consumer = consumer;
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
if (!raxInsert(consumer->pel,rawid,sizeof(rawid),nack,NULL)) {
2018-02-14 10:37:24 -05:00
rdbExitReportCorruptRDB("Duplicated consumer PEL entry "
" loading a stream consumer "
"group");
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
decrRefCount(o);
return NULL;
}
2018-02-14 10:37:24 -05:00
}
}
}
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
} else if (rdbtype == RDB_TYPE_MODULE || rdbtype == RDB_TYPE_MODULE_2) {
uint64_t moduleid = rdbLoadLen(rdb,NULL);
if (rioGetReadError(rdb)) {
rdbReportReadError("Short read module id");
return NULL;
}
moduleType *mt = moduleTypeLookupModuleByID(moduleid);
char name[10];
if (rdbCheckMode && rdbtype == RDB_TYPE_MODULE_2) {
moduleTypeNameByID(name,moduleid);
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
return rdbLoadCheckModuleValue(rdb,name);
}
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
if (mt == NULL) {
moduleTypeNameByID(name,moduleid);
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
rdbExitReportCorruptRDB("The RDB file contains module data I can't load: no matching module '%s'", name);
return NULL;
}
RedisModuleIO io;
robj keyobj;
initStaticStringObject(keyobj,key);
moduleInitIOContext(io,mt,rdb,&keyobj);
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
io.ver = (rdbtype == RDB_TYPE_MODULE) ? 1 : 2;
/* Call the rdb_load method of the module providing the 10 bit
* encoding version in the lower 10 bits of the module ID. */
void *ptr = mt->rdb_load(&io,moduleid&1023);
if (io.ctx) {
moduleFreeContext(io.ctx);
zfree(io.ctx);
}
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
/* Module v2 serialization has an EOF mark at the end. */
if (io.ver == 2) {
uint64_t eof = rdbLoadLen(rdb,NULL);
if (eof == RDB_LENERR) {
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
if (ptr) {
o = createModuleObject(mt,ptr); /* creating just in order to easily destroy */
decrRefCount(o);
}
return NULL;
}
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
if (eof != RDB_MODULE_OPCODE_EOF) {
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
rdbExitReportCorruptRDB("The RDB file contains module data for the module '%s' that is not terminated by the proper module value EOF marker", name);
if (ptr) {
o = createModuleObject(mt,ptr); /* creating just in order to easily destroy */
decrRefCount(o);
}
return NULL;
RDB modules values serialization format version 2. The original RDB serialization format was not parsable without the module loaded, becuase the structure was managed only by the module itself. Moreover RDB is a streaming protocol in the sense that it is both produce di an append-only fashion, and is also sometimes directly sent to the socket (in the case of diskless replication). The fact that modules values cannot be parsed without the relevant module loaded is a problem in many ways: RDB checking tools must have loaded modules even for doing things not involving the value at all, like splitting an RDB into N RDBs by key or alike, or just checking the RDB for sanity. In theory module values could be just a blob of data with a prefixed length in order for us to be able to skip it. However prefixing the values with a length would mean one of the following: 1. To be able to write some data at a previous offset. This breaks stremaing. 2. To bufferize values before outputting them. This breaks performances. 3. To have some chunked RDB output format. This breaks simplicity. Moreover, the above solution, still makes module values a totally opaque matter, with the fowllowing problems: 1. The RDB check tool can just skip the value without being able to at least check the general structure. For datasets composed mostly of modules values this means to just check the outer level of the RDB not actually doing any checko on most of the data itself. 2. It is not possible to do any recovering or processing of data for which a module no longer exists in the future, or is unknown. So this commit implements a different solution. The modules RDB serialization API is composed if well defined calls to store integers, floats, doubles or strings. After this commit, the parts generated by the module API have a one-byte prefix for each of the above emitted parts, and there is a final EOF byte as well. So even if we don't know exactly how to interpret a module value, we can always parse it at an high level, check the overall structure, understand the types used to store the information, and easily skip the whole value. The change is backward compatible: older RDB files can be still loaded since the new encoding has a new RDB type: MODULE_2 (of value 7). The commit also implements the ability to check RDB files for sanity taking advantage of the new feature.
2017-06-27 07:09:33 -04:00
}
}
if (ptr == NULL) {
moduleTypeNameByID(name,moduleid);
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
rdbExitReportCorruptRDB("The RDB file contains module data for the module type '%s', that the responsible module is not able to load. Check for modules log above for additional clues.", name);
return NULL;
}
o = createModuleObject(mt,ptr);
} else {
rdbReportReadError("Unknown RDB encoding type %d",rdbtype);
return NULL;
}
return o;
}
/* Mark that we are loading in the global state and setup the fields
* needed to provide loading stats. */
void startLoading(size_t size, int rdbflags) {
/* Load the DB */
server.loading = 1;
server.loading_start_time = time(NULL);
server.loading_loaded_bytes = 0;
server.loading_total_bytes = size;
server.loading_rdb_used_mem = 0;
blockingOperationStarts();
/* Fire the loading modules start event. */
int subevent;
if (rdbflags & RDBFLAGS_AOF_PREAMBLE)
subevent = REDISMODULE_SUBEVENT_LOADING_AOF_START;
else if(rdbflags & RDBFLAGS_REPLICATION)
subevent = REDISMODULE_SUBEVENT_LOADING_REPL_START;
else
subevent = REDISMODULE_SUBEVENT_LOADING_RDB_START;
moduleFireServerEvent(REDISMODULE_EVENT_LOADING,subevent,NULL);
}
/* Mark that we are loading in the global state and setup the fields
* needed to provide loading stats.
* 'filename' is optional and used for rdb-check on error */
void startLoadingFile(FILE *fp, char* filename, int rdbflags) {
struct stat sb;
if (fstat(fileno(fp), &sb) == -1)
sb.st_size = 0;
rdbFileBeingLoaded = filename;
startLoading(sb.st_size, rdbflags);
}
/* Refresh the loading progress info */
void loadingProgress(off_t pos) {
server.loading_loaded_bytes = pos;
if (server.stat_peak_memory < zmalloc_used_memory())
server.stat_peak_memory = zmalloc_used_memory();
}
/* Loading finished */
void stopLoading(int success) {
server.loading = 0;
blockingOperationEnds();
rdbFileBeingLoaded = NULL;
/* Fire the loading modules end event. */
moduleFireServerEvent(REDISMODULE_EVENT_LOADING,
success?
REDISMODULE_SUBEVENT_LOADING_ENDED:
REDISMODULE_SUBEVENT_LOADING_FAILED,
NULL);
}
void startSaving(int rdbflags) {
/* Fire the persistence modules end event. */
int subevent;
if (rdbflags & RDBFLAGS_AOF_PREAMBLE)
subevent = REDISMODULE_SUBEVENT_PERSISTENCE_AOF_START;
else if (getpid()!=server.pid)
subevent = REDISMODULE_SUBEVENT_PERSISTENCE_RDB_START;
else
subevent = REDISMODULE_SUBEVENT_PERSISTENCE_SYNC_RDB_START;
moduleFireServerEvent(REDISMODULE_EVENT_PERSISTENCE,subevent,NULL);
}
void stopSaving(int success) {
/* Fire the persistence modules end event. */
moduleFireServerEvent(REDISMODULE_EVENT_PERSISTENCE,
success?
REDISMODULE_SUBEVENT_PERSISTENCE_ENDED:
REDISMODULE_SUBEVENT_PERSISTENCE_FAILED,
NULL);
}
/* Track loading progress in order to serve client's from time to time
and if needed calculate rdb checksum */
void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) {
if (server.rdb_checksum)
rioGenericUpdateChecksum(r, buf, len);
if (server.loading_process_events_interval_bytes &&
Slaves heartbeat while loading RDB files. Starting with Redis 2.8 masters are able to detect timed out slaves, while before 2.8 only slaves were able to detect a timed out master. Now that timeout detection is bi-directional the following problem happens as described "in the field" by issue #1449: 1) Master and slave setup with big dataset. 2) Slave performs the first synchronization, or a full sync after a failed partial resync. 3) Master sends the RDB payload to the slave. 4) Slave loads this payload. 5) Master detects the slave as timed out since does not receive back the REPLCONF ACK acknowledges. Here the problem is that the master has no way to know how much the slave will take to load the RDB file in memory. The obvious solution is to use a greater replication timeout setting, but this is a shame since for the 0.1% of operation time we are forced to use a timeout that is not what is suited for 99.9% of operation time. This commit tries to fix this problem with a solution that is a bit of an hack, but that modifies little of the replication internals, in order to be back ported to 2.8 safely. During the RDB loading time, we send the master newlines to avoid being sensed as timed out. This is the same that the master already does while saving the RDB file to still signal its presence to the slave. The single newline is used because: 1) It can't desync the protocol, as it is only transmitted all or nothing. 2) It can be safely sent while we don't have a client structure for the master or in similar situations just with write(2).
2013-12-09 07:32:44 -05:00
(r->processed_bytes + len)/server.loading_process_events_interval_bytes > r->processed_bytes/server.loading_process_events_interval_bytes)
{
2015-07-27 03:41:48 -04:00
if (server.masterhost && server.repl_state == REPL_STATE_TRANSFER)
replicationSendNewlineToMaster();
loadingProgress(r->processed_bytes);
processEventsWhileBlocked();
processModuleLoadingProgressEvent(0);
}
}
/* Load an RDB file from the rio stream 'rdb'. On success C_OK is returned,
* otherwise C_ERR is returned and 'errno' is set accordingly. */
int rdbLoadRio(rio *rdb, int rdbflags, rdbSaveInfo *rsi) {
2016-06-01 14:18:28 -04:00
uint64_t dbid;
int type, rdbver;
redisDb *db = server.db+0;
char buf[1024];
rdb->update_cksum = rdbLoadProgressCallback;
rdb->max_processing_chunk = server.loading_process_events_interval_bytes;
if (rioRead(rdb,buf,9) == 0) goto eoferr;
buf[9] = '\0';
if (memcmp(buf,"REDIS",5) != 0) {
2015-07-27 03:41:48 -04:00
serverLog(LL_WARNING,"Wrong signature trying to load DB from file");
errno = EINVAL;
return C_ERR;
}
rdbver = atoi(buf+5);
2015-07-27 03:41:48 -04:00
if (rdbver < 1 || rdbver > RDB_VERSION) {
serverLog(LL_WARNING,"Can't handle RDB format version %d",rdbver);
errno = EINVAL;
return C_ERR;
}
2018-03-15 11:24:53 -04:00
/* Key-specific attributes, set by opcodes before the key type. */
long long lru_idle = -1, lfu_freq = -1, expiretime = -1, now = mstime();
2018-03-15 11:24:53 -04:00
long long lru_clock = LRU_CLOCK();
while(1) {
sds key;
robj *val;
/* Read type. */
if ((type = rdbLoadType(rdb)) == -1) goto eoferr;
/* Handle special types. */
2015-07-27 03:41:48 -04:00
if (type == RDB_OPCODE_EXPIRETIME) {
/* EXPIRETIME: load an expire associated with the next key
* to load. Note that after loading an expire we need to
* load the actual type, and continue. */
expiretime = rdbLoadTime(rdb);
expiretime *= 1000;
if (rioGetReadError(rdb)) goto eoferr;
2018-03-15 11:24:53 -04:00
continue; /* Read next opcode. */
2015-07-27 03:41:48 -04:00
} else if (type == RDB_OPCODE_EXPIRETIME_MS) {
/* EXPIRETIME_MS: milliseconds precision expire times introduced
* with RDB v3. Like EXPIRETIME but no with more precision. */
expiretime = rdbLoadMillisecondTime(rdb,rdbver);
if (rioGetReadError(rdb)) goto eoferr;
2018-03-15 11:24:53 -04:00
continue; /* Read next opcode. */
} else if (type == RDB_OPCODE_FREQ) {
/* FREQ: LFU frequency. */
uint8_t byte;
if (rioRead(rdb,&byte,1) == 0) goto eoferr;
lfu_freq = byte;
2018-03-15 11:33:18 -04:00
continue; /* Read next opcode. */
2018-03-15 11:24:53 -04:00
} else if (type == RDB_OPCODE_IDLE) {
/* IDLE: LRU idle time. */
uint64_t qword;
if ((qword = rdbLoadLen(rdb,NULL)) == RDB_LENERR) goto eoferr;
lru_idle = qword;
2018-03-15 11:33:18 -04:00
continue; /* Read next opcode. */
2015-07-27 03:41:48 -04:00
} else if (type == RDB_OPCODE_EOF) {
/* EOF: End of file, exit the main loop. */
break;
2015-07-27 03:41:48 -04:00
} else if (type == RDB_OPCODE_SELECTDB) {
/* SELECTDB: Select the specified database. */
2018-03-15 11:24:53 -04:00
if ((dbid = rdbLoadLen(rdb,NULL)) == RDB_LENERR) goto eoferr;
if (dbid >= (unsigned)server.dbnum) {
2015-07-27 03:41:48 -04:00
serverLog(LL_WARNING,
"FATAL: Data file was created with a Redis "
"server configured to handle more than %d "
"databases. Exiting\n", server.dbnum);
exit(1);
}
db = server.db+dbid;
2018-03-15 11:24:53 -04:00
continue; /* Read next opcode. */
2015-07-27 03:41:48 -04:00
} else if (type == RDB_OPCODE_RESIZEDB) {
/* RESIZEDB: Hint about the size of the keys in the currently
* selected data base, in order to avoid useless rehashing. */
2016-06-01 14:18:28 -04:00
uint64_t db_size, expires_size;
if ((db_size = rdbLoadLen(rdb,NULL)) == RDB_LENERR)
goto eoferr;
if ((expires_size = rdbLoadLen(rdb,NULL)) == RDB_LENERR)
goto eoferr;
dictExpand(db->dict,db_size);
dictExpand(db->expires,expires_size);
2018-03-15 11:24:53 -04:00
continue; /* Read next opcode. */
2015-07-27 03:41:48 -04:00
} else if (type == RDB_OPCODE_AUX) {
/* AUX: generic string-string fields. Use to add state to RDB
* which is backward compatible. Implementations of RDB loading
Squash merging 125 typo/grammar/comment/doc PRs (#7773) List of squashed commits or PRs =============================== commit 66801ea Author: hwware <wen.hui.ware@gmail.com> Date: Mon Jan 13 00:54:31 2020 -0500 typo fix in acl.c commit 46f55db Author: Itamar Haber <itamar@redislabs.com> Date: Sun Sep 6 18:24:11 2020 +0300 Updates a couple of comments Specifically: * RM_AutoMemory completed instead of pointing to docs * Updated link to custom type doc commit 61a2aa0 Author: xindoo <xindoo@qq.com> Date: Tue Sep 1 19:24:59 2020 +0800 Correct errors in code comments commit a5871d1 Author: yz1509 <pro-756@qq.com> Date: Tue Sep 1 18:36:06 2020 +0800 fix typos in module.c commit 41eede7 Author: bookug <bookug@qq.com> Date: Sat Aug 15 01:11:33 2020 +0800 docs: fix typos in comments commit c303c84 Author: lazy-snail <ws.niu@outlook.com> Date: Fri Aug 7 11:15:44 2020 +0800 fix spelling in redis.conf commit 1eb76bf Author: zhujian <zhujianxyz@gmail.com> Date: Thu Aug 6 15:22:10 2020 +0800 add a missing 'n' in comment commit 1530ec2 Author: Daniel Dai <764122422@qq.com> Date: Mon Jul 27 00:46:35 2020 -0400 fix spelling in tracking.c commit e517b31 Author: Hunter-Chen <huntcool001@gmail.com> Date: Fri Jul 17 22:33:32 2020 +0800 Update redis.conf Co-authored-by: Itamar Haber <itamar@redislabs.com> commit c300eff Author: Hunter-Chen <huntcool001@gmail.com> Date: Fri Jul 17 22:33:23 2020 +0800 Update redis.conf Co-authored-by: Itamar Haber <itamar@redislabs.com> commit 4c058a8 Author: 陈浩鹏 <chenhaopeng@heytea.com> Date: Thu Jun 25 19:00:56 2020 +0800 Grammar fix and clarification commit 5fcaa81 Author: bodong.ybd <bodong.ybd@alibaba-inc.com> Date: Fri Jun 19 10:09:00 2020 +0800 Fix typos commit 4caca9a Author: Pruthvi P <pruthvi@ixigo.com> Date: Fri May 22 00:33:22 2020 +0530 Fix typo eviciton => eviction commit b2a25f6 Author: Brad Dunbar <dunbarb2@gmail.com> Date: Sun May 17 12:39:59 2020 -0400 Fix a typo. commit 12842ae Author: hwware <wen.hui.ware@gmail.com> Date: Sun May 3 17:16:59 2020 -0400 fix spelling in redis conf commit ddba07c Author: Chris Lamb <chris@chris-lamb.co.uk> Date: Sat May 2 23:25:34 2020 +0100 Correct a "conflicts" spelling error. commit 8fc7bf2 Author: Nao YONASHIRO <yonashiro@r.recruit.co.jp> Date: Thu Apr 30 10:25:27 2020 +0900 docs: fix EXPIRE_FAST_CYCLE_DURATION to ACTIVE_EXPIRE_CYCLE_FAST_DURATION commit 9b2b67a Author: Brad Dunbar <dunbarb2@gmail.com> Date: Fri Apr 24 11:46:22 2020 -0400 Fix a typo. commit 0746f10 Author: devilinrust <63737265+devilinrust@users.noreply.github.com> Date: Thu Apr 16 00:17:53 2020 +0200 Fix typos in server.c commit 92b588d Author: benjessop12 <56115861+benjessop12@users.noreply.github.com> Date: Mon Apr 13 13:43:55 2020 +0100 Fix spelling mistake in lazyfree.c commit 1da37aa Merge: 2d4ba28 af347a8 Author: hwware <wen.hui.ware@gmail.com> Date: Thu Mar 5 22:41:31 2020 -0500 Merge remote-tracking branch 'upstream/unstable' into expiretypofix commit 2d4ba28 Author: hwware <wen.hui.ware@gmail.com> Date: Mon Mar 2 00:09:40 2020 -0500 fix typo in expire.c commit 1a746f7 Author: SennoYuki <minakami1yuki@gmail.com> Date: Thu Feb 27 16:54:32 2020 +0800 fix typo commit 8599b1a Author: dongheejeong <donghee950403@gmail.com> Date: Sun Feb 16 20:31:43 2020 +0000 Fix typo in server.c commit f38d4e8 Author: hwware <wen.hui.ware@gmail.com> Date: Sun Feb 2 22:58:38 2020 -0500 fix typo in evict.c commit fe143fc Author: Leo Murillo <leonardo.murillo@gmail.com> Date: Sun Feb 2 01:57:22 2020 -0600 Fix a few typos in redis.conf commit 1ab4d21 Author: viraja1 <anchan.viraj@gmail.com> Date: Fri Dec 27 17:15:58 2019 +0530 Fix typo in Latency API docstring commit ca1f70e Author: gosth <danxuedexing@qq.com> Date: Wed Dec 18 15:18:02 2019 +0800 fix typo in sort.c commit a57c06b Author: ZYunH <zyunhjob@163.com> Date: Mon Dec 16 22:28:46 2019 +0800 fix-zset-typo commit b8c92b5 Author: git-hulk <hulk.website@gmail.com> Date: Mon Dec 16 15:51:42 2019 +0800 FIX: typo in cluster.c, onformation->information commit 9dd981c Author: wujm2007 <jim.wujm@gmail.com> Date: Mon Dec 16 09:37:52 2019 +0800 Fix typo commit e132d7a Author: Sebastien Williams-Wynn <s.williamswynn.mail@gmail.com> Date: Fri Nov 15 00:14:07 2019 +0000 Minor typo change commit 47f44d5 Author: happynote3966 <01ssrmikururudevice01@gmail.com> Date: Mon Nov 11 22:08:48 2019 +0900 fix comment typo in redis-cli.c commit b8bdb0d Author: fulei <fulei@kuaishou.com> Date: Wed Oct 16 18:00:17 2019 +0800 Fix a spelling mistake of comments in defragDictBucketCallback commit 0def46a Author: fulei <fulei@kuaishou.com> Date: Wed Oct 16 13:09:27 2019 +0800 fix some spelling mistakes of comments in defrag.c commit f3596fd Author: Phil Rajchgot <tophil@outlook.com> Date: Sun Oct 13 02:02:32 2019 -0400 Typo and grammar fixes Redis and its documentation are great -- just wanted to submit a few corrections in the spirit of Hacktoberfest. Thanks for all your work on this project. I use it all the time and it works beautifully. commit 2b928cd Author: KangZhiDong <worldkzd@gmail.com> Date: Sun Sep 1 07:03:11 2019 +0800 fix typos commit 33aea14 Author: Axlgrep <axlgrep@gmail.com> Date: Tue Aug 27 11:02:18 2019 +0800 Fixed eviction spelling issues commit e282a80 Author: Simen Flatby <simen@oms.no> Date: Tue Aug 20 15:25:51 2019 +0200 Update comments to reflect prop name In the comments the prop is referenced as replica-validity-factor, but it is really named cluster-replica-validity-factor. commit 74d1f9a Author: Jim Green <jimgreen2013@qq.com> Date: Tue Aug 20 20:00:31 2019 +0800 fix comment error, the code is ok commit eea1407 Author: Liao Tonglang <liaotonglang@gmail.com> Date: Fri May 31 10:16:18 2019 +0800 typo fix fix cna't to can't commit 0da553c Author: KAWACHI Takashi <tkawachi@gmail.com> Date: Wed Jul 17 00:38:16 2019 +0900 Fix typo commit 7fc8fb6 Author: Michael Prokop <mika@grml.org> Date: Tue May 28 17:58:42 2019 +0200 Typo fixes s/familar/familiar/ s/compatiblity/compatibility/ s/ ot / to / s/itsef/itself/ commit 5f46c9d Author: zhumoing <34539422+zhumoing@users.noreply.github.com> Date: Tue May 21 21:16:50 2019 +0800 typo-fixes typo-fixes commit 321dfe1 Author: wxisme <850885154@qq.com> Date: Sat Mar 16 15:10:55 2019 +0800 typo fix commit b4fb131 Merge: 267e0e6 3df1eb8 Author: Nikitas Bastas <nikitasbst@gmail.com> Date: Fri Feb 8 22:55:45 2019 +0200 Merge branch 'unstable' of antirez/redis into unstable commit 267e0e6 Author: Nikitas Bastas <nikitasbst@gmail.com> Date: Wed Jan 30 21:26:04 2019 +0200 Minor typo fix commit 30544e7 Author: inshal96 <39904558+inshal96@users.noreply.github.com> Date: Fri Jan 4 16:54:50 2019 +0500 remove an extra 'a' in the comments commit 337969d Author: BrotherGao <yangdongheng11@gmail.com> Date: Sat Dec 29 12:37:29 2018 +0800 fix typo in redis.conf commit 9f4b121 Merge: 423a030 e504583 Author: BrotherGao <yangdongheng@xiaomi.com> Date: Sat Dec 29 11:41:12 2018 +0800 Merge branch 'unstable' of antirez/redis into unstable commit 423a030 Merge: 42b02b7 46a51cd Author: 杨东衡 <yangdongheng@xiaomi.com> Date: Tue Dec 4 23:56:11 2018 +0800 Merge branch 'unstable' of antirez/redis into unstable commit 42b02b7 Merge: 68c0e6e b8febe6 Author: Dongheng Yang <yangdongheng11@gmail.com> Date: Sun Oct 28 15:54:23 2018 +0800 Merge pull request #1 from antirez/unstable update local data commit 714b589 Author: Christian <crifei93@gmail.com> Date: Fri Dec 28 01:17:26 2018 +0100 fix typo "resulution" commit e23259d Author: garenchan <1412950785@qq.com> Date: Wed Dec 26 09:58:35 2018 +0800 fix typo: segfauls -> segfault commit a9359f8 Author: xjp <jianping_xie@aliyun.com> Date: Tue Dec 18 17:31:44 2018 +0800 Fixed REDISMODULE_H spell bug commit a12c3e4 Author: jdiaz <jrd.palacios@gmail.com> Date: Sat Dec 15 23:39:52 2018 -0600 Fixes hyperloglog hash function comment block description commit 770eb11 Author: 林上耀 <1210tom@163.com> Date: Sun Nov 25 17:16:10 2018 +0800 fix typo commit fd97fbb Author: Chris Lamb <chris@chris-lamb.co.uk> Date: Fri Nov 23 17:14:01 2018 +0100 Correct "unsupported" typo. commit a85522d Author: Jungnam Lee <jungnam.lee@oracle.com> Date: Thu Nov 8 23:01:29 2018 +0900 fix typo in test comments commit ade8007 Author: Arun Kumar <palerdot@users.noreply.github.com> Date: Tue Oct 23 16:56:35 2018 +0530 Fixed grammatical typo Fixed typo for word 'dictionary' commit 869ee39 Author: Hamid Alaei <hamid.a85@gmail.com> Date: Sun Aug 12 16:40:02 2018 +0430 fix documentations: (ThreadSafeContextStart/Stop -> ThreadSafeContextLock/Unlock), minor typo commit f89d158 Author: Mayank Jain <mayankjain255@gmail.com> Date: Tue Jul 31 23:01:21 2018 +0530 Updated README.md with some spelling corrections. Made correction in spelling of some misspelled words. commit 892198e Author: dsomeshwar <someshwar.dhayalan@gmail.com> Date: Sat Jul 21 23:23:04 2018 +0530 typo fix commit 8a4d780 Author: Itamar Haber <itamar@redislabs.com> Date: Mon Apr 30 02:06:52 2018 +0300 Fixes some typos commit e3acef6 Author: Noah Rosamilia <ivoahivoah@gmail.com> Date: Sat Mar 3 23:41:21 2018 -0500 Fix typo in /deps/README.md commit 04442fb Author: WuYunlong <xzsyeb@126.com> Date: Sat Mar 3 10:32:42 2018 +0800 Fix typo in readSyncBulkPayload() comment. commit 9f36880 Author: WuYunlong <xzsyeb@126.com> Date: Sat Mar 3 10:20:37 2018 +0800 replication.c comment: run_id -> replid. commit f866b4a Author: Francesco 'makevoid' Canessa <makevoid@gmail.com> Date: Thu Feb 22 22:01:56 2018 +0000 fix comment typo in server.c commit 0ebc69b Author: 줍 <jubee0124@gmail.com> Date: Mon Feb 12 16:38:48 2018 +0900 Fix typo in redis.conf Fix `five behaviors` to `eight behaviors` in [this sentence ](antirez/redis@unstable/redis.conf#L564) commit b50a620 Author: martinbroadhurst <martinbroadhurst@users.noreply.github.com> Date: Thu Dec 28 12:07:30 2017 +0000 Fix typo in valgrind.sup commit 7d8f349 Author: Peter Boughton <peter@sorcerersisle.com> Date: Mon Nov 27 19:52:19 2017 +0000 Update CONTRIBUTING; refer doc updates to redis-doc repo. commit 02dec7e Author: Klauswk <klauswk1@hotmail.com> Date: Tue Oct 24 16:18:38 2017 -0200 Fix typo in comment commit e1efbc8 Author: chenshi <baiwfg2@gmail.com> Date: Tue Oct 3 18:26:30 2017 +0800 Correct two spelling errors of comments commit 93327d8 Author: spacewander <spacewanderlzx@gmail.com> Date: Wed Sep 13 16:47:24 2017 +0800 Update the comment for OBJ_ENCODING_EMBSTR_SIZE_LIMIT's value The value of OBJ_ENCODING_EMBSTR_SIZE_LIMIT is 44 now instead of 39. commit 63d361f Author: spacewander <spacewanderlzx@gmail.com> Date: Tue Sep 12 15:06:42 2017 +0800 Fix <prevlen> related doc in ziplist.c According to the definition of ZIP_BIG_PREVLEN and other related code, the guard of single byte <prevlen> should be 254 instead of 255. commit ebe228d Author: hanael80 <hanael80@gmail.com> Date: Tue Aug 15 09:09:40 2017 +0900 Fix typo commit 6b696e6 Author: Matt Robenolt <matt@ydekproductions.com> Date: Mon Aug 14 14:50:47 2017 -0700 Fix typo in LATENCY DOCTOR output commit a2ec6ae Author: caosiyang <caosiyang@qiyi.com> Date: Tue Aug 15 14:15:16 2017 +0800 Fix a typo: form => from commit 3ab7699 Author: caosiyang <caosiyang@qiyi.com> Date: Thu Aug 10 18:40:33 2017 +0800 Fix a typo: replicationFeedSlavesFromMaster() => replicationFeedSlavesFromMasterStream() commit 72d43ef Author: caosiyang <caosiyang@qiyi.com> Date: Tue Aug 8 15:57:25 2017 +0800 fix a typo: servewr => server commit 707c958 Author: Bo Cai <charpty@gmail.com> Date: Wed Jul 26 21:49:42 2017 +0800 redis-cli.c typo: conut -> count. Signed-off-by: Bo Cai <charpty@gmail.com> commit b9385b2 Author: JackDrogon <jack.xsuperman@gmail.com> Date: Fri Jun 30 14:22:31 2017 +0800 Fix some spell problems commit 20d9230 Author: akosel <aaronjkosel@gmail.com> Date: Sun Jun 4 19:35:13 2017 -0500 Fix typo commit b167bfc Author: Krzysiek Witkowicz <krzysiekwitkowicz@gmail.com> Date: Mon May 22 21:32:27 2017 +0100 Fix #4008 small typo in comment commit 2b78ac8 Author: Jake Clarkson <jacobwclarkson@gmail.com> Date: Wed Apr 26 15:49:50 2017 +0100 Correct typo in tests/unit/hyperloglog.tcl commit b0f1cdb Author: Qi Luo <qiluo-msft@users.noreply.github.com> Date: Wed Apr 19 14:25:18 2017 -0700 Fix typo commit a90b0f9 Author: charsyam <charsyam@naver.com> Date: Thu Mar 16 18:19:53 2017 +0900 fix typos fix typos fix typos commit 8430a79 Author: Richard Hart <richardhart92@gmail.com> Date: Mon Mar 13 22:17:41 2017 -0400 Fixed log message typo in listenToPort. commit 481a1c2 Author: Vinod Kumar <kumar003vinod@gmail.com> Date: Sun Jan 15 23:04:51 2017 +0530 src/db.c: Correct "save" -> "safe" typo commit 586b4d3 Author: wangshaonan <wshn13@gmail.com> Date: Wed Dec 21 20:28:27 2016 +0800 Fix typo they->the in helloworld.c commit c1c4b5e Author: Jenner <hypxm@qq.com> Date: Mon Dec 19 16:39:46 2016 +0800 typo error commit 1ee1a3f Author: tielei <43289893@qq.com> Date: Mon Jul 18 13:52:25 2016 +0800 fix some comments commit 11a41fb Author: Otto Kekäläinen <otto@seravo.fi> Date: Sun Jul 3 10:23:55 2016 +0100 Fix spelling in documentation and comments commit 5fb5d82 Author: francischan <f1ancis621@gmail.com> Date: Tue Jun 28 00:19:33 2016 +0800 Fix outdated comments about redis.c file. It should now refer to server.c file. commit 6b254bc Author: lmatt-bit <lmatt123n@gmail.com> Date: Thu Apr 21 21:45:58 2016 +0800 Refine the comment of dictRehashMilliseconds func SLAVECONF->REPLCONF in comment - by andyli029 commit ee9869f Author: clark.kang <charsyam@naver.com> Date: Tue Mar 22 11:09:51 2016 +0900 fix typos commit f7b3b11 Author: Harisankar H <harisankarh@gmail.com> Date: Wed Mar 9 11:49:42 2016 +0530 Typo correction: "faield" --> "failed" Typo correction: "faield" --> "failed" commit 3fd40fc Author: Itamar Haber <itamar@redislabs.com> Date: Thu Feb 25 10:31:51 2016 +0200 Fixes a typo in comments commit 621c160 Author: Prayag Verma <prayag.verma@gmail.com> Date: Mon Feb 1 12:36:20 2016 +0530 Fix typo in Readme.md Spelling mistakes - `eviciton` > `eviction` `familar` > `familiar` commit d7d07d6 Author: WonCheol Lee <toctoc21c@gmail.com> Date: Wed Dec 30 15:11:34 2015 +0900 Typo fixed commit a4dade7 Author: Felix Bünemann <buenemann@louis.info> Date: Mon Dec 28 11:02:55 2015 +0100 [ci skip] Improve supervised upstart config docs This mentions that "expect stop" is required for supervised upstart to work correctly. See http://upstart.ubuntu.com/cookbook/#expect-stop for an explanation. commit d9caba9 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:30:03 2015 +1100 README: Remove trailing whitespace commit 72d42e5 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:29:32 2015 +1100 README: Fix typo. th => the commit dd6e957 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:29:20 2015 +1100 README: Fix typo. familar => familiar commit 3a12b23 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:28:54 2015 +1100 README: Fix typo. eviciton => eviction commit 2d1d03b Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:21:45 2015 +1100 README: Fix typo. sever => server commit 3973b06 Author: Itamar Haber <itamar@garantiadata.com> Date: Sat Dec 19 17:01:20 2015 +0200 Typo fix commit 4f2e460 Author: Steve Gao <fu@2token.com> Date: Fri Dec 4 10:22:05 2015 +0800 Update README - fix typos commit b21667c Author: binyan <binbin.yan@nokia.com> Date: Wed Dec 2 22:48:37 2015 +0800 delete redundancy color judge in sdscatcolor commit 88894c7 Author: binyan <binbin.yan@nokia.com> Date: Wed Dec 2 22:14:42 2015 +0800 the example output shoule be HelloWorld commit 2763470 Author: binyan <binbin.yan@nokia.com> Date: Wed Dec 2 17:41:39 2015 +0800 modify error word keyevente Signed-off-by: binyan <binbin.yan@nokia.com> commit 0847b3d Author: Bruno Martins <bscmartins@gmail.com> Date: Wed Nov 4 11:37:01 2015 +0000 typo commit bbb9e9e Author: dawedawe <dawedawe@gmx.de> Date: Fri Mar 27 00:46:41 2015 +0100 typo: zimap -> zipmap commit 5ed297e Author: Axel Advento <badwolf.bloodseeker.rev@gmail.com> Date: Tue Mar 3 15:58:29 2015 +0800 Fix 'salve' typos to 'slave' commit edec9d6 Author: LudwikJaniuk <ludvig.janiuk@gmail.com> Date: Wed Jun 12 14:12:47 2019 +0200 Update README.md Co-Authored-By: Qix <Qix-@users.noreply.github.com> commit 692a7af Author: LudwikJaniuk <ludvig.janiuk@gmail.com> Date: Tue May 28 14:32:04 2019 +0200 grammar commit d962b0a Author: Nick Frost <nickfrostatx@gmail.com> Date: Wed Jul 20 15:17:12 2016 -0700 Minor grammar fix commit 24fff01aaccaf5956973ada8c50ceb1462e211c6 (typos) Author: Chad Miller <chadm@squareup.com> Date: Tue Sep 8 13:46:11 2020 -0400 Fix faulty comment about operation of unlink() commit 3cd5c1f3326c52aa552ada7ec797c6bb16452355 Author: Kevin <kevin.xgr@gmail.com> Date: Wed Nov 20 00:13:50 2019 +0800 Fix typo in server.c. From a83af59 Mon Sep 17 00:00:00 2001 From: wuwo <wuwo@wacai.com> Date: Fri, 17 Mar 2017 20:37:45 +0800 Subject: [PATCH] falure to failure From c961896 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B7=A6=E6=87=B6?= <veficos@gmail.com> Date: Sat, 27 May 2017 15:33:04 +0800 Subject: [PATCH] fix typo From e600ef2 Mon Sep 17 00:00:00 2001 From: "rui.zou" <rui.zou@yunify.com> Date: Sat, 30 Sep 2017 12:38:15 +0800 Subject: [PATCH] fix a typo From c7d07fa Mon Sep 17 00:00:00 2001 From: Alexandre Perrin <alex@kaworu.ch> Date: Thu, 16 Aug 2018 10:35:31 +0200 Subject: [PATCH] deps README.md typo From b25cb67 Mon Sep 17 00:00:00 2001 From: Guy Korland <gkorland@gmail.com> Date: Wed, 26 Sep 2018 10:55:37 +0300 Subject: [PATCH 1/2] fix typos in header From ad28ca6 Mon Sep 17 00:00:00 2001 From: Guy Korland <gkorland@gmail.com> Date: Wed, 26 Sep 2018 11:02:36 +0300 Subject: [PATCH 2/2] fix typos commit 34924cdedd8552466fc22c1168d49236cb7ee915 Author: Adrian Lynch <adi_ady_ade@hotmail.com> Date: Sat Apr 4 21:59:15 2015 +0100 Typos fixed commit fd2a1e7 Author: Jan <jsteemann@users.noreply.github.com> Date: Sat Oct 27 19:13:01 2018 +0200 Fix typos Fix typos commit e14e47c1a234b53b0e103c5f6a1c61481cbcbb02 Author: Andy Lester <andy@petdance.com> Date: Fri Aug 2 22:30:07 2019 -0500 Fix multiple misspellings of "following" commit 79b948ce2dac6b453fe80995abbcaac04c213d5a Author: Andy Lester <andy@petdance.com> Date: Fri Aug 2 22:24:28 2019 -0500 Fix misspelling of create-cluster commit 1fffde52666dc99ab35efbd31071a4c008cb5a71 Author: Andy Lester <andy@petdance.com> Date: Wed Jul 31 17:57:56 2019 -0500 Fix typos commit 204c9ba9651e9e05fd73936b452b9a30be456cfe Author: Xiaobo Zhu <xiaobo.zhu@shopee.com> Date: Tue Aug 13 22:19:25 2019 +0800 fix typos Squashed commit of the following: commit 1d9aaf8 Author: danmedani <danmedani@gmail.com> Date: Sun Aug 2 11:40:26 2015 -0700 README typo fix. Squashed commit of the following: commit 32bfa7c Author: Erik Dubbelboer <erik@dubbelboer.com> Date: Mon Jul 6 21:15:08 2015 +0200 Fixed grammer Squashed commit of the following: commit b24f69c Author: Sisir Koppaka <sisir.koppaka@gmail.com> Date: Mon Mar 2 22:38:45 2015 -0500 utils/hashtable/rehashing.c: Fix typos Squashed commit of the following: commit 4e04082 Author: Erik Dubbelboer <erik@dubbelboer.com> Date: Mon Mar 23 08:22:21 2015 +0000 Small config file documentation improvements Squashed commit of the following: commit acb8773 Author: ctd1500 <ctd1500@gmail.com> Date: Fri May 8 01:52:48 2015 -0700 Typo and grammar fixes in readme commit 2eb75b6 Author: ctd1500 <ctd1500@gmail.com> Date: Fri May 8 01:36:18 2015 -0700 fixed redis.conf comment Squashed commit of the following: commit a8249a2 Author: Masahiko Sawada <sawada.mshk@gmail.com> Date: Fri Dec 11 11:39:52 2015 +0530 Revise correction of typos. Squashed commit of the following: commit 3c02028 Author: zhaojun11 <zhaojun11@jd.com> Date: Wed Jan 17 19:05:28 2018 +0800 Fix typos include two code typos in cluster.c and latency.c Squashed commit of the following: commit 9dba47c Author: q191201771 <191201771@qq.com> Date: Sat Jan 4 11:31:04 2020 +0800 fix function listCreate comment in adlist.c Update src/server.c commit 2c7c2cb536e78dd211b1ac6f7bda00f0f54faaeb Author: charpty <charpty@gmail.com> Date: Tue May 1 23:16:59 2018 +0800 server.c typo: modules system dictionary type comment Signed-off-by: charpty <charpty@gmail.com> commit a8395323fb63cb59cb3591cb0f0c8edb7c29a680 Author: Itamar Haber <itamar@redislabs.com> Date: Sun May 6 00:25:18 2018 +0300 Updates test_helper.tcl's help with undocumented options Specifically: * Host * Port * Client commit bde6f9ced15755cd6407b4af7d601b030f36d60b Author: wxisme <850885154@qq.com> Date: Wed Aug 8 15:19:19 2018 +0800 fix comments in deps files commit 3172474ba991532ab799ee1873439f3402412331 Author: wxisme <850885154@qq.com> Date: Wed Aug 8 14:33:49 2018 +0800 fix some comments commit 01b6f2b6858b5cf2ce4ad5092d2c746e755f53f0 Author: Thor Juhasz <thor@juhasz.pro> Date: Sun Nov 18 14:37:41 2018 +0100 Minor fixes to comments Found some parts a little unclear on a first read, which prompted me to have a better look at the file and fix some minor things I noticed. Fixing minor typos and grammar. There are no changes to configuration options. These changes are only meant to help the user better understand the explanations to the various configuration options
2020-09-10 06:43:38 -04:00
* are required to skip AUX fields they don't understand.
*
* An AUX field is composed of two strings: key and value. */
robj *auxkey, *auxval;
if ((auxkey = rdbLoadStringObject(rdb)) == NULL) goto eoferr;
if ((auxval = rdbLoadStringObject(rdb)) == NULL) goto eoferr;
if (((char*)auxkey->ptr)[0] == '%') {
/* All the fields with a name staring with '%' are considered
* information fields and are logged at startup with a log
* level of NOTICE. */
2015-07-27 03:41:48 -04:00
serverLog(LL_NOTICE,"RDB '%s': %s",
(char*)auxkey->ptr,
(char*)auxval->ptr);
PSYNC2: different improvements to Redis replication. The gist of the changes is that now, partial resynchronizations between slaves and masters (without the need of a full resync with RDB transfer and so forth), work in a number of cases when it was impossible in the past. For instance: 1. When a slave is promoted to mastrer, the slaves of the old master can partially resynchronize with the new master. 2. Chained slalves (slaves of slaves) can be moved to replicate to other slaves or the master itsef, without requiring a full resync. 3. The master itself, after being turned into a slave, is able to partially resynchronize with the new master, when it joins replication again. In order to obtain this, the following main changes were operated: * Slaves also take a replication backlog, not just masters. * Same stream replication for all the slaves and sub slaves. The replication stream is identical from the top level master to its slaves and is also the same from the slaves to their sub-slaves and so forth. This means that if a slave is later promoted to master, it has the same replication backlong, and can partially resynchronize with its slaves (that were previously slaves of the old master). * A given replication history is no longer identified by the `runid` of a Redis node. There is instead a `replication ID` which changes every time the instance has a new history no longer coherent with the past one. So, for example, slaves publish the same replication history of their master, however when they are turned into masters, they publish a new replication ID, but still remember the old ID, so that they are able to partially resynchronize with slaves of the old master (up to a given offset). * The replication protocol was slightly modified so that a new extended +CONTINUE reply from the master is able to inform the slave of a replication ID change. * REPLCONF CAPA is used in order to notify masters that a slave is able to understand the new +CONTINUE reply. * The RDB file was extended with an auxiliary field that is able to select a given DB after loading in the slave, so that the slave can continue receiving the replication stream from the point it was disconnected without requiring the master to insert "SELECT" statements. This is useful in order to guarantee the "same stream" property, because the slave must be able to accumulate an identical backlog. * Slave pings to sub-slaves are now sent in a special form, when the top-level master is disconnected, in order to don't interfer with the replication stream. We just use out of band "\n" bytes as in other parts of the Redis protocol. An old design document is available here: https://gist.github.com/antirez/ae068f95c0d084891305 However the implementation is not identical to the description because during the work to implement it, different changes were needed in order to make things working well.
2016-11-09 05:31:06 -05:00
} else if (!strcasecmp(auxkey->ptr,"repl-stream-db")) {
if (rsi) rsi->repl_stream_db = atoi(auxval->ptr);
} else if (!strcasecmp(auxkey->ptr,"repl-id")) {
if (rsi && sdslen(auxval->ptr) == CONFIG_RUN_ID_SIZE) {
memcpy(rsi->repl_id,auxval->ptr,CONFIG_RUN_ID_SIZE+1);
rsi->repl_id_is_set = 1;
}
} else if (!strcasecmp(auxkey->ptr,"repl-offset")) {
if (rsi) rsi->repl_offset = strtoll(auxval->ptr,NULL,10);
} else if (!strcasecmp(auxkey->ptr,"lua")) {
/* Load the script back in memory. */
if (luaCreateFunction(NULL,server.lua,auxval) == NULL) {
rdbExitReportCorruptRDB(
"Can't load Lua script from RDB file! "
"BODY: %s", (char*)auxval->ptr);
}
} else if (!strcasecmp(auxkey->ptr,"redis-ver")) {
serverLog(LL_NOTICE,"Loading RDB produced by version %s",
2019-03-04 06:43:00 -05:00
(char*)auxval->ptr);
} else if (!strcasecmp(auxkey->ptr,"ctime")) {
time_t age = time(NULL)-strtol(auxval->ptr,NULL,10);
if (age < 0) age = 0;
serverLog(LL_NOTICE,"RDB age %ld seconds",
(unsigned long) age);
} else if (!strcasecmp(auxkey->ptr,"used-mem")) {
long long usedmem = strtoll(auxval->ptr,NULL,10);
serverLog(LL_NOTICE,"RDB memory usage when created %.2f Mb",
(double) usedmem / (1024*1024));
server.loading_rdb_used_mem = usedmem;
} else if (!strcasecmp(auxkey->ptr,"aof-preamble")) {
long long haspreamble = strtoll(auxval->ptr,NULL,10);
if (haspreamble) serverLog(LL_NOTICE,"RDB has an AOF tail");
} else if (!strcasecmp(auxkey->ptr,"redis-bits")) {
/* Just ignored. */
} else {
/* We ignore fields we don't understand, as by AUX field
* contract. */
2015-07-27 03:41:48 -04:00
serverLog(LL_DEBUG,"Unrecognized RDB AUX field: '%s'",
(char*)auxkey->ptr);
}
2015-01-08 16:23:48 -05:00
decrRefCount(auxkey);
decrRefCount(auxval);
continue; /* Read type again. */
} else if (type == RDB_OPCODE_MODULE_AUX) {
/* Load module data that is not related to the Redis key space.
* Such data can be potentially be stored both before and after the
* RDB keys-values section. */
uint64_t moduleid = rdbLoadLen(rdb,NULL);
int when_opcode = rdbLoadLen(rdb,NULL);
int when = rdbLoadLen(rdb,NULL);
if (rioGetReadError(rdb)) goto eoferr;
if (when_opcode != RDB_MODULE_OPCODE_UINT)
rdbReportReadError("bad when_opcode");
moduleType *mt = moduleTypeLookupModuleByID(moduleid);
char name[10];
moduleTypeNameByID(name,moduleid);
if (!rdbCheckMode && mt == NULL) {
/* Unknown module. */
serverLog(LL_WARNING,"The RDB file contains AUX module data I can't load: no matching module '%s'", name);
exit(1);
} else if (!rdbCheckMode && mt != NULL) {
if (!mt->aux_load) {
/* Module doesn't support AUX. */
serverLog(LL_WARNING,"The RDB file contains module AUX data, but the module '%s' doesn't seem to support it.", name);
exit(1);
}
RedisModuleIO io;
moduleInitIOContext(io,mt,rdb,NULL);
io.ver = 2;
/* Call the rdb_load method of the module providing the 10 bit
* encoding version in the lower 10 bits of the module ID. */
if (mt->aux_load(&io,moduleid&1023, when) != REDISMODULE_OK || io.error) {
moduleTypeNameByID(name,moduleid);
serverLog(LL_WARNING,"The RDB file contains module AUX data for the module type '%s', that the responsible module is not able to load. Check for modules log above for additional clues.", name);
exit(1);
}
if (io.ctx) {
moduleFreeContext(io.ctx);
zfree(io.ctx);
}
uint64_t eof = rdbLoadLen(rdb,NULL);
if (eof != RDB_MODULE_OPCODE_EOF) {
serverLog(LL_WARNING,"The RDB file contains module AUX data for the module '%s' that is not terminated by the proper module value EOF marker", name);
exit(1);
}
continue;
} else {
/* RDB check mode. */
robj *aux = rdbLoadCheckModuleValue(rdb,name);
decrRefCount(aux);
continue; /* Read next opcode. */
}
}
/* Read key */
if ((key = rdbGenericLoadStringObject(rdb,RDB_LOAD_SDS,NULL)) == NULL)
goto eoferr;
/* Read value */
if ((val = rdbLoadObject(type,rdb,key)) == NULL) {
sdsfree(key);
goto eoferr;
}
/* Check if the key already expired. This function is used when loading
* an RDB file from disk, either at startup, or when an RDB was
* received from the master. In the latter case, the master is
* responsible for key expiry. If we would expire keys here, the
* snapshot taken by the master may not be reflected on the slave.
* Similarly if the RDB is the preamble of an AOF file, we want to
* load all the keys as they are, since the log of operations later
* assume to work in an exact keyspace state. */
if (iAmMaster() &&
!(rdbflags&RDBFLAGS_AOF_PREAMBLE) &&
expiretime != -1 && expiretime < now)
{
sdsfree(key);
decrRefCount(val);
2018-03-15 11:24:53 -04:00
} else {
robj keyobj;
initStaticStringObject(keyobj,key);
2018-03-15 11:24:53 -04:00
/* Add the new object in the hash table */
int added = dbAddRDBLoad(db,key,val);
if (!added) {
if (rdbflags & RDBFLAGS_ALLOW_DUP) {
/* This flag is useful for DEBUG RELOAD special modes.
* When it's set we allow new keys to replace the current
* keys with the same name. */
dbSyncDelete(db,&keyobj);
dbAddRDBLoad(db,key,val);
} else {
serverLog(LL_WARNING,
"RDB has duplicated key '%s' in DB %d",key,db->id);
serverPanic("Duplicated key found in RDB file");
}
}
2018-03-15 11:24:53 -04:00
/* Set the expire time if needed */
if (expiretime != -1) {
setExpire(NULL,db,&keyobj,expiretime);
}
/* Set usage information (for eviction). */
objectSetLRUOrLFU(val,lfu_freq,lru_idle,lru_clock,1000);
/* call key space notification on key loaded for modules only */
moduleNotifyKeyspaceEvent(NOTIFY_LOADED, "loaded", &keyobj, db->id);
2018-03-15 11:24:53 -04:00
}
/* Loading the database more slowly is useful in order to test
* certain edge cases. */
if (server.key_load_delay)
debugDelay(server.key_load_delay);
2018-03-15 11:24:53 -04:00
/* Reset the state that is key-specified and is populated by
* opcodes before the key, so that we start from scratch again. */
expiretime = -1;
lfu_freq = -1;
lru_idle = -1;
}
/* Verify the checksum if RDB version is >= 5 */
if (rdbver >= 5) {
uint64_t cksum, expected = rdb->cksum;
if (rioRead(rdb,&cksum,8) == 0) goto eoferr;
Sanitize dump payload: fuzz tester and fixes for segfaults and leaks it exposed The test creates keys with various encodings, DUMP them, corrupt the payload and RESTORES it. It utilizes the recently added use-exit-on-panic config to distinguish between asserts and segfaults. If the restore succeeds, it runs random commands on the key to attempt to trigger a crash. It runs in two modes, one with deep sanitation enabled and one without. In the first one we don't expect any assertions or segfaults, in the second one we expect assertions, but no segfaults. We also check for leaks and invalid reads using valgrind, and if we find them we print the commands that lead to that issue. Changes in the code (other than the test): - Replace a few NPD (null pointer deference) flows and division by zero with an assertion, so that it doesn't fail the test. (since we set the server to use `exit` rather than `abort` on assertion). - Fix quite a lot of flows in rdb.c that could have lead to memory leaks in RESTORE command (since it now responds with an error rather than panic) - Add a DEBUG flag for SET-SKIP-CHECKSUM-VALIDATION so that the test don't need to bother with faking a valid checksum - Remove a pile of code in serverLogObjectDebugInfo which is actually unsafe to run in the crash report (see comments in the code) - fix a missing boundary check in lzf_decompress test suite infra improvements: - be able to run valgrind checks before the process terminates - rotate log files when restarting servers
2020-08-14 09:05:34 -04:00
if (server.rdb_checksum && !server.skip_checksum_validation) {
memrev64ifbe(&cksum);
if (cksum == 0) {
serverLog(LL_WARNING,"RDB file was saved with checksum disabled: no check performed.");
} else if (cksum != expected) {
2020-04-24 19:59:24 -04:00
serverLog(LL_WARNING,"Wrong RDB checksum expected: (%llx) but "
"got (%llx). Aborting now.",
(unsigned long long)expected,
(unsigned long long)cksum);
rdbExitReportCorruptRDB("RDB CRC error");
return C_ERR;
}
}
}
return C_OK;
/* Unexpected end of file is handled here calling rdbReportReadError():
* this will in turn either abort Redis in most cases, or if we are loading
* the RDB file from a socket during initial SYNC (diskless replica mode),
* we'll report the error to the caller, so that we can retry. */
eoferr:
serverLog(LL_WARNING,
"Short read or OOM loading DB. Unrecoverable error, aborting now.");
rdbReportReadError("Unexpected EOF reading RDB file");
return C_ERR;
}
/* Like rdbLoadRio() but takes a filename instead of a rio stream. The
* filename is open for reading and a rio stream object created in order
* to do the actual loading. Moreover the ETA displayed in the INFO
PSYNC2: different improvements to Redis replication. The gist of the changes is that now, partial resynchronizations between slaves and masters (without the need of a full resync with RDB transfer and so forth), work in a number of cases when it was impossible in the past. For instance: 1. When a slave is promoted to mastrer, the slaves of the old master can partially resynchronize with the new master. 2. Chained slalves (slaves of slaves) can be moved to replicate to other slaves or the master itsef, without requiring a full resync. 3. The master itself, after being turned into a slave, is able to partially resynchronize with the new master, when it joins replication again. In order to obtain this, the following main changes were operated: * Slaves also take a replication backlog, not just masters. * Same stream replication for all the slaves and sub slaves. The replication stream is identical from the top level master to its slaves and is also the same from the slaves to their sub-slaves and so forth. This means that if a slave is later promoted to master, it has the same replication backlong, and can partially resynchronize with its slaves (that were previously slaves of the old master). * A given replication history is no longer identified by the `runid` of a Redis node. There is instead a `replication ID` which changes every time the instance has a new history no longer coherent with the past one. So, for example, slaves publish the same replication history of their master, however when they are turned into masters, they publish a new replication ID, but still remember the old ID, so that they are able to partially resynchronize with slaves of the old master (up to a given offset). * The replication protocol was slightly modified so that a new extended +CONTINUE reply from the master is able to inform the slave of a replication ID change. * REPLCONF CAPA is used in order to notify masters that a slave is able to understand the new +CONTINUE reply. * The RDB file was extended with an auxiliary field that is able to select a given DB after loading in the slave, so that the slave can continue receiving the replication stream from the point it was disconnected without requiring the master to insert "SELECT" statements. This is useful in order to guarantee the "same stream" property, because the slave must be able to accumulate an identical backlog. * Slave pings to sub-slaves are now sent in a special form, when the top-level master is disconnected, in order to don't interfer with the replication stream. We just use out of band "\n" bytes as in other parts of the Redis protocol. An old design document is available here: https://gist.github.com/antirez/ae068f95c0d084891305 However the implementation is not identical to the description because during the work to implement it, different changes were needed in order to make things working well.
2016-11-09 05:31:06 -05:00
* output is initialized and finalized.
*
* If you pass an 'rsi' structure initialied with RDB_SAVE_OPTION_INIT, the
* loading code will fiil the information fields in the structure. */
int rdbLoad(char *filename, rdbSaveInfo *rsi, int rdbflags) {
FILE *fp;
rio rdb;
int retval;
if ((fp = fopen(filename,"r")) == NULL) return C_ERR;
startLoadingFile(fp, filename,rdbflags);
rioInitWithFile(&rdb,fp);
retval = rdbLoadRio(&rdb,rdbflags,rsi);
fclose(fp);
stopLoading(retval==C_OK);
return retval;
}
/* A background saving child (BGSAVE) terminated its work. Handle this.
* This function covers the case of actual BGSAVEs. */
static void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal) {
if (!bysignal && exitcode == 0) {
2015-07-27 03:41:48 -04:00
serverLog(LL_NOTICE,
"Background saving terminated with success");
server.dirty = server.dirty - server.dirty_before_bgsave;
server.lastsave = time(NULL);
server.lastbgsave_status = C_OK;
} else if (!bysignal && exitcode != 0) {
2015-07-27 03:41:48 -04:00
serverLog(LL_WARNING, "Background saving error");
server.lastbgsave_status = C_ERR;
} else {
mstime_t latency;
2015-07-27 03:41:48 -04:00
serverLog(LL_WARNING,
"Background saving terminated by signal %d", bysignal);
latencyStartMonitor(latency);
rdbRemoveTempFile(server.rdb_child_pid, 0);
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("rdb-unlink-temp-file",latency);
/* SIGUSR1 is whitelisted, so we have a way to kill a child without
Squash merging 125 typo/grammar/comment/doc PRs (#7773) List of squashed commits or PRs =============================== commit 66801ea Author: hwware <wen.hui.ware@gmail.com> Date: Mon Jan 13 00:54:31 2020 -0500 typo fix in acl.c commit 46f55db Author: Itamar Haber <itamar@redislabs.com> Date: Sun Sep 6 18:24:11 2020 +0300 Updates a couple of comments Specifically: * RM_AutoMemory completed instead of pointing to docs * Updated link to custom type doc commit 61a2aa0 Author: xindoo <xindoo@qq.com> Date: Tue Sep 1 19:24:59 2020 +0800 Correct errors in code comments commit a5871d1 Author: yz1509 <pro-756@qq.com> Date: Tue Sep 1 18:36:06 2020 +0800 fix typos in module.c commit 41eede7 Author: bookug <bookug@qq.com> Date: Sat Aug 15 01:11:33 2020 +0800 docs: fix typos in comments commit c303c84 Author: lazy-snail <ws.niu@outlook.com> Date: Fri Aug 7 11:15:44 2020 +0800 fix spelling in redis.conf commit 1eb76bf Author: zhujian <zhujianxyz@gmail.com> Date: Thu Aug 6 15:22:10 2020 +0800 add a missing 'n' in comment commit 1530ec2 Author: Daniel Dai <764122422@qq.com> Date: Mon Jul 27 00:46:35 2020 -0400 fix spelling in tracking.c commit e517b31 Author: Hunter-Chen <huntcool001@gmail.com> Date: Fri Jul 17 22:33:32 2020 +0800 Update redis.conf Co-authored-by: Itamar Haber <itamar@redislabs.com> commit c300eff Author: Hunter-Chen <huntcool001@gmail.com> Date: Fri Jul 17 22:33:23 2020 +0800 Update redis.conf Co-authored-by: Itamar Haber <itamar@redislabs.com> commit 4c058a8 Author: 陈浩鹏 <chenhaopeng@heytea.com> Date: Thu Jun 25 19:00:56 2020 +0800 Grammar fix and clarification commit 5fcaa81 Author: bodong.ybd <bodong.ybd@alibaba-inc.com> Date: Fri Jun 19 10:09:00 2020 +0800 Fix typos commit 4caca9a Author: Pruthvi P <pruthvi@ixigo.com> Date: Fri May 22 00:33:22 2020 +0530 Fix typo eviciton => eviction commit b2a25f6 Author: Brad Dunbar <dunbarb2@gmail.com> Date: Sun May 17 12:39:59 2020 -0400 Fix a typo. commit 12842ae Author: hwware <wen.hui.ware@gmail.com> Date: Sun May 3 17:16:59 2020 -0400 fix spelling in redis conf commit ddba07c Author: Chris Lamb <chris@chris-lamb.co.uk> Date: Sat May 2 23:25:34 2020 +0100 Correct a "conflicts" spelling error. commit 8fc7bf2 Author: Nao YONASHIRO <yonashiro@r.recruit.co.jp> Date: Thu Apr 30 10:25:27 2020 +0900 docs: fix EXPIRE_FAST_CYCLE_DURATION to ACTIVE_EXPIRE_CYCLE_FAST_DURATION commit 9b2b67a Author: Brad Dunbar <dunbarb2@gmail.com> Date: Fri Apr 24 11:46:22 2020 -0400 Fix a typo. commit 0746f10 Author: devilinrust <63737265+devilinrust@users.noreply.github.com> Date: Thu Apr 16 00:17:53 2020 +0200 Fix typos in server.c commit 92b588d Author: benjessop12 <56115861+benjessop12@users.noreply.github.com> Date: Mon Apr 13 13:43:55 2020 +0100 Fix spelling mistake in lazyfree.c commit 1da37aa Merge: 2d4ba28 af347a8 Author: hwware <wen.hui.ware@gmail.com> Date: Thu Mar 5 22:41:31 2020 -0500 Merge remote-tracking branch 'upstream/unstable' into expiretypofix commit 2d4ba28 Author: hwware <wen.hui.ware@gmail.com> Date: Mon Mar 2 00:09:40 2020 -0500 fix typo in expire.c commit 1a746f7 Author: SennoYuki <minakami1yuki@gmail.com> Date: Thu Feb 27 16:54:32 2020 +0800 fix typo commit 8599b1a Author: dongheejeong <donghee950403@gmail.com> Date: Sun Feb 16 20:31:43 2020 +0000 Fix typo in server.c commit f38d4e8 Author: hwware <wen.hui.ware@gmail.com> Date: Sun Feb 2 22:58:38 2020 -0500 fix typo in evict.c commit fe143fc Author: Leo Murillo <leonardo.murillo@gmail.com> Date: Sun Feb 2 01:57:22 2020 -0600 Fix a few typos in redis.conf commit 1ab4d21 Author: viraja1 <anchan.viraj@gmail.com> Date: Fri Dec 27 17:15:58 2019 +0530 Fix typo in Latency API docstring commit ca1f70e Author: gosth <danxuedexing@qq.com> Date: Wed Dec 18 15:18:02 2019 +0800 fix typo in sort.c commit a57c06b Author: ZYunH <zyunhjob@163.com> Date: Mon Dec 16 22:28:46 2019 +0800 fix-zset-typo commit b8c92b5 Author: git-hulk <hulk.website@gmail.com> Date: Mon Dec 16 15:51:42 2019 +0800 FIX: typo in cluster.c, onformation->information commit 9dd981c Author: wujm2007 <jim.wujm@gmail.com> Date: Mon Dec 16 09:37:52 2019 +0800 Fix typo commit e132d7a Author: Sebastien Williams-Wynn <s.williamswynn.mail@gmail.com> Date: Fri Nov 15 00:14:07 2019 +0000 Minor typo change commit 47f44d5 Author: happynote3966 <01ssrmikururudevice01@gmail.com> Date: Mon Nov 11 22:08:48 2019 +0900 fix comment typo in redis-cli.c commit b8bdb0d Author: fulei <fulei@kuaishou.com> Date: Wed Oct 16 18:00:17 2019 +0800 Fix a spelling mistake of comments in defragDictBucketCallback commit 0def46a Author: fulei <fulei@kuaishou.com> Date: Wed Oct 16 13:09:27 2019 +0800 fix some spelling mistakes of comments in defrag.c commit f3596fd Author: Phil Rajchgot <tophil@outlook.com> Date: Sun Oct 13 02:02:32 2019 -0400 Typo and grammar fixes Redis and its documentation are great -- just wanted to submit a few corrections in the spirit of Hacktoberfest. Thanks for all your work on this project. I use it all the time and it works beautifully. commit 2b928cd Author: KangZhiDong <worldkzd@gmail.com> Date: Sun Sep 1 07:03:11 2019 +0800 fix typos commit 33aea14 Author: Axlgrep <axlgrep@gmail.com> Date: Tue Aug 27 11:02:18 2019 +0800 Fixed eviction spelling issues commit e282a80 Author: Simen Flatby <simen@oms.no> Date: Tue Aug 20 15:25:51 2019 +0200 Update comments to reflect prop name In the comments the prop is referenced as replica-validity-factor, but it is really named cluster-replica-validity-factor. commit 74d1f9a Author: Jim Green <jimgreen2013@qq.com> Date: Tue Aug 20 20:00:31 2019 +0800 fix comment error, the code is ok commit eea1407 Author: Liao Tonglang <liaotonglang@gmail.com> Date: Fri May 31 10:16:18 2019 +0800 typo fix fix cna't to can't commit 0da553c Author: KAWACHI Takashi <tkawachi@gmail.com> Date: Wed Jul 17 00:38:16 2019 +0900 Fix typo commit 7fc8fb6 Author: Michael Prokop <mika@grml.org> Date: Tue May 28 17:58:42 2019 +0200 Typo fixes s/familar/familiar/ s/compatiblity/compatibility/ s/ ot / to / s/itsef/itself/ commit 5f46c9d Author: zhumoing <34539422+zhumoing@users.noreply.github.com> Date: Tue May 21 21:16:50 2019 +0800 typo-fixes typo-fixes commit 321dfe1 Author: wxisme <850885154@qq.com> Date: Sat Mar 16 15:10:55 2019 +0800 typo fix commit b4fb131 Merge: 267e0e6 3df1eb8 Author: Nikitas Bastas <nikitasbst@gmail.com> Date: Fri Feb 8 22:55:45 2019 +0200 Merge branch 'unstable' of antirez/redis into unstable commit 267e0e6 Author: Nikitas Bastas <nikitasbst@gmail.com> Date: Wed Jan 30 21:26:04 2019 +0200 Minor typo fix commit 30544e7 Author: inshal96 <39904558+inshal96@users.noreply.github.com> Date: Fri Jan 4 16:54:50 2019 +0500 remove an extra 'a' in the comments commit 337969d Author: BrotherGao <yangdongheng11@gmail.com> Date: Sat Dec 29 12:37:29 2018 +0800 fix typo in redis.conf commit 9f4b121 Merge: 423a030 e504583 Author: BrotherGao <yangdongheng@xiaomi.com> Date: Sat Dec 29 11:41:12 2018 +0800 Merge branch 'unstable' of antirez/redis into unstable commit 423a030 Merge: 42b02b7 46a51cd Author: 杨东衡 <yangdongheng@xiaomi.com> Date: Tue Dec 4 23:56:11 2018 +0800 Merge branch 'unstable' of antirez/redis into unstable commit 42b02b7 Merge: 68c0e6e b8febe6 Author: Dongheng Yang <yangdongheng11@gmail.com> Date: Sun Oct 28 15:54:23 2018 +0800 Merge pull request #1 from antirez/unstable update local data commit 714b589 Author: Christian <crifei93@gmail.com> Date: Fri Dec 28 01:17:26 2018 +0100 fix typo "resulution" commit e23259d Author: garenchan <1412950785@qq.com> Date: Wed Dec 26 09:58:35 2018 +0800 fix typo: segfauls -> segfault commit a9359f8 Author: xjp <jianping_xie@aliyun.com> Date: Tue Dec 18 17:31:44 2018 +0800 Fixed REDISMODULE_H spell bug commit a12c3e4 Author: jdiaz <jrd.palacios@gmail.com> Date: Sat Dec 15 23:39:52 2018 -0600 Fixes hyperloglog hash function comment block description commit 770eb11 Author: 林上耀 <1210tom@163.com> Date: Sun Nov 25 17:16:10 2018 +0800 fix typo commit fd97fbb Author: Chris Lamb <chris@chris-lamb.co.uk> Date: Fri Nov 23 17:14:01 2018 +0100 Correct "unsupported" typo. commit a85522d Author: Jungnam Lee <jungnam.lee@oracle.com> Date: Thu Nov 8 23:01:29 2018 +0900 fix typo in test comments commit ade8007 Author: Arun Kumar <palerdot@users.noreply.github.com> Date: Tue Oct 23 16:56:35 2018 +0530 Fixed grammatical typo Fixed typo for word 'dictionary' commit 869ee39 Author: Hamid Alaei <hamid.a85@gmail.com> Date: Sun Aug 12 16:40:02 2018 +0430 fix documentations: (ThreadSafeContextStart/Stop -> ThreadSafeContextLock/Unlock), minor typo commit f89d158 Author: Mayank Jain <mayankjain255@gmail.com> Date: Tue Jul 31 23:01:21 2018 +0530 Updated README.md with some spelling corrections. Made correction in spelling of some misspelled words. commit 892198e Author: dsomeshwar <someshwar.dhayalan@gmail.com> Date: Sat Jul 21 23:23:04 2018 +0530 typo fix commit 8a4d780 Author: Itamar Haber <itamar@redislabs.com> Date: Mon Apr 30 02:06:52 2018 +0300 Fixes some typos commit e3acef6 Author: Noah Rosamilia <ivoahivoah@gmail.com> Date: Sat Mar 3 23:41:21 2018 -0500 Fix typo in /deps/README.md commit 04442fb Author: WuYunlong <xzsyeb@126.com> Date: Sat Mar 3 10:32:42 2018 +0800 Fix typo in readSyncBulkPayload() comment. commit 9f36880 Author: WuYunlong <xzsyeb@126.com> Date: Sat Mar 3 10:20:37 2018 +0800 replication.c comment: run_id -> replid. commit f866b4a Author: Francesco 'makevoid' Canessa <makevoid@gmail.com> Date: Thu Feb 22 22:01:56 2018 +0000 fix comment typo in server.c commit 0ebc69b Author: 줍 <jubee0124@gmail.com> Date: Mon Feb 12 16:38:48 2018 +0900 Fix typo in redis.conf Fix `five behaviors` to `eight behaviors` in [this sentence ](antirez/redis@unstable/redis.conf#L564) commit b50a620 Author: martinbroadhurst <martinbroadhurst@users.noreply.github.com> Date: Thu Dec 28 12:07:30 2017 +0000 Fix typo in valgrind.sup commit 7d8f349 Author: Peter Boughton <peter@sorcerersisle.com> Date: Mon Nov 27 19:52:19 2017 +0000 Update CONTRIBUTING; refer doc updates to redis-doc repo. commit 02dec7e Author: Klauswk <klauswk1@hotmail.com> Date: Tue Oct 24 16:18:38 2017 -0200 Fix typo in comment commit e1efbc8 Author: chenshi <baiwfg2@gmail.com> Date: Tue Oct 3 18:26:30 2017 +0800 Correct two spelling errors of comments commit 93327d8 Author: spacewander <spacewanderlzx@gmail.com> Date: Wed Sep 13 16:47:24 2017 +0800 Update the comment for OBJ_ENCODING_EMBSTR_SIZE_LIMIT's value The value of OBJ_ENCODING_EMBSTR_SIZE_LIMIT is 44 now instead of 39. commit 63d361f Author: spacewander <spacewanderlzx@gmail.com> Date: Tue Sep 12 15:06:42 2017 +0800 Fix <prevlen> related doc in ziplist.c According to the definition of ZIP_BIG_PREVLEN and other related code, the guard of single byte <prevlen> should be 254 instead of 255. commit ebe228d Author: hanael80 <hanael80@gmail.com> Date: Tue Aug 15 09:09:40 2017 +0900 Fix typo commit 6b696e6 Author: Matt Robenolt <matt@ydekproductions.com> Date: Mon Aug 14 14:50:47 2017 -0700 Fix typo in LATENCY DOCTOR output commit a2ec6ae Author: caosiyang <caosiyang@qiyi.com> Date: Tue Aug 15 14:15:16 2017 +0800 Fix a typo: form => from commit 3ab7699 Author: caosiyang <caosiyang@qiyi.com> Date: Thu Aug 10 18:40:33 2017 +0800 Fix a typo: replicationFeedSlavesFromMaster() => replicationFeedSlavesFromMasterStream() commit 72d43ef Author: caosiyang <caosiyang@qiyi.com> Date: Tue Aug 8 15:57:25 2017 +0800 fix a typo: servewr => server commit 707c958 Author: Bo Cai <charpty@gmail.com> Date: Wed Jul 26 21:49:42 2017 +0800 redis-cli.c typo: conut -> count. Signed-off-by: Bo Cai <charpty@gmail.com> commit b9385b2 Author: JackDrogon <jack.xsuperman@gmail.com> Date: Fri Jun 30 14:22:31 2017 +0800 Fix some spell problems commit 20d9230 Author: akosel <aaronjkosel@gmail.com> Date: Sun Jun 4 19:35:13 2017 -0500 Fix typo commit b167bfc Author: Krzysiek Witkowicz <krzysiekwitkowicz@gmail.com> Date: Mon May 22 21:32:27 2017 +0100 Fix #4008 small typo in comment commit 2b78ac8 Author: Jake Clarkson <jacobwclarkson@gmail.com> Date: Wed Apr 26 15:49:50 2017 +0100 Correct typo in tests/unit/hyperloglog.tcl commit b0f1cdb Author: Qi Luo <qiluo-msft@users.noreply.github.com> Date: Wed Apr 19 14:25:18 2017 -0700 Fix typo commit a90b0f9 Author: charsyam <charsyam@naver.com> Date: Thu Mar 16 18:19:53 2017 +0900 fix typos fix typos fix typos commit 8430a79 Author: Richard Hart <richardhart92@gmail.com> Date: Mon Mar 13 22:17:41 2017 -0400 Fixed log message typo in listenToPort. commit 481a1c2 Author: Vinod Kumar <kumar003vinod@gmail.com> Date: Sun Jan 15 23:04:51 2017 +0530 src/db.c: Correct "save" -> "safe" typo commit 586b4d3 Author: wangshaonan <wshn13@gmail.com> Date: Wed Dec 21 20:28:27 2016 +0800 Fix typo they->the in helloworld.c commit c1c4b5e Author: Jenner <hypxm@qq.com> Date: Mon Dec 19 16:39:46 2016 +0800 typo error commit 1ee1a3f Author: tielei <43289893@qq.com> Date: Mon Jul 18 13:52:25 2016 +0800 fix some comments commit 11a41fb Author: Otto Kekäläinen <otto@seravo.fi> Date: Sun Jul 3 10:23:55 2016 +0100 Fix spelling in documentation and comments commit 5fb5d82 Author: francischan <f1ancis621@gmail.com> Date: Tue Jun 28 00:19:33 2016 +0800 Fix outdated comments about redis.c file. It should now refer to server.c file. commit 6b254bc Author: lmatt-bit <lmatt123n@gmail.com> Date: Thu Apr 21 21:45:58 2016 +0800 Refine the comment of dictRehashMilliseconds func SLAVECONF->REPLCONF in comment - by andyli029 commit ee9869f Author: clark.kang <charsyam@naver.com> Date: Tue Mar 22 11:09:51 2016 +0900 fix typos commit f7b3b11 Author: Harisankar H <harisankarh@gmail.com> Date: Wed Mar 9 11:49:42 2016 +0530 Typo correction: "faield" --> "failed" Typo correction: "faield" --> "failed" commit 3fd40fc Author: Itamar Haber <itamar@redislabs.com> Date: Thu Feb 25 10:31:51 2016 +0200 Fixes a typo in comments commit 621c160 Author: Prayag Verma <prayag.verma@gmail.com> Date: Mon Feb 1 12:36:20 2016 +0530 Fix typo in Readme.md Spelling mistakes - `eviciton` > `eviction` `familar` > `familiar` commit d7d07d6 Author: WonCheol Lee <toctoc21c@gmail.com> Date: Wed Dec 30 15:11:34 2015 +0900 Typo fixed commit a4dade7 Author: Felix Bünemann <buenemann@louis.info> Date: Mon Dec 28 11:02:55 2015 +0100 [ci skip] Improve supervised upstart config docs This mentions that "expect stop" is required for supervised upstart to work correctly. See http://upstart.ubuntu.com/cookbook/#expect-stop for an explanation. commit d9caba9 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:30:03 2015 +1100 README: Remove trailing whitespace commit 72d42e5 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:29:32 2015 +1100 README: Fix typo. th => the commit dd6e957 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:29:20 2015 +1100 README: Fix typo. familar => familiar commit 3a12b23 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:28:54 2015 +1100 README: Fix typo. eviciton => eviction commit 2d1d03b Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:21:45 2015 +1100 README: Fix typo. sever => server commit 3973b06 Author: Itamar Haber <itamar@garantiadata.com> Date: Sat Dec 19 17:01:20 2015 +0200 Typo fix commit 4f2e460 Author: Steve Gao <fu@2token.com> Date: Fri Dec 4 10:22:05 2015 +0800 Update README - fix typos commit b21667c Author: binyan <binbin.yan@nokia.com> Date: Wed Dec 2 22:48:37 2015 +0800 delete redundancy color judge in sdscatcolor commit 88894c7 Author: binyan <binbin.yan@nokia.com> Date: Wed Dec 2 22:14:42 2015 +0800 the example output shoule be HelloWorld commit 2763470 Author: binyan <binbin.yan@nokia.com> Date: Wed Dec 2 17:41:39 2015 +0800 modify error word keyevente Signed-off-by: binyan <binbin.yan@nokia.com> commit 0847b3d Author: Bruno Martins <bscmartins@gmail.com> Date: Wed Nov 4 11:37:01 2015 +0000 typo commit bbb9e9e Author: dawedawe <dawedawe@gmx.de> Date: Fri Mar 27 00:46:41 2015 +0100 typo: zimap -> zipmap commit 5ed297e Author: Axel Advento <badwolf.bloodseeker.rev@gmail.com> Date: Tue Mar 3 15:58:29 2015 +0800 Fix 'salve' typos to 'slave' commit edec9d6 Author: LudwikJaniuk <ludvig.janiuk@gmail.com> Date: Wed Jun 12 14:12:47 2019 +0200 Update README.md Co-Authored-By: Qix <Qix-@users.noreply.github.com> commit 692a7af Author: LudwikJaniuk <ludvig.janiuk@gmail.com> Date: Tue May 28 14:32:04 2019 +0200 grammar commit d962b0a Author: Nick Frost <nickfrostatx@gmail.com> Date: Wed Jul 20 15:17:12 2016 -0700 Minor grammar fix commit 24fff01aaccaf5956973ada8c50ceb1462e211c6 (typos) Author: Chad Miller <chadm@squareup.com> Date: Tue Sep 8 13:46:11 2020 -0400 Fix faulty comment about operation of unlink() commit 3cd5c1f3326c52aa552ada7ec797c6bb16452355 Author: Kevin <kevin.xgr@gmail.com> Date: Wed Nov 20 00:13:50 2019 +0800 Fix typo in server.c. From a83af59 Mon Sep 17 00:00:00 2001 From: wuwo <wuwo@wacai.com> Date: Fri, 17 Mar 2017 20:37:45 +0800 Subject: [PATCH] falure to failure From c961896 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B7=A6=E6=87=B6?= <veficos@gmail.com> Date: Sat, 27 May 2017 15:33:04 +0800 Subject: [PATCH] fix typo From e600ef2 Mon Sep 17 00:00:00 2001 From: "rui.zou" <rui.zou@yunify.com> Date: Sat, 30 Sep 2017 12:38:15 +0800 Subject: [PATCH] fix a typo From c7d07fa Mon Sep 17 00:00:00 2001 From: Alexandre Perrin <alex@kaworu.ch> Date: Thu, 16 Aug 2018 10:35:31 +0200 Subject: [PATCH] deps README.md typo From b25cb67 Mon Sep 17 00:00:00 2001 From: Guy Korland <gkorland@gmail.com> Date: Wed, 26 Sep 2018 10:55:37 +0300 Subject: [PATCH 1/2] fix typos in header From ad28ca6 Mon Sep 17 00:00:00 2001 From: Guy Korland <gkorland@gmail.com> Date: Wed, 26 Sep 2018 11:02:36 +0300 Subject: [PATCH 2/2] fix typos commit 34924cdedd8552466fc22c1168d49236cb7ee915 Author: Adrian Lynch <adi_ady_ade@hotmail.com> Date: Sat Apr 4 21:59:15 2015 +0100 Typos fixed commit fd2a1e7 Author: Jan <jsteemann@users.noreply.github.com> Date: Sat Oct 27 19:13:01 2018 +0200 Fix typos Fix typos commit e14e47c1a234b53b0e103c5f6a1c61481cbcbb02 Author: Andy Lester <andy@petdance.com> Date: Fri Aug 2 22:30:07 2019 -0500 Fix multiple misspellings of "following" commit 79b948ce2dac6b453fe80995abbcaac04c213d5a Author: Andy Lester <andy@petdance.com> Date: Fri Aug 2 22:24:28 2019 -0500 Fix misspelling of create-cluster commit 1fffde52666dc99ab35efbd31071a4c008cb5a71 Author: Andy Lester <andy@petdance.com> Date: Wed Jul 31 17:57:56 2019 -0500 Fix typos commit 204c9ba9651e9e05fd73936b452b9a30be456cfe Author: Xiaobo Zhu <xiaobo.zhu@shopee.com> Date: Tue Aug 13 22:19:25 2019 +0800 fix typos Squashed commit of the following: commit 1d9aaf8 Author: danmedani <danmedani@gmail.com> Date: Sun Aug 2 11:40:26 2015 -0700 README typo fix. Squashed commit of the following: commit 32bfa7c Author: Erik Dubbelboer <erik@dubbelboer.com> Date: Mon Jul 6 21:15:08 2015 +0200 Fixed grammer Squashed commit of the following: commit b24f69c Author: Sisir Koppaka <sisir.koppaka@gmail.com> Date: Mon Mar 2 22:38:45 2015 -0500 utils/hashtable/rehashing.c: Fix typos Squashed commit of the following: commit 4e04082 Author: Erik Dubbelboer <erik@dubbelboer.com> Date: Mon Mar 23 08:22:21 2015 +0000 Small config file documentation improvements Squashed commit of the following: commit acb8773 Author: ctd1500 <ctd1500@gmail.com> Date: Fri May 8 01:52:48 2015 -0700 Typo and grammar fixes in readme commit 2eb75b6 Author: ctd1500 <ctd1500@gmail.com> Date: Fri May 8 01:36:18 2015 -0700 fixed redis.conf comment Squashed commit of the following: commit a8249a2 Author: Masahiko Sawada <sawada.mshk@gmail.com> Date: Fri Dec 11 11:39:52 2015 +0530 Revise correction of typos. Squashed commit of the following: commit 3c02028 Author: zhaojun11 <zhaojun11@jd.com> Date: Wed Jan 17 19:05:28 2018 +0800 Fix typos include two code typos in cluster.c and latency.c Squashed commit of the following: commit 9dba47c Author: q191201771 <191201771@qq.com> Date: Sat Jan 4 11:31:04 2020 +0800 fix function listCreate comment in adlist.c Update src/server.c commit 2c7c2cb536e78dd211b1ac6f7bda00f0f54faaeb Author: charpty <charpty@gmail.com> Date: Tue May 1 23:16:59 2018 +0800 server.c typo: modules system dictionary type comment Signed-off-by: charpty <charpty@gmail.com> commit a8395323fb63cb59cb3591cb0f0c8edb7c29a680 Author: Itamar Haber <itamar@redislabs.com> Date: Sun May 6 00:25:18 2018 +0300 Updates test_helper.tcl's help with undocumented options Specifically: * Host * Port * Client commit bde6f9ced15755cd6407b4af7d601b030f36d60b Author: wxisme <850885154@qq.com> Date: Wed Aug 8 15:19:19 2018 +0800 fix comments in deps files commit 3172474ba991532ab799ee1873439f3402412331 Author: wxisme <850885154@qq.com> Date: Wed Aug 8 14:33:49 2018 +0800 fix some comments commit 01b6f2b6858b5cf2ce4ad5092d2c746e755f53f0 Author: Thor Juhasz <thor@juhasz.pro> Date: Sun Nov 18 14:37:41 2018 +0100 Minor fixes to comments Found some parts a little unclear on a first read, which prompted me to have a better look at the file and fix some minor things I noticed. Fixing minor typos and grammar. There are no changes to configuration options. These changes are only meant to help the user better understand the explanations to the various configuration options
2020-09-10 06:43:38 -04:00
* triggering an error condition. */
if (bysignal != SIGUSR1)
server.lastbgsave_status = C_ERR;
}
}
/* A background saving child (BGSAVE) terminated its work. Handle this.
2018-03-16 04:59:17 -04:00
* This function covers the case of RDB -> Slaves socket transfers for
* diskless replication. */
static void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) {
if (!bysignal && exitcode == 0) {
2015-07-27 03:41:48 -04:00
serverLog(LL_NOTICE,
"Background RDB transfer terminated with success");
} else if (!bysignal && exitcode != 0) {
2015-07-27 03:41:48 -04:00
serverLog(LL_WARNING, "Background transfer error");
} else {
2015-07-27 03:41:48 -04:00
serverLog(LL_WARNING,
"Background transfer terminated by signal %d", bysignal);
}
if diskless repl child is killed, make sure to reap the pid (#7742) Starting redis 6.0 and the changes we made to the diskless master to be suitable for TLS, I made the master avoid reaping (wait3) the pid of the child until we know all replicas are done reading their rdb. I did that in order to avoid a state where the rdb_child_pid is -1 but we don't yet want to start another fork (still busy serving that data to replicas). It turns out that the solution used so far was problematic in case the fork child was being killed (e.g. by the kernel OOM killer), in that case there's a chance that we currently disabled the read event on the rdb pipe, since we're waiting for a replica to become writable again. and in that scenario the master would have never realized the child exited, and the replica will remain hung too. Note that there's no mechanism to detect a hung replica while it's in rdb transfer state. The solution here is to add another pipe which is used by the parent to tell the child it is safe to exit. this mean that when the child exits, for whatever reason, it is safe to reap it. Besides that, i'm re-introducing an adjustment to REPLCONF ACK which was part of #6271 (Accelerate diskless master connections) but was dropped when that PR was rebased after the TLS fork/pipe changes (5a47794). Now that RdbPipeCleanup no longer calls checkChildrenDone, and the ACK has chance to detect that the child exited, it should be the one to call it so that we don't have to wait for cron (server.hz) to do that.
2020-09-06 09:43:57 -04:00
if (server.rdb_child_exit_pipe!=-1)
close(server.rdb_child_exit_pipe);
close(server.rdb_pipe_read);
server.rdb_child_exit_pipe = -1;
server.rdb_pipe_read = -1;
zfree(server.rdb_pipe_conns);
server.rdb_pipe_conns = NULL;
server.rdb_pipe_numconns = 0;
server.rdb_pipe_numconns_writing = 0;
zfree(server.rdb_pipe_buff);
server.rdb_pipe_buff = NULL;
server.rdb_pipe_bufflen = 0;
}
/* When a background RDB saving/transfer terminates, call the right handler. */
void backgroundSaveDoneHandler(int exitcode, int bysignal) {
int type = server.rdb_child_type;
switch(server.rdb_child_type) {
2015-07-27 03:41:48 -04:00
case RDB_CHILD_TYPE_DISK:
backgroundSaveDoneHandlerDisk(exitcode,bysignal);
break;
2015-07-27 03:41:48 -04:00
case RDB_CHILD_TYPE_SOCKET:
backgroundSaveDoneHandlerSocket(exitcode,bysignal);
break;
default:
2015-07-27 03:41:48 -04:00
serverPanic("Unknown RDB child type.");
break;
}
server.rdb_child_pid = -1;
server.rdb_child_type = RDB_CHILD_TYPE_NONE;
server.rdb_save_time_last = time(NULL)-server.rdb_save_time_start;
server.rdb_save_time_start = -1;
/* Possibly there are slaves waiting for a BGSAVE in order to be served
* (the first stage of SYNC is a bulk transfer of dump.rdb) */
updateSlavesWaitingBgsave((!bysignal && exitcode == 0) ? C_OK : C_ERR, type);
}
/* Kill the RDB saving child using SIGUSR1 (so that the parent will know
* the child did not exit for an error, but because we wanted), and performs
* the cleanup needed. */
void killRDBChild(void) {
kill(server.rdb_child_pid,SIGUSR1);
rdbRemoveTempFile(server.rdb_child_pid, 0);
closeChildInfoPipe();
updateDictResizePolicy();
}
/* Spawn an RDB child that writes the RDB to the sockets of the slaves
2015-07-27 03:41:48 -04:00
* that are currently in SLAVE_STATE_WAIT_BGSAVE_START state. */
PSYNC2: different improvements to Redis replication. The gist of the changes is that now, partial resynchronizations between slaves and masters (without the need of a full resync with RDB transfer and so forth), work in a number of cases when it was impossible in the past. For instance: 1. When a slave is promoted to mastrer, the slaves of the old master can partially resynchronize with the new master. 2. Chained slalves (slaves of slaves) can be moved to replicate to other slaves or the master itsef, without requiring a full resync. 3. The master itself, after being turned into a slave, is able to partially resynchronize with the new master, when it joins replication again. In order to obtain this, the following main changes were operated: * Slaves also take a replication backlog, not just masters. * Same stream replication for all the slaves and sub slaves. The replication stream is identical from the top level master to its slaves and is also the same from the slaves to their sub-slaves and so forth. This means that if a slave is later promoted to master, it has the same replication backlong, and can partially resynchronize with its slaves (that were previously slaves of the old master). * A given replication history is no longer identified by the `runid` of a Redis node. There is instead a `replication ID` which changes every time the instance has a new history no longer coherent with the past one. So, for example, slaves publish the same replication history of their master, however when they are turned into masters, they publish a new replication ID, but still remember the old ID, so that they are able to partially resynchronize with slaves of the old master (up to a given offset). * The replication protocol was slightly modified so that a new extended +CONTINUE reply from the master is able to inform the slave of a replication ID change. * REPLCONF CAPA is used in order to notify masters that a slave is able to understand the new +CONTINUE reply. * The RDB file was extended with an auxiliary field that is able to select a given DB after loading in the slave, so that the slave can continue receiving the replication stream from the point it was disconnected without requiring the master to insert "SELECT" statements. This is useful in order to guarantee the "same stream" property, because the slave must be able to accumulate an identical backlog. * Slave pings to sub-slaves are now sent in a special form, when the top-level master is disconnected, in order to don't interfer with the replication stream. We just use out of band "\n" bytes as in other parts of the Redis protocol. An old design document is available here: https://gist.github.com/antirez/ae068f95c0d084891305 However the implementation is not identical to the description because during the work to implement it, different changes were needed in order to make things working well.
2016-11-09 05:31:06 -05:00
int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) {
listNode *ln;
listIter li;
pid_t childpid;
if diskless repl child is killed, make sure to reap the pid (#7742) Starting redis 6.0 and the changes we made to the diskless master to be suitable for TLS, I made the master avoid reaping (wait3) the pid of the child until we know all replicas are done reading their rdb. I did that in order to avoid a state where the rdb_child_pid is -1 but we don't yet want to start another fork (still busy serving that data to replicas). It turns out that the solution used so far was problematic in case the fork child was being killed (e.g. by the kernel OOM killer), in that case there's a chance that we currently disabled the read event on the rdb pipe, since we're waiting for a replica to become writable again. and in that scenario the master would have never realized the child exited, and the replica will remain hung too. Note that there's no mechanism to detect a hung replica while it's in rdb transfer state. The solution here is to add another pipe which is used by the parent to tell the child it is safe to exit. this mean that when the child exits, for whatever reason, it is safe to reap it. Besides that, i'm re-introducing an adjustment to REPLCONF ACK which was part of #6271 (Accelerate diskless master connections) but was dropped when that PR was rebased after the TLS fork/pipe changes (5a47794). Now that RdbPipeCleanup no longer calls checkChildrenDone, and the ACK has chance to detect that the child exited, it should be the one to call it so that we don't have to wait for cron (server.hz) to do that.
2020-09-06 09:43:57 -04:00
int pipefds[2], rdb_pipe_write, safe_to_exit_pipe;
if (hasActiveChildProcess()) return C_ERR;
/* Even if the previous fork child exited, don't start a new one until we
* drained the pipe. */
if (server.rdb_pipe_conns) return C_ERR;
/* Before to fork, create a pipe that is used to transfer the rdb bytes to
* the parent, we can't let it write directly to the sockets, since in case
* of TLS we must let the parent handle a continuous TLS state when the
* child terminates and parent takes over. */
if (pipe(pipefds) == -1) return C_ERR;
if diskless repl child is killed, make sure to reap the pid (#7742) Starting redis 6.0 and the changes we made to the diskless master to be suitable for TLS, I made the master avoid reaping (wait3) the pid of the child until we know all replicas are done reading their rdb. I did that in order to avoid a state where the rdb_child_pid is -1 but we don't yet want to start another fork (still busy serving that data to replicas). It turns out that the solution used so far was problematic in case the fork child was being killed (e.g. by the kernel OOM killer), in that case there's a chance that we currently disabled the read event on the rdb pipe, since we're waiting for a replica to become writable again. and in that scenario the master would have never realized the child exited, and the replica will remain hung too. Note that there's no mechanism to detect a hung replica while it's in rdb transfer state. The solution here is to add another pipe which is used by the parent to tell the child it is safe to exit. this mean that when the child exits, for whatever reason, it is safe to reap it. Besides that, i'm re-introducing an adjustment to REPLCONF ACK which was part of #6271 (Accelerate diskless master connections) but was dropped when that PR was rebased after the TLS fork/pipe changes (5a47794). Now that RdbPipeCleanup no longer calls checkChildrenDone, and the ACK has chance to detect that the child exited, it should be the one to call it so that we don't have to wait for cron (server.hz) to do that.
2020-09-06 09:43:57 -04:00
server.rdb_pipe_read = pipefds[0]; /* read end */
rdb_pipe_write = pipefds[1]; /* write end */
anetNonBlock(NULL, server.rdb_pipe_read);
if diskless repl child is killed, make sure to reap the pid (#7742) Starting redis 6.0 and the changes we made to the diskless master to be suitable for TLS, I made the master avoid reaping (wait3) the pid of the child until we know all replicas are done reading their rdb. I did that in order to avoid a state where the rdb_child_pid is -1 but we don't yet want to start another fork (still busy serving that data to replicas). It turns out that the solution used so far was problematic in case the fork child was being killed (e.g. by the kernel OOM killer), in that case there's a chance that we currently disabled the read event on the rdb pipe, since we're waiting for a replica to become writable again. and in that scenario the master would have never realized the child exited, and the replica will remain hung too. Note that there's no mechanism to detect a hung replica while it's in rdb transfer state. The solution here is to add another pipe which is used by the parent to tell the child it is safe to exit. this mean that when the child exits, for whatever reason, it is safe to reap it. Besides that, i'm re-introducing an adjustment to REPLCONF ACK which was part of #6271 (Accelerate diskless master connections) but was dropped when that PR was rebased after the TLS fork/pipe changes (5a47794). Now that RdbPipeCleanup no longer calls checkChildrenDone, and the ACK has chance to detect that the child exited, it should be the one to call it so that we don't have to wait for cron (server.hz) to do that.
2020-09-06 09:43:57 -04:00
/* create another pipe that is used by the parent to signal to the child
* that it can exit. */
if (pipe(pipefds) == -1) {
close(rdb_pipe_write);
close(server.rdb_pipe_read);
return C_ERR;
}
safe_to_exit_pipe = pipefds[0]; /* read end */
server.rdb_child_exit_pipe = pipefds[1]; /* write end */
/* Collect the connections of the replicas we want to transfer
* the RDB to, which are i WAIT_BGSAVE_START state. */
server.rdb_pipe_conns = zmalloc(sizeof(connection *)*listLength(server.slaves));
server.rdb_pipe_numconns = 0;
server.rdb_pipe_numconns_writing = 0;
listRewind(server.slaves,&li);
while((ln = listNext(&li))) {
client *slave = ln->value;
2015-07-27 03:41:48 -04:00
if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) {
server.rdb_pipe_conns[server.rdb_pipe_numconns++] = slave->conn;
replicationSetupSlaveForFullResync(slave,getPsyncInitialOffset());
}
}
/* Create the child process. */
openChildInfoPipe();
if ((childpid = redisFork(CHILD_TYPE_RDB)) == 0) {
/* Child */
if diskless repl child is killed, make sure to reap the pid (#7742) Starting redis 6.0 and the changes we made to the diskless master to be suitable for TLS, I made the master avoid reaping (wait3) the pid of the child until we know all replicas are done reading their rdb. I did that in order to avoid a state where the rdb_child_pid is -1 but we don't yet want to start another fork (still busy serving that data to replicas). It turns out that the solution used so far was problematic in case the fork child was being killed (e.g. by the kernel OOM killer), in that case there's a chance that we currently disabled the read event on the rdb pipe, since we're waiting for a replica to become writable again. and in that scenario the master would have never realized the child exited, and the replica will remain hung too. Note that there's no mechanism to detect a hung replica while it's in rdb transfer state. The solution here is to add another pipe which is used by the parent to tell the child it is safe to exit. this mean that when the child exits, for whatever reason, it is safe to reap it. Besides that, i'm re-introducing an adjustment to REPLCONF ACK which was part of #6271 (Accelerate diskless master connections) but was dropped when that PR was rebased after the TLS fork/pipe changes (5a47794). Now that RdbPipeCleanup no longer calls checkChildrenDone, and the ACK has chance to detect that the child exited, it should be the one to call it so that we don't have to wait for cron (server.hz) to do that.
2020-09-06 09:43:57 -04:00
int retval, dummy;
rio rdb;
if diskless repl child is killed, make sure to reap the pid (#7742) Starting redis 6.0 and the changes we made to the diskless master to be suitable for TLS, I made the master avoid reaping (wait3) the pid of the child until we know all replicas are done reading their rdb. I did that in order to avoid a state where the rdb_child_pid is -1 but we don't yet want to start another fork (still busy serving that data to replicas). It turns out that the solution used so far was problematic in case the fork child was being killed (e.g. by the kernel OOM killer), in that case there's a chance that we currently disabled the read event on the rdb pipe, since we're waiting for a replica to become writable again. and in that scenario the master would have never realized the child exited, and the replica will remain hung too. Note that there's no mechanism to detect a hung replica while it's in rdb transfer state. The solution here is to add another pipe which is used by the parent to tell the child it is safe to exit. this mean that when the child exits, for whatever reason, it is safe to reap it. Besides that, i'm re-introducing an adjustment to REPLCONF ACK which was part of #6271 (Accelerate diskless master connections) but was dropped when that PR was rebased after the TLS fork/pipe changes (5a47794). Now that RdbPipeCleanup no longer calls checkChildrenDone, and the ACK has chance to detect that the child exited, it should be the one to call it so that we don't have to wait for cron (server.hz) to do that.
2020-09-06 09:43:57 -04:00
rioInitWithFd(&rdb,rdb_pipe_write);
redisSetProcTitle("redis-rdb-to-slaves");
redisSetCpuAffinity(server.bgsave_cpulist);
retval = rdbSaveRioWithEOFMark(&rdb,NULL,rsi);
if (retval == C_OK && rioFlush(&rdb) == 0)
retval = C_ERR;
if (retval == C_OK) {
sendChildCOWInfo(CHILD_TYPE_RDB, "RDB");
}
rioFreeFd(&rdb);
if diskless repl child is killed, make sure to reap the pid (#7742) Starting redis 6.0 and the changes we made to the diskless master to be suitable for TLS, I made the master avoid reaping (wait3) the pid of the child until we know all replicas are done reading their rdb. I did that in order to avoid a state where the rdb_child_pid is -1 but we don't yet want to start another fork (still busy serving that data to replicas). It turns out that the solution used so far was problematic in case the fork child was being killed (e.g. by the kernel OOM killer), in that case there's a chance that we currently disabled the read event on the rdb pipe, since we're waiting for a replica to become writable again. and in that scenario the master would have never realized the child exited, and the replica will remain hung too. Note that there's no mechanism to detect a hung replica while it's in rdb transfer state. The solution here is to add another pipe which is used by the parent to tell the child it is safe to exit. this mean that when the child exits, for whatever reason, it is safe to reap it. Besides that, i'm re-introducing an adjustment to REPLCONF ACK which was part of #6271 (Accelerate diskless master connections) but was dropped when that PR was rebased after the TLS fork/pipe changes (5a47794). Now that RdbPipeCleanup no longer calls checkChildrenDone, and the ACK has chance to detect that the child exited, it should be the one to call it so that we don't have to wait for cron (server.hz) to do that.
2020-09-06 09:43:57 -04:00
/* wake up the reader, tell it we're done. */
close(rdb_pipe_write);
close(server.rdb_child_exit_pipe); /* close write end so that we can detect the close on the parent. */
/* hold exit until the parent tells us it's safe. we're not expecting
* to read anything, just get the error when the pipe is closed. */
dummy = read(safe_to_exit_pipe, pipefds, 1);
UNUSED(dummy);
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* Parent */
if diskless repl child is killed, make sure to reap the pid (#7742) Starting redis 6.0 and the changes we made to the diskless master to be suitable for TLS, I made the master avoid reaping (wait3) the pid of the child until we know all replicas are done reading their rdb. I did that in order to avoid a state where the rdb_child_pid is -1 but we don't yet want to start another fork (still busy serving that data to replicas). It turns out that the solution used so far was problematic in case the fork child was being killed (e.g. by the kernel OOM killer), in that case there's a chance that we currently disabled the read event on the rdb pipe, since we're waiting for a replica to become writable again. and in that scenario the master would have never realized the child exited, and the replica will remain hung too. Note that there's no mechanism to detect a hung replica while it's in rdb transfer state. The solution here is to add another pipe which is used by the parent to tell the child it is safe to exit. this mean that when the child exits, for whatever reason, it is safe to reap it. Besides that, i'm re-introducing an adjustment to REPLCONF ACK which was part of #6271 (Accelerate diskless master connections) but was dropped when that PR was rebased after the TLS fork/pipe changes (5a47794). Now that RdbPipeCleanup no longer calls checkChildrenDone, and the ACK has chance to detect that the child exited, it should be the one to call it so that we don't have to wait for cron (server.hz) to do that.
2020-09-06 09:43:57 -04:00
close(safe_to_exit_pipe);
if (childpid == -1) {
2015-07-27 03:41:48 -04:00
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* Undo the state change. The caller will perform cleanup on
* all the slaves in BGSAVE_START state, but an early call to
* replicationSetupSlaveForFullResync() turned it into BGSAVE_END */
listRewind(server.slaves,&li);
while((ln = listNext(&li))) {
client *slave = ln->value;
if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) {
slave->replstate = SLAVE_STATE_WAIT_BGSAVE_START;
}
}
if diskless repl child is killed, make sure to reap the pid (#7742) Starting redis 6.0 and the changes we made to the diskless master to be suitable for TLS, I made the master avoid reaping (wait3) the pid of the child until we know all replicas are done reading their rdb. I did that in order to avoid a state where the rdb_child_pid is -1 but we don't yet want to start another fork (still busy serving that data to replicas). It turns out that the solution used so far was problematic in case the fork child was being killed (e.g. by the kernel OOM killer), in that case there's a chance that we currently disabled the read event on the rdb pipe, since we're waiting for a replica to become writable again. and in that scenario the master would have never realized the child exited, and the replica will remain hung too. Note that there's no mechanism to detect a hung replica while it's in rdb transfer state. The solution here is to add another pipe which is used by the parent to tell the child it is safe to exit. this mean that when the child exits, for whatever reason, it is safe to reap it. Besides that, i'm re-introducing an adjustment to REPLCONF ACK which was part of #6271 (Accelerate diskless master connections) but was dropped when that PR was rebased after the TLS fork/pipe changes (5a47794). Now that RdbPipeCleanup no longer calls checkChildrenDone, and the ACK has chance to detect that the child exited, it should be the one to call it so that we don't have to wait for cron (server.hz) to do that.
2020-09-06 09:43:57 -04:00
close(rdb_pipe_write);
close(server.rdb_pipe_read);
zfree(server.rdb_pipe_conns);
server.rdb_pipe_conns = NULL;
server.rdb_pipe_numconns = 0;
server.rdb_pipe_numconns_writing = 0;
closeChildInfoPipe();
} else {
serverLog(LL_NOTICE,"Background RDB transfer started by pid %d",
childpid);
server.rdb_save_time_start = time(NULL);
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_SOCKET;
updateDictResizePolicy();
if diskless repl child is killed, make sure to reap the pid (#7742) Starting redis 6.0 and the changes we made to the diskless master to be suitable for TLS, I made the master avoid reaping (wait3) the pid of the child until we know all replicas are done reading their rdb. I did that in order to avoid a state where the rdb_child_pid is -1 but we don't yet want to start another fork (still busy serving that data to replicas). It turns out that the solution used so far was problematic in case the fork child was being killed (e.g. by the kernel OOM killer), in that case there's a chance that we currently disabled the read event on the rdb pipe, since we're waiting for a replica to become writable again. and in that scenario the master would have never realized the child exited, and the replica will remain hung too. Note that there's no mechanism to detect a hung replica while it's in rdb transfer state. The solution here is to add another pipe which is used by the parent to tell the child it is safe to exit. this mean that when the child exits, for whatever reason, it is safe to reap it. Besides that, i'm re-introducing an adjustment to REPLCONF ACK which was part of #6271 (Accelerate diskless master connections) but was dropped when that PR was rebased after the TLS fork/pipe changes (5a47794). Now that RdbPipeCleanup no longer calls checkChildrenDone, and the ACK has chance to detect that the child exited, it should be the one to call it so that we don't have to wait for cron (server.hz) to do that.
2020-09-06 09:43:57 -04:00
close(rdb_pipe_write); /* close write in parent so that it can detect the close on the child. */
if (aeCreateFileEvent(server.el, server.rdb_pipe_read, AE_READABLE, rdbPipeReadHandler,NULL) == AE_ERR) {
serverPanic("Unrecoverable error creating server.rdb_pipe_read file event.");
}
}
return (childpid == -1) ? C_ERR : C_OK;
}
return C_OK; /* Unreached. */
}
void saveCommand(client *c) {
2011-12-21 06:22:13 -05:00
if (server.rdb_child_pid != -1) {
addReplyError(c,"Background save already in progress");
return;
}
rdbSaveInfo rsi, *rsiptr;
rsiptr = rdbPopulateSaveInfo(&rsi);
if (rdbSave(server.rdb_filename,rsiptr) == C_OK) {
addReply(c,shared.ok);
} else {
addReply(c,shared.err);
}
}
/* BGSAVE [SCHEDULE] */
void bgsaveCommand(client *c) {
int schedule = 0;
/* The SCHEDULE option changes the behavior of BGSAVE when an AOF rewrite
* is in progress. Instead of returning an error a BGSAVE gets scheduled. */
if (c->argc > 1) {
if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"schedule")) {
schedule = 1;
} else {
addReply(c,shared.syntaxerr);
return;
}
}
rdbSaveInfo rsi, *rsiptr;
rsiptr = rdbPopulateSaveInfo(&rsi);
2011-12-21 06:22:13 -05:00
if (server.rdb_child_pid != -1) {
addReplyError(c,"Background save already in progress");
} else if (hasActiveChildProcess()) {
if (schedule) {
server.rdb_bgsave_scheduled = 1;
addReplyStatus(c,"Background saving scheduled");
} else {
addReplyError(c,
2019-09-27 05:59:37 -04:00
"Another child process is active (AOF?): can't BGSAVE right now. "
"Use BGSAVE SCHEDULE in order to schedule a BGSAVE whenever "
"possible.");
}
} else if (rdbSaveBackground(server.rdb_filename,rsiptr) == C_OK) {
addReplyStatus(c,"Background saving started");
} else {
addReply(c,shared.err);
}
}
/* Populate the rdbSaveInfo structure used to persist the replication
* information inside the RDB file. Currently the structure explicitly
* contains just the currently selected DB from the master stream, however
* if the rdbSave*() family functions receive a NULL rsi structure also
* the Replication ID/offset is not saved. The function popultes 'rsi'
* that is normally stack-allocated in the caller, returns the populated
* pointer if the instance has a valid master client, otherwise NULL
* is returned, and the RDB saving will not persist any replication related
* information. */
rdbSaveInfo *rdbPopulateSaveInfo(rdbSaveInfo *rsi) {
rdbSaveInfo rsi_init = RDB_SAVE_INFO_INIT;
*rsi = rsi_init;
/* If the instance is a master, we can populate the replication info
* only when repl_backlog is not NULL. If the repl_backlog is NULL,
* it means that the instance isn't in any replication chains. In this
* scenario the replication info is useless, because when a slave
* connects to us, the NULL repl_backlog will trigger a full
* synchronization, at the same time we will use a new replid and clear
* replid2. */
if (!server.masterhost && server.repl_backlog) {
/* Note that when server.slaveseldb is -1, it means that this master
* didn't apply any write commands after a full synchronization.
* So we can let repl_stream_db be 0, this allows a restarted slave
* to reload replication ID/offset, it's safe because the next write
* command must generate a SELECT statement. */
rsi->repl_stream_db = server.slaveseldb == -1 ? 0 : server.slaveseldb;
return rsi;
}
/* If the instance is a slave we need a connected master
* in order to fetch the currently selected DB. */
if (server.master) {
rsi->repl_stream_db = server.master->db->id;
return rsi;
}
/* If we have a cached master we can use it in order to populate the
* replication selected DB info inside the RDB file: the slave can
* increment the master_repl_offset only from data arriving from the
* master, so if we are disconnected the offset in the cached master
* is valid. */
if (server.cached_master) {
rsi->repl_stream_db = server.cached_master->db->id;
return rsi;
}
return NULL;
}