#include "redis.h"
|
|
|
|
#include <signal.h>
|
|
|
|
void SlotToKeyAdd(robj *key);
|
|
void SlotToKeyDel(robj *key);
|
|
|
|
/*-----------------------------------------------------------------------------
|
|
* C-level DB API
|
|
*----------------------------------------------------------------------------*/
|
|
|
|

/* Important notes on lookup and disk store.
 *
 * When the disk store is enabled, a lookup can run into different cases.
 *
 * a) The key is in memory:
 *    - If the key is not in IO_SAVEINPROG state we can access it.
 *      If it is just in IO_SAVE state, the key is in the IO queue but cannot
 *      yet be accessed by the IO thread (it still has to be translated into
 *      an IO Job by the cache cron function).
 *    - If the key is in IO_SAVEINPROG state we can't touch it and have to
 *      block until the pending operation completes.
 * b) The key is not in memory:
 *    - If it is marked as non-existing on disk as well (negative cache)
 *      we don't need to perform the disk access at all.
 *    - If the key MAY EXIST, but is not in memory, and it is marked as
 *      IO_SAVE, then the key can only be a deleted one: IO_SAVE keys are
 *      never evicted (dirty state), so the only possibility is that the key
 *      was deleted.
 *    - If the key MAY EXIST we need to load it from disk in a blocking way.
 *      We check that the key is not in IO_SAVEINPROG state before accessing
 *      the disk object; if it is in this state, we wait.
 */
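
/* Wait for any pending IO on 'key' to complete, so that the key is no longer
 * in REDIS_IO_SAVEINPROG state. Currently this drains the whole IO jobs
 * queue (see the FIXME below). */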
void lookupWaitBusyKey(redisDb *db, robj *key) {
    /* FIXME: wait just for this key, not everything */
    waitEmptyIOJobsQueue();
    processAllPendingIOJobs();
    redisAssert((cacheScheduleIOGetFlags(db,key) & REDIS_IO_SAVEINPROG) == 0);
}
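
/* Low level key lookup API: return the value object associated with 'key'
 * in 'db', or NULL if the key is missing. With diskstore enabled the key may
 * be loaded from disk in a blocking way, as described in the notes above. */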
robj *lookupKey(redisDb *db, robj *key) {
    dictEntry *de = dictFind(db->dict,key->ptr);
    if (de) {
        robj *val = dictGetEntryVal(de);

        /* Update the access time for the aging algorithm.
         * Don't do it if we have a saving child, as this will trigger
         * a copy on write madness. */
        if (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1)
            val->lru = server.lruclock;

        if (server.ds_enabled &&
            cacheScheduleIOGetFlags(db,key) & REDIS_IO_SAVEINPROG)
        {
            /* Need to wait for the key to get unbusy */
            redisLog(REDIS_DEBUG,"Lookup found a key in SAVEINPROG state. Waiting. (Key was in the cache)");
            lookupWaitBusyKey(db,key);
        }
        server.stat_keyspace_hits++;
        return val;
    } else {
        time_t expire;
        robj *val;

        /* Key not found in the in memory hash table, but if disk store is
         * enabled we may have this key on disk. If so load it in memory
         * in a blocking way. */
        if (server.ds_enabled && cacheKeyMayExist(db,key)) {
            long flags = cacheScheduleIOGetFlags(db,key);

            /* The key is not in cache, but it has a SAVE op in queue?
             * The only possibility is that the key was deleted, since
             * dirty keys are not evicted. */
            if (flags & REDIS_IO_SAVE) {
                server.stat_keyspace_misses++;
                return NULL;
            }

            /* At this point we need to load the key in memory in a blocking
             * way. The first thing we do is waiting here if the key is busy. */
            if (flags & REDIS_IO_SAVEINPROG) {
                redisLog(REDIS_DEBUG,"Lookup found a key in SAVEINPROG state. Waiting (while force loading).");
                lookupWaitBusyKey(db,key);
            }

            redisLog(REDIS_DEBUG,"Force loading key %s via lookup", key->ptr);
            val = dsGet(db,key,&expire);
            if (val) {
                int retval = dbAdd(db,key,val);
                redisAssert(retval == REDIS_OK);
                if (expire != -1) setExpire(db,key,expire);
                server.stat_keyspace_hits++;
                return val;
            } else {
                cacheSetKeyDoesNotExist(db,key);
            }
        }
        server.stat_keyspace_misses++;
        return NULL;
    }
}
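
/* Lookup a key for read operations: the key is removed first if its expire
 * time was already reached. */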
robj *lookupKeyRead(redisDb *db, robj *key) {
    expireIfNeeded(db,key);
    return lookupKey(db,key);
}
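
/* Lookup a key for write operations. Currently this behaves exactly like
 * lookupKeyRead(). */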
robj *lookupKeyWrite(redisDb *db, robj *key) {
    expireIfNeeded(db,key);
    return lookupKey(db,key);
}
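
/* Like lookupKeyRead(), but if the key is missing 'reply' is sent to the
 * client and NULL is returned. lookupKeyWriteOrReply() is the write-side
 * counterpart. */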
robj *lookupKeyReadOrReply(redisClient *c, robj *key, robj *reply) {
    robj *o = lookupKeyRead(c->db, key);
    if (!o) addReply(c,reply);
    return o;
}

robj *lookupKeyWriteOrReply(redisClient *c, robj *key, robj *reply) {
    robj *o = lookupKeyWrite(c->db, key);
    if (!o) addReply(c,reply);
    return o;
}

/* Add the key to the DB. If the key already exists REDIS_ERR is returned,
 * otherwise REDIS_OK is returned, and the caller should increment the
 * refcount of 'val'. */
int dbAdd(redisDb *db, robj *key, robj *val) {
    /* Perform a lookup before adding the key, as we need to duplicate the
     * key's sds string before storing it. */
    if (dictFind(db->dict, key->ptr) != NULL) {
        return REDIS_ERR;
    } else {
        sds copy = sdsdup(key->ptr);
        dictAdd(db->dict, copy, val);
        if (server.ds_enabled) cacheSetKeyMayExist(db,key);
        if (server.cluster_enabled) SlotToKeyAdd(key);
        return REDIS_OK;
    }
}

/* If the key does not exist, this is just like dbAdd(). Otherwise
 * the value associated to the key is replaced with the new one.
 *
 * On update (key already existed) 0 is returned. Otherwise 1. */
int dbReplace(redisDb *db, robj *key, robj *val) {
    robj *oldval;
    int retval;

    if ((oldval = dictFetchValue(db->dict,key->ptr)) == NULL) {
        sds copy = sdsdup(key->ptr);
        dictAdd(db->dict, copy, val);
        if (server.cluster_enabled) SlotToKeyAdd(key);
        retval = 1;
    } else {
        dictReplace(db->dict, key->ptr, val);
        retval = 0;
    }
    if (server.ds_enabled) cacheSetKeyMayExist(db,key);
    return retval;
}
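
/* Return non-zero if 'key' exists in the in-memory dictionary of 'db',
 * zero otherwise. The disk store is not checked. */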
int dbExists(redisDb *db, robj *key) {
    return dictFind(db->dict,key->ptr) != NULL;
}

/* Return a random key, in form of a Redis object.
 * If there are no keys, NULL is returned.
 *
 * The function makes sure to return keys not already expired. */
robj *dbRandomKey(redisDb *db) {
    struct dictEntry *de;

    while(1) {
        sds key;
        robj *keyobj;

        de = dictGetRandomKey(db->dict);
        if (de == NULL) return NULL;

        key = dictGetEntryKey(de);
        keyobj = createStringObject(key,sdslen(key));
        if (dictFind(db->expires,key)) {
            if (expireIfNeeded(db,keyobj)) {
                decrRefCount(keyobj);
                continue; /* search for another key. This expired. */
            }
        }
        return keyobj;
    }
}

/* Delete a key, value, and associated expiration entry if any, from the DB */
int dbDelete(redisDb *db, robj *key) {
    /* If diskstore is enabled make sure to awake clients waiting for this
     * key, as it is not really useful to wait for a key that has already
     * been deleted to be loaded from disk. */
    if (server.ds_enabled) {
        handleClientsBlockedOnSwappedKey(db,key);
        cacheSetKeyDoesNotExist(db,key);
    }

    /* Deleting an entry from the expires dict will not free the sds of
     * the key, because it is shared with the main dictionary. */
    if (dictSize(db->expires) > 0) dictDelete(db->expires,key->ptr);
    if (dictDelete(db->dict,key->ptr) == DICT_OK) {
        if (server.cluster_enabled) SlotToKeyDel(key);
        return 1;
    } else {
        return 0;
    }
}

/* Empty the whole database. Returns the number of keys removed.
 * If diskstore is enabled this function will just flush the in-memory cache. */
long long emptyDb() {
    int j;
    long long removed = 0;

    for (j = 0; j < server.dbnum; j++) {
        removed += dictSize(server.db[j].dict);
        dictEmpty(server.db[j].dict);
        dictEmpty(server.db[j].expires);
        if (server.ds_enabled) dictEmpty(server.db[j].io_negcache);
    }
    return removed;
}
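
/* Select the DB with the specified numeric 'id' for client 'c'. Returns
 * REDIS_ERR if the id is out of range, otherwise REDIS_OK. */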
int selectDb(redisClient *c, int id) {
    if (id < 0 || id >= server.dbnum)
        return REDIS_ERR;
    c->db = &server.db[id];
    return REDIS_OK;
}

/*-----------------------------------------------------------------------------
 * Hooks for key space changes.
 *
 * Every time a key in the database is modified the function
 * signalModifiedKey() is called.
 *
 * Every time a DB is flushed the function signalFlushedDb() is called.
 *----------------------------------------------------------------------------*/

void signalModifiedKey(redisDb *db, robj *key) {
    touchWatchedKey(db,key);
    if (server.ds_enabled)
        cacheScheduleIO(db,key,REDIS_IO_SAVE);
}

void signalFlushedDb(int dbid) {
    touchWatchedKeysOnFlush(dbid);
}

/*-----------------------------------------------------------------------------
 * Type agnostic commands operating on the key space
 *----------------------------------------------------------------------------*/
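
/* FLUSHDB: remove all the keys from the currently selected DB. */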
void flushdbCommand(redisClient *c) {
    server.dirty += dictSize(c->db->dict);
    signalFlushedDb(c->db->id);
    dictEmpty(c->db->dict);
    dictEmpty(c->db->expires);
    if (server.ds_enabled) dsFlushDb(c->db->id);
    addReply(c,shared.ok);
}
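
/* FLUSHALL: remove all the keys from all the DBs. Any RDB save in progress
 * is killed, and either the disk store is flushed (if enabled) or a fresh
 * dump file is saved. */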
void flushallCommand(redisClient *c) {
    signalFlushedDb(-1);
    server.dirty += emptyDb();
    addReply(c,shared.ok);
    if (server.bgsavechildpid != -1) {
        kill(server.bgsavechildpid,SIGKILL);
        rdbRemoveTempFile(server.bgsavechildpid);
    }
    if (server.ds_enabled)
        dsFlushDb(-1);
    else
        rdbSave(server.dbfilename);
    server.dirty++;
}
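
/* DEL key [key ...]: remove the specified keys, replying with the number of
 * keys actually deleted. With diskstore enabled, keys living only on disk
 * are scheduled for deletion as well. */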
void delCommand(redisClient *c) {
    int deleted = 0, j;

    for (j = 1; j < c->argc; j++) {
        if (server.ds_enabled) {
            lookupKeyRead(c->db,c->argv[j]);
            /* FIXME: this can be optimized a lot, no real need to load
             * a possibly huge value. */
        }
        if (dbDelete(c->db,c->argv[j])) {
            signalModifiedKey(c->db,c->argv[j]);
            server.dirty++;
            deleted++;
        } else if (server.ds_enabled) {
            if (cacheKeyMayExist(c->db,c->argv[j]) &&
                dsExists(c->db,c->argv[j]))
            {
                cacheScheduleIO(c->db,c->argv[j],REDIS_IO_SAVE);
                deleted++;
            }
        }
    }
    addReplyLongLong(c,deleted);
}

void existsCommand(redisClient *c) {
    expireIfNeeded(c->db,c->argv[1]);
    if (dbExists(c->db,c->argv[1])) {
        addReply(c, shared.cone);
    } else {
        addReply(c, shared.czero);
    }
}

void selectCommand(redisClient *c) {
    int id = atoi(c->argv[1]->ptr);

    if (server.cluster_enabled && id != 0) {
        addReplyError(c,"SELECT is not allowed in cluster mode");
        return;
    }
    if (selectDb(c,id) == REDIS_ERR) {
        addReplyError(c,"invalid DB index");
    } else {
        addReply(c,shared.ok);
    }
}

void randomkeyCommand(redisClient *c) {
    robj *key;

    if ((key = dbRandomKey(c->db)) == NULL) {
        addReply(c,shared.nullbulk);
        return;
    }

    addReplyBulk(c,key);
    decrRefCount(key);
}
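
/* KEYS pattern: reply with all the keys matching the given glob-style
 * pattern. Keys found to be expired while iterating are skipped. */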
void keysCommand(redisClient *c) {
    dictIterator *di;
    dictEntry *de;
    sds pattern = c->argv[1]->ptr;
    int plen = sdslen(pattern), allkeys;
    unsigned long numkeys = 0;
    void *replylen = addDeferredMultiBulkLength(c);

    di = dictGetIterator(c->db->dict);
    allkeys = (pattern[0] == '*' && pattern[1] == '\0');
    while((de = dictNext(di)) != NULL) {
        sds key = dictGetEntryKey(de);
        robj *keyobj;

        if (allkeys || stringmatchlen(pattern,plen,key,sdslen(key),0)) {
            keyobj = createStringObject(key,sdslen(key));
            if (expireIfNeeded(c->db,keyobj) == 0) {
                addReplyBulk(c,keyobj);
                numkeys++;
            }
            decrRefCount(keyobj);
        }
    }
    dictReleaseIterator(di);
    setDeferredMultiBulkLength(c,replylen,numkeys);
}

void dbsizeCommand(redisClient *c) {
    addReplyLongLong(c,dictSize(c->db->dict));
}

void lastsaveCommand(redisClient *c) {
    addReplyLongLong(c,server.lastsave);
}

void typeCommand(redisClient *c) {
    robj *o;
    char *type;

    o = lookupKeyRead(c->db,c->argv[1]);
    if (o == NULL) {
        type = "none";
    } else {
        switch(o->type) {
        case REDIS_STRING: type = "string"; break;
        case REDIS_LIST: type = "list"; break;
        case REDIS_SET: type = "set"; break;
        case REDIS_ZSET: type = "zset"; break;
        case REDIS_HASH: type = "hash"; break;
        default: type = "unknown"; break;
        }
    }
    addReplyStatus(c,type);
}

void shutdownCommand(redisClient *c) {
    if (prepareForShutdown() == REDIS_OK)
        exit(0);
    addReplyError(c,"Errors trying to SHUTDOWN. Check logs.");
}
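
/* Common implementation of RENAME and RENAMENX. When 'nx' is non-zero the
 * rename is refused if the destination key already exists, and the command
 * replies with 0/1 instead of +OK. */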
void renameGenericCommand(redisClient *c, int nx) {
    robj *o;

    /* Using the same key as src and dst is probably an error */
    if (sdscmp(c->argv[1]->ptr,c->argv[2]->ptr) == 0) {
        addReply(c,shared.sameobjecterr);
        return;
    }

    if ((o = lookupKeyWriteOrReply(c,c->argv[1],shared.nokeyerr)) == NULL)
        return;

    incrRefCount(o);
    if (dbAdd(c->db,c->argv[2],o) == REDIS_ERR) {
        if (nx) {
            decrRefCount(o);
            addReply(c,shared.czero);
            return;
        }
        dbReplace(c->db,c->argv[2],o);
    }
    dbDelete(c->db,c->argv[1]);
    signalModifiedKey(c->db,c->argv[1]);
    signalModifiedKey(c->db,c->argv[2]);
    server.dirty++;
    addReply(c,nx ? shared.cone : shared.ok);
}

void renameCommand(redisClient *c) {
    renameGenericCommand(c,0);
}

void renamenxCommand(redisClient *c) {
    renameGenericCommand(c,1);
}
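
/* MOVE key db: move a key from the currently selected DB to the specified
 * destination DB. Replies with 1 on success, 0 if the key does not exist
 * in the source DB or already exists in the destination DB. */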
void moveCommand(redisClient *c) {
    robj *o;
    redisDb *src, *dst;
    int srcid;

    if (server.cluster_enabled) {
        addReplyError(c,"MOVE is not allowed in cluster mode");
        return;
    }

    /* Obtain source and target DB pointers */
    src = c->db;
    srcid = c->db->id;
    if (selectDb(c,atoi(c->argv[2]->ptr)) == REDIS_ERR) {
        addReply(c,shared.outofrangeerr);
        return;
    }
    dst = c->db;
    selectDb(c,srcid); /* Back to the source DB */

    /* If the user is trying to move a key to the same DB it is
     * probably an error. */
    if (src == dst) {
        addReply(c,shared.sameobjecterr);
        return;
    }

    /* Check if the element exists and get a reference */
    o = lookupKeyWrite(c->db,c->argv[1]);
    if (!o) {
        addReply(c,shared.czero);
        return;
    }

    /* Try to add the element to the target DB */
    if (dbAdd(dst,c->argv[1],o) == REDIS_ERR) {
        addReply(c,shared.czero);
        return;
    }
    incrRefCount(o);

    /* OK! key moved, free the entry in the source DB */
    dbDelete(src,c->argv[1]);
    server.dirty++;
    addReply(c,shared.cone);
}

/*-----------------------------------------------------------------------------
 * Expires API
 *----------------------------------------------------------------------------*/

int removeExpire(redisDb *db, robj *key) {
    /* An expire may only be removed if there is a corresponding entry in the
     * main dict. Otherwise, the key will never be freed. */
    redisAssert(dictFind(db->dict,key->ptr) != NULL);
    return dictDelete(db->expires,key->ptr) == DICT_OK;
}

void setExpire(redisDb *db, robj *key, time_t when) {
    dictEntry *de;

    /* Reuse the sds from the main dict in the expire dict */
    de = dictFind(db->dict,key->ptr);
    redisAssert(de != NULL);
    dictReplace(db->expires,dictGetEntryKey(de),(void*)when);
}

/* Return the expire time of the specified key, or -1 if no expire
 * is associated with this key (i.e. the key is non volatile) */
time_t getExpire(redisDb *db, robj *key) {
    dictEntry *de;

    /* No expire? return ASAP */
    if (dictSize(db->expires) == 0 ||
       (de = dictFind(db->expires,key->ptr)) == NULL) return -1;

    /* The entry was found in the expire dict, this means it should also
     * be present in the main dict (safety check). */
    redisAssert(dictFind(db->dict,key->ptr) != NULL);
    return (time_t) dictGetEntryVal(de);
}

/* Propagate expires into slaves and the AOF file.
 * When a key expires in the master, a DEL operation for this key is sent
 * to all the slaves and the AOF file if enabled.
 *
 * This way the key expiry is centralized in one place, and since both
 * AOF and the master->slave link guarantee operation ordering, everything
 * will be consistent even if we allow write operations against expiring
 * keys. */
void propagateExpire(redisDb *db, robj *key) {
    robj *argv[2];

    argv[0] = createStringObject("DEL",3);
    argv[1] = key;
    incrRefCount(key);

    if (server.appendonly)
        feedAppendOnlyFile(server.delCommand,db->id,argv,2);
    if (listLength(server.slaves))
        replicationFeedSlaves(server.slaves,db->id,argv,2);

    decrRefCount(argv[0]);
    decrRefCount(argv[1]);
}

int expireIfNeeded(redisDb *db, robj *key) {
    time_t when = getExpire(db,key);

    if (when < 0) return 0; /* No expire for this key */

    /* If we are running in the context of a slave, return ASAP:
     * the slave key expiration is controlled by the master that will
     * send us synthesized DEL operations for expired keys.
     *
     * Still we try to return the right information to the caller,
     * that is, 0 if we think the key is still valid, 1 if
     * we think the key is expired at this time. */
    if (server.masterhost != NULL) {
        return time(NULL) > when;
    }

    /* Return when this key has not expired */
    if (time(NULL) <= when) return 0;

    /* Delete the key */
    server.stat_expiredkeys++;
    propagateExpire(db,key);
    return dbDelete(db,key);
}

/*-----------------------------------------------------------------------------
 * Expires Commands
 *----------------------------------------------------------------------------*/
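
/* Common implementation of EXPIRE and EXPIREAT. The time provided by the
 * client is normalized to relative seconds by subtracting 'offset':
 * EXPIRE passes an offset of 0 (its argument is already relative), while
 * EXPIREAT passes the current unix time, turning the absolute timestamp
 * into seconds from now. For example, if time(NULL) is 1300000000, then
 * EXPIREAT key 1300000060 behaves like EXPIRE key 60. */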
void expireGenericCommand(redisClient *c, robj *key, robj *param, long offset) {
    dictEntry *de;
    long seconds;

    if (getLongFromObjectOrReply(c, param, &seconds, NULL) != REDIS_OK) return;

    seconds -= offset;

    de = dictFind(c->db->dict,key->ptr);
    if (de == NULL) {
        addReply(c,shared.czero);
        return;
    }
    if (seconds <= 0) {
        if (dbDelete(c->db,key)) server.dirty++;
        addReply(c, shared.cone);
        signalModifiedKey(c->db,key);
        return;
    } else {
        time_t when = time(NULL)+seconds;
        setExpire(c->db,key,when);
        addReply(c,shared.cone);
        signalModifiedKey(c->db,key);
        server.dirty++;
        return;
    }
}

void expireCommand(redisClient *c) {
    expireGenericCommand(c,c->argv[1],c->argv[2],0);
}

void expireatCommand(redisClient *c) {
    expireGenericCommand(c,c->argv[1],c->argv[2],time(NULL));
}

void ttlCommand(redisClient *c) {
    time_t expire, ttl = -1;

    if (server.ds_enabled) lookupKeyRead(c->db,c->argv[1]);
    expire = getExpire(c->db,c->argv[1]);
    if (expire != -1) {
        ttl = (expire-time(NULL));
        if (ttl < 0) ttl = -1;
    }
    addReplyLongLong(c,(long long)ttl);
}

void persistCommand(redisClient *c) {
    dictEntry *de;

    de = dictFind(c->db->dict,c->argv[1]->ptr);
    if (de == NULL) {
        addReply(c,shared.czero);
    } else {
        if (removeExpire(c->db,c->argv[1])) {
            addReply(c,shared.cone);
            server.dirty++;
        } else {
            addReply(c,shared.czero);
        }
    }
}

/* -----------------------------------------------------------------------------
 * API to get key arguments from commands
 * ---------------------------------------------------------------------------*/
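
/* Return an array of argv indexes identifying the key arguments of 'cmd',
 * using the firstkey/lastkey/keystep information of the command table.
 * The number of keys is returned in *numkeys and the array must be released
 * with getKeysFreeResult(). For instance, a command declared with firstkey=1,
 * lastkey=-1, keystep=2 (MSET-style: MSET k1 v1 k2 v2) yields {1,3}. */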
int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, int *numkeys) {
    int j, i = 0, last, *keys;
    REDIS_NOTUSED(argv);

    if (cmd->firstkey == 0) {
        *numkeys = 0;
        return NULL;
    }
    last = cmd->lastkey;
    if (last < 0) last = argc+last;
    keys = zmalloc(sizeof(int)*((last - cmd->firstkey)+1));
    for (j = cmd->firstkey; j <= last; j += cmd->keystep) {
        redisAssert(j < argc);
        keys[i++] = j;
    }
    *numkeys = i;
    return keys;
}
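
/* Return the key argument indexes of 'cmd', using the command specific
 * getkeys_proc if one is defined, otherwise falling back to the command
 * table information. */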
int *getKeysFromCommand(struct redisCommand *cmd,robj **argv, int argc, int *numkeys, int flags) {
    if (cmd->getkeys_proc) {
        return cmd->getkeys_proc(cmd,argv,argc,numkeys,flags);
    } else {
        return getKeysUsingCommandTable(cmd,argv,argc,numkeys);
    }
}

void getKeysFreeResult(int *result) {
    zfree(result);
}
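
/* getkeys_proc for commands whose keys should not be preloaded from the
 * disk store: when called with the REDIS_GETKEYS_PRELOAD flag no keys are
 * reported, otherwise the command table information is used. */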
int *noPreloadGetKeys(struct redisCommand *cmd,robj **argv, int argc, int *numkeys, int flags) {
    if (flags & REDIS_GETKEYS_PRELOAD) {
        *numkeys = 0;
        return NULL;
    } else {
        return getKeysUsingCommandTable(cmd,argv,argc,numkeys);
    }
}
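
/* getkeys_proc for RENAME-like commands: for preloading purposes only the
 * source key (argv[1]) is reported, presumably because the destination is
 * going to be overwritten anyway. */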
int *renameGetKeys(struct redisCommand *cmd,robj **argv, int argc, int *numkeys, int flags) {
    if (flags & REDIS_GETKEYS_PRELOAD) {
        int *keys = zmalloc(sizeof(int));
        *numkeys = 1;
        keys[0] = 1;
        return keys;
    } else {
        return getKeysUsingCommandTable(cmd,argv,argc,numkeys);
    }
}
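
/* getkeys_proc for ZUNIONSTORE / ZINTERSTORE: the key arguments reported are
 * the 'numkeys' source keys starting at argv[3]. */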
int *zunionInterGetKeys(struct redisCommand *cmd,robj **argv, int argc, int *numkeys, int flags) {
    int i, num, *keys;
    REDIS_NOTUSED(cmd);
    REDIS_NOTUSED(flags);

    num = atoi(argv[2]->ptr);
    /* Sanity check. Don't return any key if the command is going to
     * reply with syntax error. */
    if (num > (argc-3)) {
        *numkeys = 0;
        return NULL;
    }
    keys = zmalloc(sizeof(int)*num);
    for (i = 0; i < num; i++) keys[i] = 3+i;
    *numkeys = num;
    return keys;
}

/* Slot to Key API. This is used by Redis Cluster in order to quickly obtain
 * the keys belonging to a specified hash slot. This is useful while
 * rehashing the cluster. */
void SlotToKeyAdd(robj *key) {
    unsigned int hashslot = keyHashSlot(key->ptr,sdslen(key->ptr));

    zslInsert(server.cluster.slots_to_keys,hashslot,key);
    incrRefCount(key);
}

void SlotToKeyDel(robj *key) {
    unsigned int hashslot = keyHashSlot(key->ptr,sdslen(key->ptr));

    zslDelete(server.cluster.slots_to_keys,hashslot,key);
}
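
/* Populate 'keys' with up to 'count' keys belonging to the specified hash
 * slot, scanning the cluster slots_to_keys skiplist. Returns the number of
 * keys stored into the array. */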
unsigned int GetKeysInSlot(unsigned int hashslot, robj **keys, unsigned int count) {
    zskiplistNode *n;
    zrangespec range;
    int j = 0;

    range.min = range.max = hashslot;
    range.minex = range.maxex = 0;

    n = zslFirstInRange(server.cluster.slots_to_keys, range);
    while(n && n->score == hashslot && count--) {
        keys[j++] = n->obj;
        n = n->level[0].forward;
    }
    return j;
}