From 2a51bac44ebcff79dcd8ede2cab66ca213c9d504 Mon Sep 17 00:00:00 2001
From: antirez
Date: Thu, 4 May 2017 17:00:53 +0200
Subject: [PATCH] Simplify atomicvar.h usage by having the mutex name implicit.

---
 src/atomicvar.h | 30 +++++++++++++++---------------
 src/lazyfree.c  | 16 +++++++---------
 src/zmalloc.c   |  6 +++---
 3 files changed, 25 insertions(+), 27 deletions(-)

diff --git a/src/atomicvar.h b/src/atomicvar.h
index c522db3e9..1efa7bffb 100644
--- a/src/atomicvar.h
+++ b/src/atomicvar.h
@@ -54,40 +54,40 @@
 #if defined(__ATOMIC_RELAXED) && !defined(__sun) && (!defined(__clang__) || !defined(__APPLE__) || __apple_build_version__ > 4210057)
 /* Implementation using __atomic macros. */
 
-#define atomicIncr(var,count,mutex) __atomic_add_fetch(&var,(count),__ATOMIC_RELAXED)
-#define atomicDecr(var,count,mutex) __atomic_sub_fetch(&var,(count),__ATOMIC_RELAXED)
-#define atomicGet(var,dstvar,mutex) do { \
+#define atomicIncr(var,count) __atomic_add_fetch(&var,(count),__ATOMIC_RELAXED)
+#define atomicDecr(var,count) __atomic_sub_fetch(&var,(count),__ATOMIC_RELAXED)
+#define atomicGet(var,dstvar) do { \
     dstvar = __atomic_load_n(&var,__ATOMIC_RELAXED); \
 } while(0)
 
 #elif defined(HAVE_ATOMIC)
 /* Implementation using __sync macros. */
 
-#define atomicIncr(var,count,mutex) __sync_add_and_fetch(&var,(count))
-#define atomicDecr(var,count,mutex) __sync_sub_and_fetch(&var,(count))
-#define atomicGet(var,dstvar,mutex) do { \
+#define atomicIncr(var,count) __sync_add_and_fetch(&var,(count))
+#define atomicDecr(var,count) __sync_sub_and_fetch(&var,(count))
+#define atomicGet(var,dstvar) do { \
     dstvar = __sync_sub_and_fetch(&var,0); \
 } while(0)
 
 #else
 /* Implementation using pthread mutex. */
 
-#define atomicIncr(var,count,mutex) do { \
-    pthread_mutex_lock(&mutex); \
+#define atomicIncr(var,count) do { \
+    pthread_mutex_lock(&var ## _mutex); \
     var += (count); \
-    pthread_mutex_unlock(&mutex); \
+    pthread_mutex_unlock(&var ## _mutex); \
 } while(0)
 
-#define atomicDecr(var,count,mutex) do { \
-    pthread_mutex_lock(&mutex); \
+#define atomicDecr(var,count) do { \
+    pthread_mutex_lock(&var ## _mutex); \
     var -= (count); \
-    pthread_mutex_unlock(&mutex); \
+    pthread_mutex_unlock(&var ## _mutex); \
 } while(0)
 
-#define atomicGet(var,dstvar,mutex) do { \
-    pthread_mutex_lock(&mutex); \
+#define atomicGet(var,dstvar) do { \
+    pthread_mutex_lock(&var ## _mutex); \
     dstvar = var; \
-    pthread_mutex_unlock(&mutex); \
+    pthread_mutex_unlock(&var ## _mutex); \
 } while(0)
 
 #endif
diff --git a/src/lazyfree.c b/src/lazyfree.c
index 64ed68466..809ebdb57 100644
--- a/src/lazyfree.c
+++ b/src/lazyfree.c
@@ -9,7 +9,7 @@ pthread_mutex_t lazyfree_objects_mutex = PTHREAD_MUTEX_INITIALIZER;
 /* Return the number of currently pending objects to free. */
 size_t lazyfreeGetPendingObjectsCount(void) {
     size_t aux;
-    atomicGet(lazyfree_objects,aux,lazyfree_objects_mutex);
+    atomicGet(lazyfree_objects,aux);
     return aux;
 }
 
@@ -67,7 +67,7 @@ int dbAsyncDelete(redisDb *db, robj *key) {
         /* If releasing the object is too much work, let's put it into the
          * lazy free list. */
         if (free_effort > LAZYFREE_THRESHOLD) {
-            atomicIncr(lazyfree_objects,1,lazyfree_objects_mutex);
+            atomicIncr(lazyfree_objects,1);
             bioCreateBackgroundJob(BIO_LAZY_FREE,val,NULL,NULL);
             dictSetVal(db->dict,de,NULL);
         }
@@ -91,8 +91,7 @@ void emptyDbAsync(redisDb *db) {
     dict *oldht1 = db->dict, *oldht2 = db->expires;
     db->dict = dictCreate(&dbDictType,NULL);
     db->expires = dictCreate(&keyptrDictType,NULL);
-    atomicIncr(lazyfree_objects,dictSize(oldht1),
-        lazyfree_objects_mutex);
+    atomicIncr(lazyfree_objects,dictSize(oldht1));
     bioCreateBackgroundJob(BIO_LAZY_FREE,NULL,oldht1,oldht2);
 }
 
@@ -104,8 +103,7 @@ void slotToKeyFlushAsync(void) {
     server.cluster->slots_to_keys = raxNew();
     memset(server.cluster->slots_keys_count,0,
            sizeof(server.cluster->slots_keys_count));
-    atomicIncr(lazyfree_objects,old->numele,
-        lazyfree_objects_mutex);
+    atomicIncr(lazyfree_objects,old->numele);
     bioCreateBackgroundJob(BIO_LAZY_FREE,NULL,NULL,old);
 }
 
@@ -113,7 +111,7 @@ void slotToKeyFlushAsync(void) {
  * updating the count of objects to release. */
 void lazyfreeFreeObjectFromBioThread(robj *o) {
     decrRefCount(o);
-    atomicDecr(lazyfree_objects,1,lazyfree_objects_mutex);
+    atomicDecr(lazyfree_objects,1);
 }
 
 /* Release a database from the lazyfree thread. The 'db' pointer is the
@@ -125,7 +123,7 @@ void lazyfreeFreeDatabaseFromBioThread(dict *ht1, dict *ht2) {
     size_t numkeys = dictSize(ht1);
     dictRelease(ht1);
     dictRelease(ht2);
-    atomicDecr(lazyfree_objects,numkeys,lazyfree_objects_mutex);
+    atomicDecr(lazyfree_objects,numkeys);
 }
 
 /* Release the skiplist mapping Redis Cluster keys to slots in the
@@ -133,5 +131,5 @@ void lazyfreeFreeDatabaseFromBioThread(dict *ht1, dict *ht2) {
 void lazyfreeFreeSlotsMapFromBioThread(rax *rt) {
     size_t len = rt->numele;
     raxFree(rt);
-    atomicDecr(lazyfree_objects,len,lazyfree_objects_mutex);
+    atomicDecr(lazyfree_objects,len);
 }
diff --git a/src/zmalloc.c b/src/zmalloc.c
index f653760a7..dfcfc01d4 100644
--- a/src/zmalloc.c
+++ b/src/zmalloc.c
@@ -74,7 +74,7 @@ void zlibc_free(void *ptr) {
     size_t _n = (__n); \
     if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \
     if (zmalloc_thread_safe) { \
-        atomicIncr(used_memory,__n,used_memory_mutex); \
+        atomicIncr(used_memory,__n); \
     } else { \
         used_memory += _n; \
     } \
@@ -84,7 +84,7 @@ void zlibc_free(void *ptr) {
     size_t _n = (__n); \
     if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \
     if (zmalloc_thread_safe) { \
-        atomicDecr(used_memory,__n,used_memory_mutex); \
+        atomicDecr(used_memory,__n); \
     } else { \
         used_memory -= _n; \
     } \
@@ -222,7 +222,7 @@ size_t zmalloc_used_memory(void) {
     size_t um;
 
     if (zmalloc_thread_safe) {
-        atomicGet(used_memory,um,used_memory_mutex);
+        atomicGet(used_memory,um);
     } else {
         um = used_memory;
     }
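
Usage note (not part of the patch): after this change the atomic macros derive
the mutex name from the variable name, so callers no longer pass a mutex
argument. When neither the __atomic nor the __sync builtins are available,
atomicIncr(var,...) expands to code that locks `var ## _mutex`, so a variable
used with these macros is expected to be paired with a pthread mutex named
"<var>_mutex", as lazyfree_objects is paired with lazyfree_objects_mutex above.
The minimal sketch below illustrates the convention with a hypothetical caller;
the names pending_jobs, jobSubmitted and jobsPending are invented for
illustration and do not appear in the patch.

#include <stddef.h>
#include <pthread.h>
#include "atomicvar.h"

/* Hypothetical counter plus its companion mutex. The mutex is only touched
 * by the pthread fallback implementation, but defining it keeps the code
 * valid regardless of which implementation atomicvar.h selects. */
static size_t pending_jobs = 0;
pthread_mutex_t pending_jobs_mutex = PTHREAD_MUTEX_INITIALIZER;

void jobSubmitted(void) {
    /* Before the patch this was atomicIncr(pending_jobs,1,pending_jobs_mutex). */
    atomicIncr(pending_jobs,1);
}

size_t jobsPending(void) {
    size_t aux;
    atomicGet(pending_jobs,aux);    /* Copies the current value into aux. */
    return aux;
}

The token-pasting convention removes a repetitive argument and makes it
impossible to pass the wrong mutex for a given variable, at the cost of
requiring callers to follow the "<var>_mutex" naming scheme.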