mirror of
https://codeberg.org/redict/redict.git
synced 2025-01-23 00:28:26 -05:00
1c71038540
List of squashed commits or PRs =============================== commit 66801ea Author: hwware <wen.hui.ware@gmail.com> Date: Mon Jan 13 00:54:31 2020 -0500 typo fix in acl.c commit 46f55db Author: Itamar Haber <itamar@redislabs.com> Date: Sun Sep 6 18:24:11 2020 +0300 Updates a couple of comments Specifically: * RM_AutoMemory completed instead of pointing to docs * Updated link to custom type doc commit 61a2aa0 Author: xindoo <xindoo@qq.com> Date: Tue Sep 1 19:24:59 2020 +0800 Correct errors in code comments commit a5871d1 Author: yz1509 <pro-756@qq.com> Date: Tue Sep 1 18:36:06 2020 +0800 fix typos in module.c commit 41eede7 Author: bookug <bookug@qq.com> Date: Sat Aug 15 01:11:33 2020 +0800 docs: fix typos in comments commit c303c84 Author: lazy-snail <ws.niu@outlook.com> Date: Fri Aug 7 11:15:44 2020 +0800 fix spelling in redis.conf commit 1eb76bf Author: zhujian <zhujianxyz@gmail.com> Date: Thu Aug 6 15:22:10 2020 +0800 add a missing 'n' in comment commit 1530ec2 Author: Daniel Dai <764122422@qq.com> Date: Mon Jul 27 00:46:35 2020 -0400 fix spelling in tracking.c commit e517b31 Author: Hunter-Chen <huntcool001@gmail.com> Date: Fri Jul 17 22:33:32 2020 +0800 Update redis.conf Co-authored-by: Itamar Haber <itamar@redislabs.com> commit c300eff Author: Hunter-Chen <huntcool001@gmail.com> Date: Fri Jul 17 22:33:23 2020 +0800 Update redis.conf Co-authored-by: Itamar Haber <itamar@redislabs.com> commit 4c058a8 Author: 陈浩鹏 <chenhaopeng@heytea.com> Date: Thu Jun 25 19:00:56 2020 +0800 Grammar fix and clarification commit 5fcaa81 Author: bodong.ybd <bodong.ybd@alibaba-inc.com> Date: Fri Jun 19 10:09:00 2020 +0800 Fix typos commit 4caca9a Author: Pruthvi P <pruthvi@ixigo.com> Date: Fri May 22 00:33:22 2020 +0530 Fix typo eviciton => eviction commit b2a25f6 Author: Brad Dunbar <dunbarb2@gmail.com> Date: Sun May 17 12:39:59 2020 -0400 Fix a typo. 
commit 12842ae Author: hwware <wen.hui.ware@gmail.com> Date: Sun May 3 17:16:59 2020 -0400 fix spelling in redis conf commit ddba07c Author: Chris Lamb <chris@chris-lamb.co.uk> Date: Sat May 2 23:25:34 2020 +0100 Correct a "conflicts" spelling error. commit 8fc7bf2 Author: Nao YONASHIRO <yonashiro@r.recruit.co.jp> Date: Thu Apr 30 10:25:27 2020 +0900 docs: fix EXPIRE_FAST_CYCLE_DURATION to ACTIVE_EXPIRE_CYCLE_FAST_DURATION commit 9b2b67a Author: Brad Dunbar <dunbarb2@gmail.com> Date: Fri Apr 24 11:46:22 2020 -0400 Fix a typo. commit 0746f10 Author: devilinrust <63737265+devilinrust@users.noreply.github.com> Date: Thu Apr 16 00:17:53 2020 +0200 Fix typos in server.c commit 92b588d Author: benjessop12 <56115861+benjessop12@users.noreply.github.com> Date: Mon Apr 13 13:43:55 2020 +0100 Fix spelling mistake in lazyfree.c commit 1da37aa Merge: 2d4ba28af347a8
Author: hwware <wen.hui.ware@gmail.com> Date: Thu Mar 5 22:41:31 2020 -0500 Merge remote-tracking branch 'upstream/unstable' into expiretypofix commit 2d4ba28 Author: hwware <wen.hui.ware@gmail.com> Date: Mon Mar 2 00:09:40 2020 -0500 fix typo in expire.c commit 1a746f7 Author: SennoYuki <minakami1yuki@gmail.com> Date: Thu Feb 27 16:54:32 2020 +0800 fix typo commit 8599b1a Author: dongheejeong <donghee950403@gmail.com> Date: Sun Feb 16 20:31:43 2020 +0000 Fix typo in server.c commit f38d4e8 Author: hwware <wen.hui.ware@gmail.com> Date: Sun Feb 2 22:58:38 2020 -0500 fix typo in evict.c commit fe143fc Author: Leo Murillo <leonardo.murillo@gmail.com> Date: Sun Feb 2 01:57:22 2020 -0600 Fix a few typos in redis.conf commit 1ab4d21 Author: viraja1 <anchan.viraj@gmail.com> Date: Fri Dec 27 17:15:58 2019 +0530 Fix typo in Latency API docstring commit ca1f70e Author: gosth <danxuedexing@qq.com> Date: Wed Dec 18 15:18:02 2019 +0800 fix typo in sort.c commit a57c06b Author: ZYunH <zyunhjob@163.com> Date: Mon Dec 16 22:28:46 2019 +0800 fix-zset-typo commit b8c92b5 Author: git-hulk <hulk.website@gmail.com> Date: Mon Dec 16 15:51:42 2019 +0800 FIX: typo in cluster.c, onformation->information commit 9dd981c Author: wujm2007 <jim.wujm@gmail.com> Date: Mon Dec 16 09:37:52 2019 +0800 Fix typo commit e132d7a Author: Sebastien Williams-Wynn <s.williamswynn.mail@gmail.com> Date: Fri Nov 15 00:14:07 2019 +0000 Minor typo change commit 47f44d5 Author: happynote3966 <01ssrmikururudevice01@gmail.com> Date: Mon Nov 11 22:08:48 2019 +0900 fix comment typo in redis-cli.c commit b8bdb0d Author: fulei <fulei@kuaishou.com> Date: Wed Oct 16 18:00:17 2019 +0800 Fix a spelling mistake of comments in defragDictBucketCallback commit 0def46a Author: fulei <fulei@kuaishou.com> Date: Wed Oct 16 13:09:27 2019 +0800 fix some spelling mistakes of comments in defrag.c commit f3596fd Author: Phil Rajchgot <tophil@outlook.com> Date: Sun Oct 13 02:02:32 2019 -0400 Typo and grammar fixes Redis and its 
documentation are great -- just wanted to submit a few corrections in the spirit of Hacktoberfest. Thanks for all your work on this project. I use it all the time and it works beautifully. commit 2b928cd Author: KangZhiDong <worldkzd@gmail.com> Date: Sun Sep 1 07:03:11 2019 +0800 fix typos commit 33aea14 Author: Axlgrep <axlgrep@gmail.com> Date: Tue Aug 27 11:02:18 2019 +0800 Fixed eviction spelling issues commit e282a80 Author: Simen Flatby <simen@oms.no> Date: Tue Aug 20 15:25:51 2019 +0200 Update comments to reflect prop name In the comments the prop is referenced as replica-validity-factor, but it is really named cluster-replica-validity-factor. commit 74d1f9a Author: Jim Green <jimgreen2013@qq.com> Date: Tue Aug 20 20:00:31 2019 +0800 fix comment error, the code is ok commit eea1407 Author: Liao Tonglang <liaotonglang@gmail.com> Date: Fri May 31 10:16:18 2019 +0800 typo fix fix cna't to can't commit 0da553c Author: KAWACHI Takashi <tkawachi@gmail.com> Date: Wed Jul 17 00:38:16 2019 +0900 Fix typo commit 7fc8fb6 Author: Michael Prokop <mika@grml.org> Date: Tue May 28 17:58:42 2019 +0200 Typo fixes s/familar/familiar/ s/compatiblity/compatibility/ s/ ot / to / s/itsef/itself/ commit 5f46c9d Author: zhumoing <34539422+zhumoing@users.noreply.github.com> Date: Tue May 21 21:16:50 2019 +0800 typo-fixes typo-fixes commit 321dfe1 Author: wxisme <850885154@qq.com> Date: Sat Mar 16 15:10:55 2019 +0800 typo fix commit b4fb131 Merge: 267e0e63df1eb8
Author: Nikitas Bastas <nikitasbst@gmail.com> Date: Fri Feb 8 22:55:45 2019 +0200 Merge branch 'unstable' of antirez/redis into unstable commit 267e0e6 Author: Nikitas Bastas <nikitasbst@gmail.com> Date: Wed Jan 30 21:26:04 2019 +0200 Minor typo fix commit 30544e7 Author: inshal96 <39904558+inshal96@users.noreply.github.com> Date: Fri Jan 4 16:54:50 2019 +0500 remove an extra 'a' in the comments commit 337969d Author: BrotherGao <yangdongheng11@gmail.com> Date: Sat Dec 29 12:37:29 2018 +0800 fix typo in redis.conf commit 9f4b121 Merge: 423a030e504583
Author: BrotherGao <yangdongheng@xiaomi.com> Date: Sat Dec 29 11:41:12 2018 +0800 Merge branch 'unstable' of antirez/redis into unstable commit 423a030 Merge: 42b02b746a51cd
Author: 杨东衡 <yangdongheng@xiaomi.com> Date: Tue Dec 4 23:56:11 2018 +0800 Merge branch 'unstable' of antirez/redis into unstable commit 42b02b7 Merge:68c0e6e
b8febe6
Author: Dongheng Yang <yangdongheng11@gmail.com> Date: Sun Oct 28 15:54:23 2018 +0800 Merge pull request #1 from antirez/unstable update local data commit 714b589 Author: Christian <crifei93@gmail.com> Date: Fri Dec 28 01:17:26 2018 +0100 fix typo "resulution" commit e23259d Author: garenchan <1412950785@qq.com> Date: Wed Dec 26 09:58:35 2018 +0800 fix typo: segfauls -> segfault commit a9359f8 Author: xjp <jianping_xie@aliyun.com> Date: Tue Dec 18 17:31:44 2018 +0800 Fixed REDISMODULE_H spell bug commit a12c3e4 Author: jdiaz <jrd.palacios@gmail.com> Date: Sat Dec 15 23:39:52 2018 -0600 Fixes hyperloglog hash function comment block description commit 770eb11 Author: 林上耀 <1210tom@163.com> Date: Sun Nov 25 17:16:10 2018 +0800 fix typo commit fd97fbb Author: Chris Lamb <chris@chris-lamb.co.uk> Date: Fri Nov 23 17:14:01 2018 +0100 Correct "unsupported" typo. commit a85522d Author: Jungnam Lee <jungnam.lee@oracle.com> Date: Thu Nov 8 23:01:29 2018 +0900 fix typo in test comments commit ade8007 Author: Arun Kumar <palerdot@users.noreply.github.com> Date: Tue Oct 23 16:56:35 2018 +0530 Fixed grammatical typo Fixed typo for word 'dictionary' commit 869ee39 Author: Hamid Alaei <hamid.a85@gmail.com> Date: Sun Aug 12 16:40:02 2018 +0430 fix documentations: (ThreadSafeContextStart/Stop -> ThreadSafeContextLock/Unlock), minor typo commit f89d158 Author: Mayank Jain <mayankjain255@gmail.com> Date: Tue Jul 31 23:01:21 2018 +0530 Updated README.md with some spelling corrections. Made correction in spelling of some misspelled words. 
commit 892198e Author: dsomeshwar <someshwar.dhayalan@gmail.com> Date: Sat Jul 21 23:23:04 2018 +0530 typo fix commit 8a4d780 Author: Itamar Haber <itamar@redislabs.com> Date: Mon Apr 30 02:06:52 2018 +0300 Fixes some typos commit e3acef6 Author: Noah Rosamilia <ivoahivoah@gmail.com> Date: Sat Mar 3 23:41:21 2018 -0500 Fix typo in /deps/README.md commit 04442fb Author: WuYunlong <xzsyeb@126.com> Date: Sat Mar 3 10:32:42 2018 +0800 Fix typo in readSyncBulkPayload() comment. commit 9f36880 Author: WuYunlong <xzsyeb@126.com> Date: Sat Mar 3 10:20:37 2018 +0800 replication.c comment: run_id -> replid. commit f866b4a Author: Francesco 'makevoid' Canessa <makevoid@gmail.com> Date: Thu Feb 22 22:01:56 2018 +0000 fix comment typo in server.c commit 0ebc69b Author: 줍 <jubee0124@gmail.com> Date: Mon Feb 12 16:38:48 2018 +0900 Fix typo in redis.conf Fix `five behaviors` to `eight behaviors` in [this sentence ](antirez/redis@unstable/redis.conf#L564) commit b50a620 Author: martinbroadhurst <martinbroadhurst@users.noreply.github.com> Date: Thu Dec 28 12:07:30 2017 +0000 Fix typo in valgrind.sup commit 7d8f349 Author: Peter Boughton <peter@sorcerersisle.com> Date: Mon Nov 27 19:52:19 2017 +0000 Update CONTRIBUTING; refer doc updates to redis-doc repo. commit 02dec7e Author: Klauswk <klauswk1@hotmail.com> Date: Tue Oct 24 16:18:38 2017 -0200 Fix typo in comment commit e1efbc8 Author: chenshi <baiwfg2@gmail.com> Date: Tue Oct 3 18:26:30 2017 +0800 Correct two spelling errors of comments commit 93327d8 Author: spacewander <spacewanderlzx@gmail.com> Date: Wed Sep 13 16:47:24 2017 +0800 Update the comment for OBJ_ENCODING_EMBSTR_SIZE_LIMIT's value The value of OBJ_ENCODING_EMBSTR_SIZE_LIMIT is 44 now instead of 39. commit 63d361f Author: spacewander <spacewanderlzx@gmail.com> Date: Tue Sep 12 15:06:42 2017 +0800 Fix <prevlen> related doc in ziplist.c According to the definition of ZIP_BIG_PREVLEN and other related code, the guard of single byte <prevlen> should be 254 instead of 255. 
commit ebe228d Author: hanael80 <hanael80@gmail.com> Date: Tue Aug 15 09:09:40 2017 +0900 Fix typo commit 6b696e6 Author: Matt Robenolt <matt@ydekproductions.com> Date: Mon Aug 14 14:50:47 2017 -0700 Fix typo in LATENCY DOCTOR output commit a2ec6ae Author: caosiyang <caosiyang@qiyi.com> Date: Tue Aug 15 14:15:16 2017 +0800 Fix a typo: form => from commit 3ab7699 Author: caosiyang <caosiyang@qiyi.com> Date: Thu Aug 10 18:40:33 2017 +0800 Fix a typo: replicationFeedSlavesFromMaster() => replicationFeedSlavesFromMasterStream() commit 72d43ef Author: caosiyang <caosiyang@qiyi.com> Date: Tue Aug 8 15:57:25 2017 +0800 fix a typo: servewr => server commit 707c958 Author: Bo Cai <charpty@gmail.com> Date: Wed Jul 26 21:49:42 2017 +0800 redis-cli.c typo: conut -> count. Signed-off-by: Bo Cai <charpty@gmail.com> commit b9385b2 Author: JackDrogon <jack.xsuperman@gmail.com> Date: Fri Jun 30 14:22:31 2017 +0800 Fix some spell problems commit 20d9230 Author: akosel <aaronjkosel@gmail.com> Date: Sun Jun 4 19:35:13 2017 -0500 Fix typo commit b167bfc Author: Krzysiek Witkowicz <krzysiekwitkowicz@gmail.com> Date: Mon May 22 21:32:27 2017 +0100 Fix #4008 small typo in comment commit 2b78ac8 Author: Jake Clarkson <jacobwclarkson@gmail.com> Date: Wed Apr 26 15:49:50 2017 +0100 Correct typo in tests/unit/hyperloglog.tcl commit b0f1cdb Author: Qi Luo <qiluo-msft@users.noreply.github.com> Date: Wed Apr 19 14:25:18 2017 -0700 Fix typo commit a90b0f9 Author: charsyam <charsyam@naver.com> Date: Thu Mar 16 18:19:53 2017 +0900 fix typos fix typos fix typos commit 8430a79 Author: Richard Hart <richardhart92@gmail.com> Date: Mon Mar 13 22:17:41 2017 -0400 Fixed log message typo in listenToPort. 
commit 481a1c2 Author: Vinod Kumar <kumar003vinod@gmail.com> Date: Sun Jan 15 23:04:51 2017 +0530 src/db.c: Correct "save" -> "safe" typo commit 586b4d3 Author: wangshaonan <wshn13@gmail.com> Date: Wed Dec 21 20:28:27 2016 +0800 Fix typo they->the in helloworld.c commit c1c4b5e Author: Jenner <hypxm@qq.com> Date: Mon Dec 19 16:39:46 2016 +0800 typo error commit 1ee1a3f Author: tielei <43289893@qq.com> Date: Mon Jul 18 13:52:25 2016 +0800 fix some comments commit 11a41fb Author: Otto Kekäläinen <otto@seravo.fi> Date: Sun Jul 3 10:23:55 2016 +0100 Fix spelling in documentation and comments commit 5fb5d82 Author: francischan <f1ancis621@gmail.com> Date: Tue Jun 28 00:19:33 2016 +0800 Fix outdated comments about redis.c file. It should now refer to server.c file. commit 6b254bc Author: lmatt-bit <lmatt123n@gmail.com> Date: Thu Apr 21 21:45:58 2016 +0800 Refine the comment of dictRehashMilliseconds func SLAVECONF->REPLCONF in comment - by andyli029 commit ee9869f Author: clark.kang <charsyam@naver.com> Date: Tue Mar 22 11:09:51 2016 +0900 fix typos commit f7b3b11 Author: Harisankar H <harisankarh@gmail.com> Date: Wed Mar 9 11:49:42 2016 +0530 Typo correction: "faield" --> "failed" Typo correction: "faield" --> "failed" commit 3fd40fc Author: Itamar Haber <itamar@redislabs.com> Date: Thu Feb 25 10:31:51 2016 +0200 Fixes a typo in comments commit 621c160 Author: Prayag Verma <prayag.verma@gmail.com> Date: Mon Feb 1 12:36:20 2016 +0530 Fix typo in Readme.md Spelling mistakes - `eviciton` > `eviction` `familar` > `familiar` commit d7d07d6 Author: WonCheol Lee <toctoc21c@gmail.com> Date: Wed Dec 30 15:11:34 2015 +0900 Typo fixed commit a4dade7 Author: Felix Bünemann <buenemann@louis.info> Date: Mon Dec 28 11:02:55 2015 +0100 [ci skip] Improve supervised upstart config docs This mentions that "expect stop" is required for supervised upstart to work correctly. See http://upstart.ubuntu.com/cookbook/#expect-stop for an explanation. 
commit d9caba9 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:30:03 2015 +1100 README: Remove trailing whitespace commit 72d42e5 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:29:32 2015 +1100 README: Fix typo. th => the commit dd6e957 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:29:20 2015 +1100 README: Fix typo. familar => familiar commit 3a12b23 Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:28:54 2015 +1100 README: Fix typo. eviciton => eviction commit 2d1d03b Author: daurnimator <quae@daurnimator.com> Date: Mon Dec 21 18:21:45 2015 +1100 README: Fix typo. sever => server commit 3973b06 Author: Itamar Haber <itamar@garantiadata.com> Date: Sat Dec 19 17:01:20 2015 +0200 Typo fix commit 4f2e460 Author: Steve Gao <fu@2token.com> Date: Fri Dec 4 10:22:05 2015 +0800 Update README - fix typos commit b21667c Author: binyan <binbin.yan@nokia.com> Date: Wed Dec 2 22:48:37 2015 +0800 delete redundancy color judge in sdscatcolor commit 88894c7 Author: binyan <binbin.yan@nokia.com> Date: Wed Dec 2 22:14:42 2015 +0800 the example output shoule be HelloWorld commit 2763470 Author: binyan <binbin.yan@nokia.com> Date: Wed Dec 2 17:41:39 2015 +0800 modify error word keyevente Signed-off-by: binyan <binbin.yan@nokia.com> commit 0847b3d Author: Bruno Martins <bscmartins@gmail.com> Date: Wed Nov 4 11:37:01 2015 +0000 typo commit bbb9e9e Author: dawedawe <dawedawe@gmx.de> Date: Fri Mar 27 00:46:41 2015 +0100 typo: zimap -> zipmap commit 5ed297e Author: Axel Advento <badwolf.bloodseeker.rev@gmail.com> Date: Tue Mar 3 15:58:29 2015 +0800 Fix 'salve' typos to 'slave' commit edec9d6 Author: LudwikJaniuk <ludvig.janiuk@gmail.com> Date: Wed Jun 12 14:12:47 2019 +0200 Update README.md Co-Authored-By: Qix <Qix-@users.noreply.github.com> commit 692a7af Author: LudwikJaniuk <ludvig.janiuk@gmail.com> Date: Tue May 28 14:32:04 2019 +0200 grammar commit d962b0a Author: Nick Frost <nickfrostatx@gmail.com> Date: Wed Jul 20 15:17:12 
2016 -0700 Minor grammar fix commit 24fff01aaccaf5956973ada8c50ceb1462e211c6 (typos) Author: Chad Miller <chadm@squareup.com> Date: Tue Sep 8 13:46:11 2020 -0400 Fix faulty comment about operation of unlink() commit 3cd5c1f3326c52aa552ada7ec797c6bb16452355 Author: Kevin <kevin.xgr@gmail.com> Date: Wed Nov 20 00:13:50 2019 +0800 Fix typo in server.c. From a83af59 Mon Sep 17 00:00:00 2001 From: wuwo <wuwo@wacai.com> Date: Fri, 17 Mar 2017 20:37:45 +0800 Subject: [PATCH] falure to failure From c961896 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B7=A6=E6=87=B6?= <veficos@gmail.com> Date: Sat, 27 May 2017 15:33:04 +0800 Subject: [PATCH] fix typo From e600ef2 Mon Sep 17 00:00:00 2001 From: "rui.zou" <rui.zou@yunify.com> Date: Sat, 30 Sep 2017 12:38:15 +0800 Subject: [PATCH] fix a typo From c7d07fa Mon Sep 17 00:00:00 2001 From: Alexandre Perrin <alex@kaworu.ch> Date: Thu, 16 Aug 2018 10:35:31 +0200 Subject: [PATCH] deps README.md typo From b25cb67 Mon Sep 17 00:00:00 2001 From: Guy Korland <gkorland@gmail.com> Date: Wed, 26 Sep 2018 10:55:37 +0300 Subject: [PATCH 1/2] fix typos in header From ad28ca6 Mon Sep 17 00:00:00 2001 From: Guy Korland <gkorland@gmail.com> Date: Wed, 26 Sep 2018 11:02:36 +0300 Subject: [PATCH 2/2] fix typos commit 34924cdedd8552466fc22c1168d49236cb7ee915 Author: Adrian Lynch <adi_ady_ade@hotmail.com> Date: Sat Apr 4 21:59:15 2015 +0100 Typos fixed commit fd2a1e7 Author: Jan <jsteemann@users.noreply.github.com> Date: Sat Oct 27 19:13:01 2018 +0200 Fix typos Fix typos commit e14e47c1a234b53b0e103c5f6a1c61481cbcbb02 Author: Andy Lester <andy@petdance.com> Date: Fri Aug 2 22:30:07 2019 -0500 Fix multiple misspellings of "following" commit 79b948ce2dac6b453fe80995abbcaac04c213d5a Author: Andy Lester <andy@petdance.com> Date: Fri Aug 2 22:24:28 2019 -0500 Fix misspelling of create-cluster commit 1fffde52666dc99ab35efbd31071a4c008cb5a71 Author: Andy Lester <andy@petdance.com> Date: Wed Jul 31 17:57:56 2019 -0500 Fix typos commit 
204c9ba9651e9e05fd73936b452b9a30be456cfe Author: Xiaobo Zhu <xiaobo.zhu@shopee.com> Date: Tue Aug 13 22:19:25 2019 +0800 fix typos Squashed commit of the following: commit 1d9aaf8 Author: danmedani <danmedani@gmail.com> Date: Sun Aug 2 11:40:26 2015 -0700 README typo fix. Squashed commit of the following: commit 32bfa7c Author: Erik Dubbelboer <erik@dubbelboer.com> Date: Mon Jul 6 21:15:08 2015 +0200 Fixed grammer Squashed commit of the following: commit b24f69c Author: Sisir Koppaka <sisir.koppaka@gmail.com> Date: Mon Mar 2 22:38:45 2015 -0500 utils/hashtable/rehashing.c: Fix typos Squashed commit of the following: commit 4e04082 Author: Erik Dubbelboer <erik@dubbelboer.com> Date: Mon Mar 23 08:22:21 2015 +0000 Small config file documentation improvements Squashed commit of the following: commit acb8773 Author: ctd1500 <ctd1500@gmail.com> Date: Fri May 8 01:52:48 2015 -0700 Typo and grammar fixes in readme commit 2eb75b6 Author: ctd1500 <ctd1500@gmail.com> Date: Fri May 8 01:36:18 2015 -0700 fixed redis.conf comment Squashed commit of the following: commit a8249a2 Author: Masahiko Sawada <sawada.mshk@gmail.com> Date: Fri Dec 11 11:39:52 2015 +0530 Revise correction of typos. 
Squashed commit of the following: commit 3c02028 Author: zhaojun11 <zhaojun11@jd.com> Date: Wed Jan 17 19:05:28 2018 +0800 Fix typos include two code typos in cluster.c and latency.c Squashed commit of the following: commit 9dba47c Author: q191201771 <191201771@qq.com> Date: Sat Jan 4 11:31:04 2020 +0800 fix function listCreate comment in adlist.c Update src/server.c commit 2c7c2cb536e78dd211b1ac6f7bda00f0f54faaeb Author: charpty <charpty@gmail.com> Date: Tue May 1 23:16:59 2018 +0800 server.c typo: modules system dictionary type comment Signed-off-by: charpty <charpty@gmail.com> commit a8395323fb63cb59cb3591cb0f0c8edb7c29a680 Author: Itamar Haber <itamar@redislabs.com> Date: Sun May 6 00:25:18 2018 +0300 Updates test_helper.tcl's help with undocumented options Specifically: * Host * Port * Client commit bde6f9ced15755cd6407b4af7d601b030f36d60b Author: wxisme <850885154@qq.com> Date: Wed Aug 8 15:19:19 2018 +0800 fix comments in deps files commit 3172474ba991532ab799ee1873439f3402412331 Author: wxisme <850885154@qq.com> Date: Wed Aug 8 14:33:49 2018 +0800 fix some comments commit 01b6f2b6858b5cf2ce4ad5092d2c746e755f53f0 Author: Thor Juhasz <thor@juhasz.pro> Date: Sun Nov 18 14:37:41 2018 +0100 Minor fixes to comments Found some parts a little unclear on a first read, which prompted me to have a better look at the file and fix some minor things I noticed. Fixing minor typos and grammar. There are no changes to configuration options. These changes are only meant to help the user better understand the explanations to the various configuration options
1260 lines
42 KiB
C
1260 lines
42 KiB
C
/* Hash Tables Implementation.
|
|
*
|
|
* This file implements in memory hash tables with insert/del/replace/find/
|
|
* get-random-element operations. Hash tables will auto resize if needed
|
|
* tables of power of two in size are used, collisions are handled by
|
|
* chaining. See the source code for more information... :)
|
|
*
|
|
* Copyright (c) 2006-2012, Salvatore Sanfilippo <antirez at gmail dot com>
|
|
* All rights reserved.
|
|
*
|
|
* Redistribution and use in source and binary forms, with or without
|
|
* modification, are permitted provided that the following conditions are met:
|
|
*
|
|
* * Redistributions of source code must retain the above copyright notice,
|
|
* this list of conditions and the following disclaimer.
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
* documentation and/or other materials provided with the distribution.
|
|
* * Neither the name of Redis nor the names of its contributors may be used
|
|
* to endorse or promote products derived from this software without
|
|
* specific prior written permission.
|
|
*
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
*/
|
|
|
|
#include "fmacros.h"
|
|
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <stdint.h>
|
|
#include <string.h>
|
|
#include <stdarg.h>
|
|
#include <limits.h>
|
|
#include <sys/time.h>
|
|
|
|
#include "dict.h"
|
|
#include "zmalloc.h"
|
|
#ifndef DICT_BENCHMARK_MAIN
|
|
#include "redisassert.h"
|
|
#else
|
|
#include <assert.h>
|
|
#endif
|
|
|
|
/* Using dictEnableResize() / dictDisableResize() we make possible to
 * enable/disable resizing of the hash table as needed. This is very important
 * for Redis, as we use copy-on-write and don't want to move too much memory
 * around when there is a child performing saving operations.
 *
 * Note that even when dict_can_resize is set to 0, not all resizes are
 * prevented: a hash table is still allowed to grow if the ratio between
 * the number of elements and the buckets > dict_force_resize_ratio. */
static int dict_can_resize = 1;                  /* Resizing allowed by default. */
static unsigned int dict_force_resize_ratio = 5; /* used/buckets ratio that forces a grow
                                                  * even when resizing is disabled. */
|
|
|
|
/* -------------------------- private prototypes ---------------------------- */
|
|
|
|
static int _dictExpandIfNeeded(dict *ht);
|
|
static unsigned long _dictNextPower(unsigned long size);
|
|
static long _dictKeyIndex(dict *ht, const void *key, uint64_t hash, dictEntry **existing);
|
|
static int _dictInit(dict *ht, dictType *type, void *privDataPtr);
|
|
|
|
/* -------------------------- hash functions -------------------------------- */
|
|
|
|
static uint8_t dict_hash_function_seed[16];
|
|
|
|
void dictSetHashFunctionSeed(uint8_t *seed) {
|
|
memcpy(dict_hash_function_seed,seed,sizeof(dict_hash_function_seed));
|
|
}
|
|
|
|
uint8_t *dictGetHashFunctionSeed(void) {
|
|
return dict_hash_function_seed;
|
|
}
|
|
|
|
/* The default hashing function uses SipHash implementation
|
|
* in siphash.c. */
|
|
|
|
uint64_t siphash(const uint8_t *in, const size_t inlen, const uint8_t *k);
|
|
uint64_t siphash_nocase(const uint8_t *in, const size_t inlen, const uint8_t *k);
|
|
|
|
uint64_t dictGenHashFunction(const void *key, int len) {
|
|
return siphash(key,len,dict_hash_function_seed);
|
|
}
|
|
|
|
uint64_t dictGenCaseHashFunction(const unsigned char *buf, int len) {
|
|
return siphash_nocase(buf,len,dict_hash_function_seed);
|
|
}
|
|
|
|
/* ----------------------------- API implementation ------------------------- */
|
|
|
|
/* Reset a hash table already initialized with ht_init().
|
|
* NOTE: This function should only be called by ht_destroy(). */
|
|
static void _dictReset(dictht *ht)
|
|
{
|
|
ht->table = NULL;
|
|
ht->size = 0;
|
|
ht->sizemask = 0;
|
|
ht->used = 0;
|
|
}
|
|
|
|
/* Create a new hash table */
|
|
dict *dictCreate(dictType *type,
|
|
void *privDataPtr)
|
|
{
|
|
dict *d = zmalloc(sizeof(*d));
|
|
|
|
_dictInit(d,type,privDataPtr);
|
|
return d;
|
|
}
|
|
|
|
/* Initialize the hash table */
|
|
int _dictInit(dict *d, dictType *type,
|
|
void *privDataPtr)
|
|
{
|
|
_dictReset(&d->ht[0]);
|
|
_dictReset(&d->ht[1]);
|
|
d->type = type;
|
|
d->privdata = privDataPtr;
|
|
d->rehashidx = -1;
|
|
d->iterators = 0;
|
|
return DICT_OK;
|
|
}
|
|
|
|
/* Resize the table to the minimal size that contains all the elements,
|
|
* but with the invariant of a USED/BUCKETS ratio near to <= 1 */
|
|
int dictResize(dict *d)
|
|
{
|
|
unsigned long minimal;
|
|
|
|
if (!dict_can_resize || dictIsRehashing(d)) return DICT_ERR;
|
|
minimal = d->ht[0].used;
|
|
if (minimal < DICT_HT_INITIAL_SIZE)
|
|
minimal = DICT_HT_INITIAL_SIZE;
|
|
return dictExpand(d, minimal);
|
|
}
|
|
|
|
/* Expand or create the hash table */
|
|
int dictExpand(dict *d, unsigned long size)
|
|
{
|
|
/* the size is invalid if it is smaller than the number of
|
|
* elements already inside the hash table */
|
|
if (dictIsRehashing(d) || d->ht[0].used > size)
|
|
return DICT_ERR;
|
|
|
|
dictht n; /* the new hash table */
|
|
unsigned long realsize = _dictNextPower(size);
|
|
|
|
/* Rehashing to the same table size is not useful. */
|
|
if (realsize == d->ht[0].size) return DICT_ERR;
|
|
|
|
/* Allocate the new hash table and initialize all pointers to NULL */
|
|
n.size = realsize;
|
|
n.sizemask = realsize-1;
|
|
n.table = zcalloc(realsize*sizeof(dictEntry*));
|
|
n.used = 0;
|
|
|
|
/* Is this the first initialization? If so it's not really a rehashing
|
|
* we just set the first hash table so that it can accept keys. */
|
|
if (d->ht[0].table == NULL) {
|
|
d->ht[0] = n;
|
|
return DICT_OK;
|
|
}
|
|
|
|
/* Prepare a second hash table for incremental rehashing */
|
|
d->ht[1] = n;
|
|
d->rehashidx = 0;
|
|
return DICT_OK;
|
|
}
|
|
|
|
/* Performs N steps of incremental rehashing. Returns 1 if there are still
 * keys to move from the old to the new hash table, otherwise 0 is returned.
 *
 * Note that a rehashing step consists in moving a bucket (that may have more
 * than one key as we use chaining) from the old to the new hash table, however
 * since part of the hash table may be composed of empty spaces, it is not
 * guaranteed that this function will rehash even a single bucket, since it
 * will visit at max N*10 empty buckets in total, otherwise the amount of
 * work it does would be unbound and the function may block for a long time. */
int dictRehash(dict *d, int n) {
    int empty_visits = n*10; /* Max number of empty buckets to visit. */
    if (!dictIsRehashing(d)) return 0;

    while(n-- && d->ht[0].used != 0) {
        dictEntry *de, *nextde;

        /* Note that rehashidx can't overflow as we are sure there are more
         * elements because ht[0].used != 0 */
        assert(d->ht[0].size > (unsigned long)d->rehashidx);
        /* Skip runs of empty buckets, but bail out (reporting "more work
         * to do") once the empty-bucket budget is exhausted. */
        while(d->ht[0].table[d->rehashidx] == NULL) {
            d->rehashidx++;
            if (--empty_visits == 0) return 1;
        }
        de = d->ht[0].table[d->rehashidx];
        /* Move all the keys in this bucket from the old to the new hash HT */
        while(de) {
            uint64_t h;

            nextde = de->next;
            /* Get the index in the new hash table */
            h = dictHashKey(d, de->key) & d->ht[1].sizemask;
            /* Head-insert the entry into the destination chain; 'nextde'
             * was saved above because this relink clobbers de->next. */
            de->next = d->ht[1].table[h];
            d->ht[1].table[h] = de;
            d->ht[0].used--;
            d->ht[1].used++;
            de = nextde;
        }
        d->ht[0].table[d->rehashidx] = NULL;
        d->rehashidx++;
    }

    /* Check if we already rehashed the whole table... */
    if (d->ht[0].used == 0) {
        /* Rehash complete: ht[1] becomes the primary table and the
         * dict leaves rehashing state (rehashidx = -1). */
        zfree(d->ht[0].table);
        d->ht[0] = d->ht[1];
        _dictReset(&d->ht[1]);
        d->rehashidx = -1;
        return 0;
    }

    /* More to rehash... */
    return 1;
}
|
|
|
|
long long timeInMilliseconds(void) {
|
|
struct timeval tv;
|
|
|
|
gettimeofday(&tv,NULL);
|
|
return (((long long)tv.tv_sec)*1000)+(tv.tv_usec/1000);
|
|
}
|
|
|
|
/* Rehash in ms+"delta" milliseconds. The value of "delta" is larger
|
|
* than 0, and is smaller than 1 in most cases. The exact upper bound
|
|
* depends on the running time of dictRehash(d,100).*/
|
|
int dictRehashMilliseconds(dict *d, int ms) {
|
|
if (d->iterators > 0) return 0;
|
|
|
|
long long start = timeInMilliseconds();
|
|
int rehashes = 0;
|
|
|
|
while(dictRehash(d,100)) {
|
|
rehashes += 100;
|
|
if (timeInMilliseconds()-start > ms) break;
|
|
}
|
|
return rehashes;
|
|
}
|
|
|
|
/* This function performs just a step of rehashing, and only if there are
|
|
* no safe iterators bound to our hash table. When we have iterators in the
|
|
* middle of a rehashing we can't mess with the two hash tables otherwise
|
|
* some element can be missed or duplicated.
|
|
*
|
|
* This function is called by common lookup or update operations in the
|
|
* dictionary so that the hash table automatically migrates from H1 to H2
|
|
* while it is actively used. */
|
|
static void _dictRehashStep(dict *d) {
|
|
if (d->iterators == 0) dictRehash(d,1);
|
|
}
|
|
|
|
/* Add an element to the target hash table */
|
|
int dictAdd(dict *d, void *key, void *val)
|
|
{
|
|
dictEntry *entry = dictAddRaw(d,key,NULL);
|
|
|
|
if (!entry) return DICT_ERR;
|
|
dictSetVal(d, entry, val);
|
|
return DICT_OK;
|
|
}
|
|
|
|
/* Low level add or find:
|
|
* This function adds the entry but instead of setting a value returns the
|
|
* dictEntry structure to the user, that will make sure to fill the value
|
|
* field as he wishes.
|
|
*
|
|
* This function is also directly exposed to the user API to be called
|
|
* mainly in order to store non-pointers inside the hash value, example:
|
|
*
|
|
* entry = dictAddRaw(dict,mykey,NULL);
|
|
* if (entry != NULL) dictSetSignedIntegerVal(entry,1000);
|
|
*
|
|
* Return values:
|
|
*
|
|
* If key already exists NULL is returned, and "*existing" is populated
|
|
* with the existing entry if existing is not NULL.
|
|
*
|
|
* If key was added, the hash entry is returned to be manipulated by the caller.
|
|
*/
|
|
dictEntry *dictAddRaw(dict *d, void *key, dictEntry **existing)
|
|
{
|
|
long index;
|
|
dictEntry *entry;
|
|
dictht *ht;
|
|
|
|
if (dictIsRehashing(d)) _dictRehashStep(d);
|
|
|
|
/* Get the index of the new element, or -1 if
|
|
* the element already exists. */
|
|
if ((index = _dictKeyIndex(d, key, dictHashKey(d,key), existing)) == -1)
|
|
return NULL;
|
|
|
|
/* Allocate the memory and store the new entry.
|
|
* Insert the element in top, with the assumption that in a database
|
|
* system it is more likely that recently added entries are accessed
|
|
* more frequently. */
|
|
ht = dictIsRehashing(d) ? &d->ht[1] : &d->ht[0];
|
|
entry = zmalloc(sizeof(*entry));
|
|
entry->next = ht->table[index];
|
|
ht->table[index] = entry;
|
|
ht->used++;
|
|
|
|
/* Set the hash entry fields. */
|
|
dictSetKey(d, entry, key);
|
|
return entry;
|
|
}
|
|
|
|
/* Add or Overwrite:
|
|
* Add an element, discarding the old value if the key already exists.
|
|
* Return 1 if the key was added from scratch, 0 if there was already an
|
|
* element with such key and dictReplace() just performed a value update
|
|
* operation. */
|
|
int dictReplace(dict *d, void *key, void *val)
{
    dictEntry *added, *old, saved;

    /* Attempt a plain insert first: dictAddRaw() returns a fresh entry
     * when the key is new, and reports the clashing entry otherwise. */
    added = dictAddRaw(d,key,&old);
    if (added != NULL) {
        dictSetVal(d, added, val);
        return 1;
    }

    /* The key already exists: install the new value, then release the
     * old one. This order matters when the new value is the very same
     * reference-counted object as the old one — increment (set) first,
     * decrement (free) second. */
    saved = *old;
    dictSetVal(d, old, val);
    dictFreeVal(d, &saved);
    return 0;
}
|
|
|
|
/* Add or Find:
|
|
* dictAddOrFind() is simply a version of dictAddRaw() that always
|
|
* returns the hash entry of the specified key, even if the key already
|
|
* exists and can't be added (in that case the entry of the already
|
|
* existing key is returned.)
|
|
*
|
|
* See dictAddRaw() for more information. */
|
|
dictEntry *dictAddOrFind(dict *d, void *key) {
    /* Insert-or-lookup: return the new entry on success, otherwise the
     * entry already holding 'key'. See dictAddRaw() for details. */
    dictEntry *existing = NULL;
    dictEntry *added = dictAddRaw(d,key,&existing);
    if (added != NULL) return added;
    return existing;
}
|
|
|
|
/* Search and remove an element. This is a helper function for
 * dictDelete() and dictUnlink(), please check the top comment
 * of those functions. */
|
|
/* 'nofree' selects the two public behaviors: 0 = delete (key/val/entry
 * released), 1 = unlink only (entry returned alive for the caller).
 * NOTE: when nofree==0 the returned pointer refers to freed memory and
 * must only be compared against NULL by the caller. */
static dictEntry *dictGenericDelete(dict *d, const void *key, int nofree) {
    uint64_t h, idx;
    dictEntry *he, *prevHe;
    int table;

    /* Fast path: nothing stored in either table. */
    if (d->ht[0].used == 0 && d->ht[1].used == 0) return NULL;

    /* Piggyback one incremental rehash step on this write operation. */
    if (dictIsRehashing(d)) _dictRehashStep(d);
    h = dictHashKey(d, key);

    for (table = 0; table <= 1; table++) {
        idx = h & d->ht[table].sizemask;
        he = d->ht[table].table[idx];
        prevHe = NULL;
        while(he) {
            if (key==he->key || dictCompareKeys(d, key, he->key)) {
                /* Unlink the element from the list */
                if (prevHe)
                    prevHe->next = he->next;
                else
                    d->ht[table].table[idx] = he->next;
                if (!nofree) {
                    dictFreeKey(d, he);
                    dictFreeVal(d, he);
                    zfree(he);
                }
                d->ht[table].used--;
                return he;
            }
            prevHe = he;
            he = he->next;
        }
        /* The second table only holds entries while rehashing. */
        if (!dictIsRehashing(d)) break;
    }
    return NULL; /* not found */
}
|
|
|
|
/* Remove an element, returning DICT_OK on success or DICT_ERR if the
|
|
* element was not found. */
|
|
int dictDelete(dict *ht, const void *key) {
    /* Delegate to the generic helper with nofree=0 so key, value and
     * entry are all released. */
    if (dictGenericDelete(ht,key,0) != NULL) return DICT_OK;
    return DICT_ERR;
}
|
|
|
|
/* Remove an element from the table, but without actually releasing
|
|
* the key, value and dictionary entry. The dictionary entry is returned
|
|
* if the element was found (and unlinked from the table), and the user
|
|
* should later call `dictFreeUnlinkedEntry()` with it in order to release it.
|
|
* Otherwise if the key is not found, NULL is returned.
|
|
*
|
|
* This function is useful when we want to remove something from the hash
|
|
* table but want to use its value before actually deleting the entry.
|
|
* Without this function the pattern would require two lookups:
|
|
*
|
|
* entry = dictFind(...);
|
|
* // Do something with entry
|
|
* dictDelete(dictionary,entry);
|
|
*
|
|
* Thanks to this function it is possible to avoid this, and use
|
|
* instead:
|
|
*
|
|
* entry = dictUnlink(dictionary,entry);
|
|
* // Do something with entry
|
|
* dictFreeUnlinkedEntry(entry); // <- This does not need to lookup again.
|
|
*/
|
|
dictEntry *dictUnlink(dict *ht, const void *key) {
    /* nofree=1: detach the entry but keep key/value alive; the caller
     * must later release it with dictFreeUnlinkedEntry(). */
    dictEntry *he = dictGenericDelete(ht,key,1);
    return he;
}
|
|
|
|
/* You need to call this function to really free the entry after a call
|
|
* to dictUnlink(). It's safe to call this function with 'he' = NULL. */
|
|
void dictFreeUnlinkedEntry(dict *d, dictEntry *he) {
    /* Release an entry previously detached with dictUnlink().
     * Passing NULL is a safe no-op. */
    if (he != NULL) {
        dictFreeKey(d, he);
        dictFreeVal(d, he);
        zfree(he);
    }
}
|
|
|
|
/* Destroy an entire dictionary */
|
|
/* Destroy one of the two hash tables of 'd', releasing every entry and
 * resetting the table to its empty state. 'callback', when not NULL, is
 * invoked with d->privdata every 65536 buckets (including bucket 0) so
 * long-running clears can report progress / stay responsive. */
int _dictClear(dict *d, dictht *ht, void(callback)(void *)) {
    unsigned long i;

    /* Free all the elements. Stop early once 'used' drops to zero. */
    for (i = 0; i < ht->size && ht->used > 0; i++) {
        dictEntry *he, *nextHe;

        if (callback && (i & 65535) == 0) callback(d->privdata);

        if ((he = ht->table[i]) == NULL) continue;
        while(he) {
            nextHe = he->next;
            dictFreeKey(d, he);
            dictFreeVal(d, he);
            zfree(he);
            ht->used--;
            he = nextHe;
        }
    }
    /* Free the table and the allocated cache structure */
    zfree(ht->table);
    /* Re-initialize the table */
    _dictReset(ht);
    return DICT_OK; /* never fails */
}
|
|
|
|
/* Clear & Release the hash table */
|
|
void dictRelease(dict *d)
{
    /* Free both hash tables (the second is only populated while
     * rehashing) and then the dict structure itself. */
    int j;

    for (j = 0; j < 2; j++)
        _dictClear(d,&d->ht[j],NULL);
    zfree(d);
}
|
|
|
|
/* Look up 'key' and return its entry, or NULL if not present. While
 * rehashing both tables are probed (old first, then new). */
dictEntry *dictFind(dict *d, const void *key)
{
    dictEntry *he;
    uint64_t h, idx, table;

    if (dictSize(d) == 0) return NULL; /* dict is empty */
    /* Piggyback one incremental rehash step on this read operation. */
    if (dictIsRehashing(d)) _dictRehashStep(d);
    h = dictHashKey(d, key);
    for (table = 0; table <= 1; table++) {
        idx = h & d->ht[table].sizemask;
        he = d->ht[table].table[idx];
        while(he) {
            /* Pointer equality short-circuits the (possibly costly)
             * key comparison callback. */
            if (key==he->key || dictCompareKeys(d, key, he->key))
                return he;
            he = he->next;
        }
        if (!dictIsRehashing(d)) return NULL;
    }
    return NULL;
}
|
|
|
|
void *dictFetchValue(dict *d, const void *key) {
    /* Convenience wrapper: look the key up and return its value, or
     * NULL when the key is not in the dictionary. */
    dictEntry *he = dictFind(d,key);
    if (he == NULL) return NULL;
    return dictGetVal(he);
}
|
|
|
|
/* A fingerprint is a 64 bit number that represents the state of the dictionary
|
|
* at a given time, it's just a few dict properties xored together.
|
|
* When an unsafe iterator is initialized, we get the dict fingerprint, and check
|
|
* the fingerprint again when the iterator is released.
|
|
* If the two fingerprints are different it means that the user of the iterator
|
|
* performed forbidden operations against the dictionary while iterating. */
|
|
/* Compute the dict fingerprint: a 64-bit digest of the table pointers,
 * sizes and element counts. Unsafe iterators snapshot it at start and
 * assert it unchanged at release to detect forbidden mutations. */
long long dictFingerprint(dict *d) {
    long long integers[6], hash = 0;
    int j;

    integers[0] = (long) d->ht[0].table;
    integers[1] = d->ht[0].size;
    integers[2] = d->ht[0].used;
    integers[3] = (long) d->ht[1].table;
    integers[4] = d->ht[1].size;
    integers[5] = d->ht[1].used;

    /* We hash N integers by summing every successive integer with the integer
     * hashing of the previous sum. Basically:
     *
     * Result = hash(hash(hash(int1)+int2)+int3) ...
     *
     * This way the same set of integers in a different order will (likely) hash
     * to a different number. */
    for (j = 0; j < 6; j++) {
        hash += integers[j];
        /* For the hashing step we use Tomas Wang's 64 bit integer hash. */
        hash = (~hash) + (hash << 21); // hash = (hash << 21) - hash - 1;
        hash = hash ^ (hash >> 24);
        hash = (hash + (hash << 3)) + (hash << 8); // hash * 265
        hash = hash ^ (hash >> 14);
        hash = (hash + (hash << 2)) + (hash << 4); // hash * 21
        hash = hash ^ (hash >> 28);
        hash = hash + (hash << 31);
    }
    return hash;
}
|
|
|
|
dictIterator *dictGetIterator(dict *d)
{
    /* Allocate an unsafe iterator positioned before the first bucket
     * (index -1 of table 0); dictNext() advances it lazily. */
    dictIterator *it = zmalloc(sizeof(*it));

    it->d = d;
    it->table = 0;
    it->index = -1;
    it->safe = 0;
    it->entry = NULL;
    it->nextEntry = NULL;
    return it;
}
|
|
|
|
dictIterator *dictGetSafeIterator(dict *d) {
    /* Like dictGetIterator(), but flagged safe: while such an iterator
     * is in flight the dict pauses incremental rehashing. */
    dictIterator *iter = dictGetIterator(d);
    iter->safe = 1;
    return iter;
}
|
|
|
|
/* Advance the iterator and return the next entry, or NULL when the
 * iteration is over. The first call also "arms" the iterator: a safe
 * one increments d->iterators (pausing rehash), an unsafe one records
 * the fingerprint checked later by dictReleaseIterator(). */
dictEntry *dictNext(dictIterator *iter)
{
    while (1) {
        if (iter->entry == NULL) {
            /* End of a chain (or first call): move to the next bucket. */
            dictht *ht = &iter->d->ht[iter->table];
            if (iter->index == -1 && iter->table == 0) {
                /* Very first advance: arm the iterator. */
                if (iter->safe)
                    iter->d->iterators++;
                else
                    iter->fingerprint = dictFingerprint(iter->d);
            }
            iter->index++;
            if (iter->index >= (long) ht->size) {
                /* Past the end of this table: continue into ht[1] only
                 * if a rehash is in progress, otherwise we are done. */
                if (dictIsRehashing(iter->d) && iter->table == 0) {
                    iter->table++;
                    iter->index = 0;
                    ht = &iter->d->ht[1];
                } else {
                    break;
                }
            }
            iter->entry = ht->table[iter->index];
        } else {
            iter->entry = iter->nextEntry;
        }
        if (iter->entry) {
            /* We need to save the 'next' here, the iterator user
             * may delete the entry we are returning. */
            iter->nextEntry = iter->entry->next;
            return iter->entry;
        }
    }
    return NULL;
}
|
|
|
|
void dictReleaseIterator(dictIterator *iter)
{
    /* An iterator that never advanced (index -1, table 0) was never
     * "armed" by dictNext(), so there is nothing to undo. */
    int started = !(iter->index == -1 && iter->table == 0);

    if (started) {
        if (iter->safe) {
            /* Let incremental rehashing resume. */
            iter->d->iterators--;
        } else {
            /* Unsafe iterator: verify the dict was not mutated. */
            assert(iter->fingerprint == dictFingerprint(iter->d));
        }
    }
    zfree(iter);
}
|
|
|
|
/* Return a random entry from the hash table. Useful to
|
|
* implement randomized algorithms */
|
|
dictEntry *dictGetRandomKey(dict *d)
{
    dictEntry *he, *orighe;
    unsigned long h;
    int listlen, listele;

    if (dictSize(d) == 0) return NULL;
    /* Piggyback one incremental rehash step on this operation. */
    if (dictIsRehashing(d)) _dictRehashStep(d);
    if (dictIsRehashing(d)) {
        do {
            /* We are sure there are no elements in indexes from 0
             * to rehashidx-1, so draw from the remaining slots of
             * ht[0] plus all of ht[1]. */
            h = d->rehashidx + (random() % (dictSlots(d) - d->rehashidx));
            he = (h >= d->ht[0].size) ? d->ht[1].table[h - d->ht[0].size] :
                                      d->ht[0].table[h];
        } while(he == NULL);
    } else {
        /* Single table: rejection-sample a non-empty bucket. */
        do {
            h = random() & d->ht[0].sizemask;
            he = d->ht[0].table[h];
        } while(he == NULL);
    }

    /* Now we found a non empty bucket, but it is a linked
     * list and we need to get a random element from the list.
     * The only sane way to do so is counting the elements and
     * select a random index. (This biases toward elements in
     * short chains; see dictGetFairRandomKey() for a fairer pick.) */
    listlen = 0;
    orighe = he;
    while(he) {
        he = he->next;
        listlen++;
    }
    listele = random() % listlen;
    he = orighe;
    while(listele--) he = he->next;
    return he;
}
|
|
|
|
/* This function samples the dictionary to return a few keys from random
|
|
* locations.
|
|
*
|
|
 * It does not guarantee to return all the keys specified in 'count', nor
 * does it guarantee to return non-duplicated elements, however it will make
 * some effort to do both things.
|
|
*
|
|
* Returned pointers to hash table entries are stored into 'des' that
|
|
* points to an array of dictEntry pointers. The array must have room for
|
|
* at least 'count' elements, that is the argument we pass to the function
|
|
* to tell how many random elements we need.
|
|
*
|
|
* The function returns the number of items stored into 'des', that may
|
|
* be less than 'count' if the hash table has less than 'count' elements
|
|
* inside, or if not enough elements were found in a reasonable amount of
|
|
* steps.
|
|
*
|
|
* Note that this function is not suitable when you need a good distribution
|
|
* of the returned items, but only when you need to "sample" a given number
|
|
* of continuous elements to run some kind of algorithm or to produce
|
|
* statistics. However the function is much faster than dictGetRandomKey()
|
|
* at producing N elements. */
|
|
unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) {
    unsigned long j; /* internal hash table id, 0 or 1. */
    unsigned long tables; /* 1 or 2 tables? */
    unsigned long stored = 0, maxsizemask;
    unsigned long maxsteps;

    /* Can never return more keys than the dict holds. */
    if (dictSize(d) < count) count = dictSize(d);
    /* Bound the bucket walk so a sparse table can't loop for long. */
    maxsteps = count*10;

    /* Try to do a rehashing work proportional to 'count'. */
    for (j = 0; j < count; j++) {
        if (dictIsRehashing(d))
            _dictRehashStep(d);
        else
            break;
    }

    tables = dictIsRehashing(d) ? 2 : 1;
    maxsizemask = d->ht[0].sizemask;
    if (tables > 1 && maxsizemask < d->ht[1].sizemask)
        maxsizemask = d->ht[1].sizemask;

    /* Pick a random point inside the larger table. */
    unsigned long i = random() & maxsizemask;
    unsigned long emptylen = 0; /* Continuous empty entries so far. */
    while(stored < count && maxsteps--) {
        for (j = 0; j < tables; j++) {
            /* Invariant of the dict.c rehashing: up to the indexes already
             * visited in ht[0] during the rehashing, there are no populated
             * buckets, so we can skip ht[0] for indexes between 0 and idx-1. */
            if (tables == 2 && j == 0 && i < (unsigned long) d->rehashidx) {
                /* Moreover, if we are currently out of range in the second
                 * table, there will be no elements in both tables up to
                 * the current rehashing index, so we jump if possible.
                 * (this happens when going from big to small table). */
                if (i >= d->ht[1].size)
                    i = d->rehashidx;
                else
                    continue;
            }
            if (i >= d->ht[j].size) continue; /* Out of range for this table. */
            dictEntry *he = d->ht[j].table[i];

            /* Count contiguous empty buckets, and jump to other
             * locations if they reach 'count' (with a minimum of 5). */
            if (he == NULL) {
                emptylen++;
                if (emptylen >= 5 && emptylen > count) {
                    i = random() & maxsizemask;
                    emptylen = 0;
                }
            } else {
                emptylen = 0;
                while (he) {
                    /* Collect all the elements of the buckets found non
                     * empty while iterating. */
                    *des = he;
                    des++;
                    he = he->next;
                    stored++;
                    if (stored == count) return stored;
                }
            }
        }
        i = (i+1) & maxsizemask;
    }
    return stored;
}
|
|
|
|
/* This is like dictGetRandomKey() from the POV of the API, but will do more
|
|
* work to ensure a better distribution of the returned element.
|
|
*
|
|
* This function improves the distribution because the dictGetRandomKey()
|
|
* problem is that it selects a random bucket, then it selects a random
|
|
* element from the chain in the bucket. However elements being in different
|
|
* chain lengths will have different probabilities of being reported. With
|
|
* this function instead what we do is to consider a "linear" range of the table
|
|
* that may be constituted of N buckets with chains of different lengths
|
|
* appearing one after the other. Then we report a random element in the range.
|
|
* In this way we smooth away the problem of different chain lengths. */
|
|
#define GETFAIR_NUM_ENTRIES 15
|
|
dictEntry *dictGetFairRandomKey(dict *d) {
    /* Sample a linear run of up to GETFAIR_NUM_ENTRIES entries and pick
     * uniformly among them, smoothing the chain-length bias of
     * dictGetRandomKey(). */
    dictEntry *samples[GETFAIR_NUM_ENTRIES];
    unsigned int count = dictGetSomeKeys(d,samples,GETFAIR_NUM_ENTRIES);

    /* dictGetSomeKeys() may come back empty on an unlucky run even when
     * the dict is not empty: fall back to the plain random pick, which
     * always yields an element if at least one exists. */
    if (count == 0) return dictGetRandomKey(d);
    return samples[rand() % count];
}
|
|
|
|
/* Function to reverse bits. Algorithm from:
|
|
* http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel */
|
|
static unsigned long rev(unsigned long v) {
    /* Reverse the bit order of 'v' using the O(log bits) "reverse in
     * parallel" swap scheme (Stanford bit-twiddling hacks). Requires
     * the word width to be a power of two. */
    unsigned long bits = CHAR_BIT * sizeof(v);
    unsigned long mask = ~0UL;

    for (bits >>= 1; bits > 0; bits >>= 1) {
        mask ^= mask << bits;
        v = ((v >> bits) & mask) | ((v << bits) & ~mask);
    }
    return v;
}
|
|
|
|
/* dictScan() is used to iterate over the elements of a dictionary.
|
|
*
|
|
* Iterating works the following way:
|
|
*
|
|
* 1) Initially you call the function using a cursor (v) value of 0.
|
|
* 2) The function performs one step of the iteration, and returns the
|
|
* new cursor value you must use in the next call.
|
|
* 3) When the returned cursor is 0, the iteration is complete.
|
|
*
|
|
* The function guarantees all elements present in the
|
|
* dictionary get returned between the start and end of the iteration.
|
|
* However it is possible some elements get returned multiple times.
|
|
*
|
|
* For every element returned, the callback argument 'fn' is
|
|
* called with 'privdata' as first argument and the dictionary entry
|
|
* 'de' as second argument.
|
|
*
|
|
* HOW IT WORKS.
|
|
*
|
|
* The iteration algorithm was designed by Pieter Noordhuis.
|
|
* The main idea is to increment a cursor starting from the higher order
|
|
* bits. That is, instead of incrementing the cursor normally, the bits
|
|
* of the cursor are reversed, then the cursor is incremented, and finally
|
|
* the bits are reversed again.
|
|
*
|
|
* This strategy is needed because the hash table may be resized between
|
|
* iteration calls.
|
|
*
|
|
* dict.c hash tables are always power of two in size, and they
|
|
* use chaining, so the position of an element in a given table is given
|
|
* by computing the bitwise AND between Hash(key) and SIZE-1
|
|
* (where SIZE-1 is always the mask that is equivalent to taking the rest
|
|
* of the division between the Hash of the key and SIZE).
|
|
*
|
|
* For example if the current hash table size is 16, the mask is
|
|
* (in binary) 1111. The position of a key in the hash table will always be
|
|
* the last four bits of the hash output, and so forth.
|
|
*
|
|
* WHAT HAPPENS IF THE TABLE CHANGES IN SIZE?
|
|
*
|
|
* If the hash table grows, elements can go anywhere in one multiple of
|
|
* the old bucket: for example let's say we already iterated with
|
|
* a 4 bit cursor 1100 (the mask is 1111 because hash table size = 16).
|
|
*
|
|
* If the hash table will be resized to 64 elements, then the new mask will
|
|
* be 111111. The new buckets you obtain by substituting in ??1100
|
|
* with either 0 or 1 can be targeted only by keys we already visited
|
|
* when scanning the bucket 1100 in the smaller hash table.
|
|
*
|
|
* By iterating the higher bits first, because of the inverted counter, the
|
|
* cursor does not need to restart if the table size gets bigger. It will
|
|
* continue iterating using cursors without '1100' at the end, and also
|
|
* without any other combination of the final 4 bits already explored.
|
|
*
|
|
* Similarly when the table size shrinks over time, for example going from
|
|
* 16 to 8, if a combination of the lower three bits (the mask for size 8
|
|
* is 111) were already completely explored, it would not be visited again
|
|
* because we are sure we tried, for example, both 0111 and 1111 (all the
|
|
* variations of the higher bit) so we don't need to test it again.
|
|
*
|
|
* WAIT... YOU HAVE *TWO* TABLES DURING REHASHING!
|
|
*
|
|
* Yes, this is true, but we always iterate the smaller table first, then
|
|
* we test all the expansions of the current cursor into the larger
|
|
* table. For example if the current cursor is 101 and we also have a
|
|
* larger table of size 16, we also test (0)101 and (1)101 inside the larger
|
|
* table. This reduces the problem back to having only one table, where
|
|
* the larger one, if it exists, is just an expansion of the smaller one.
|
|
*
|
|
* LIMITATIONS
|
|
*
|
|
* This iterator is completely stateless, and this is a huge advantage,
|
|
* including no additional memory used.
|
|
*
|
|
* The disadvantages resulting from this design are:
|
|
*
|
|
* 1) It is possible we return elements more than once. However this is usually
|
|
* easy to deal with in the application level.
|
|
* 2) The iterator must return multiple elements per call, as it needs to always
|
|
* return all the keys chained in a given bucket, and all the expansions, so
|
|
* we are sure we don't miss keys moving during rehashing.
|
|
* 3) The reverse cursor is somewhat hard to understand at first, but this
|
|
* comment is supposed to help.
|
|
*/
|
|
unsigned long dictScan(dict *d,
                       unsigned long v,
                       dictScanFunction *fn,
                       dictScanBucketFunction* bucketfn,
                       void *privdata)
{
    dictht *t0, *t1;
    const dictEntry *de, *next;
    unsigned long m0, m1;

    if (dictSize(d) == 0) return 0;

    /* Having a safe iterator means no rehashing can happen, see _dictRehashStep.
     * This is needed in case the scan callback tries to do dictFind or alike. */
    d->iterators++;

    if (!dictIsRehashing(d)) {
        /* Single-table case. */
        t0 = &(d->ht[0]);
        m0 = t0->sizemask;

        /* Emit entries at cursor */
        if (bucketfn) bucketfn(privdata, &t0->table[v & m0]);
        de = t0->table[v & m0];
        while (de) {
            next = de->next; /* saved: fn may delete 'de' */
            fn(privdata, de);
            de = next;
        }

        /* Set unmasked bits so incrementing the reversed cursor
         * operates on the masked bits */
        v |= ~m0;

        /* Increment the reverse cursor */
        v = rev(v);
        v++;
        v = rev(v);

    } else {
        t0 = &d->ht[0];
        t1 = &d->ht[1];

        /* Make sure t0 is the smaller and t1 is the bigger table */
        if (t0->size > t1->size) {
            t0 = &d->ht[1];
            t1 = &d->ht[0];
        }

        m0 = t0->sizemask;
        m1 = t1->sizemask;

        /* Emit entries at cursor */
        if (bucketfn) bucketfn(privdata, &t0->table[v & m0]);
        de = t0->table[v & m0];
        while (de) {
            next = de->next; /* saved: fn may delete 'de' */
            fn(privdata, de);
            de = next;
        }

        /* Iterate over indices in larger table that are the expansion
         * of the index pointed to by the cursor in the smaller table */
        do {
            /* Emit entries at cursor */
            if (bucketfn) bucketfn(privdata, &t1->table[v & m1]);
            de = t1->table[v & m1];
            while (de) {
                next = de->next; /* saved: fn may delete 'de' */
                fn(privdata, de);
                de = next;
            }

            /* Increment the reverse cursor not covered by the smaller mask.*/
            v |= ~m1;
            v = rev(v);
            v++;
            v = rev(v);

            /* Continue while bits covered by mask difference is non-zero */
        } while (v & (m0 ^ m1));
    }

    /* undo the ++ at the top */
    d->iterators--;

    return v;
}
|
|
|
|
/* ------------------------- private functions ------------------------------ */
|
|
|
|
/* Expand the hash table if needed */
|
|
/* Expand the hash table if needed. Returns DICT_OK, or DICT_ERR if an
 * expansion was required but dictExpand() failed. */
static int _dictExpandIfNeeded(dict *d)
{
    /* Incremental rehashing already in progress. Return. */
    if (dictIsRehashing(d)) return DICT_OK;

    /* If the hash table is empty expand it to the initial size. */
    if (d->ht[0].size == 0) return dictExpand(d, DICT_HT_INITIAL_SIZE);

    /* If we reached the 1:1 ratio, and we are allowed to resize the hash
     * table (global setting) or we should avoid it but the ratio between
     * elements/buckets is over the "safe" threshold, we resize doubling
     * the number of buckets. */
    if (d->ht[0].used >= d->ht[0].size &&
        (dict_can_resize ||
         d->ht[0].used/d->ht[0].size > dict_force_resize_ratio))
    {
        return dictExpand(d, d->ht[0].used*2);
    }
    return DICT_OK;
}
|
|
|
|
/* Our hash table capability is a power of two */
|
|
static unsigned long _dictNextPower(unsigned long size)
|
|
{
|
|
unsigned long i = DICT_HT_INITIAL_SIZE;
|
|
|
|
if (size >= LONG_MAX) return LONG_MAX + 1LU;
|
|
while(1) {
|
|
if (i >= size)
|
|
return i;
|
|
i *= 2;
|
|
}
|
|
}
|
|
|
|
/* Returns the index of a free slot that can be populated with
|
|
* a hash entry for the given 'key'.
|
|
* If the key already exists, -1 is returned
|
|
* and the optional output parameter may be filled.
|
|
*
|
|
* Note that if we are in the process of rehashing the hash table, the
|
|
* index is always returned in the context of the second (new) hash table. */
|
|
/* Returns the index of a free slot that can be populated with a hash
 * entry for the given 'key' (whose hash is passed pre-computed in
 * 'hash'). If the key already exists, -1 is returned and, when
 * 'existing' is non-NULL, *existing is set to the clashing entry.
 * May trigger a table expansion (and therefore start a rehash); while
 * rehashing the returned index always refers to ht[1]. */
static long _dictKeyIndex(dict *d, const void *key, uint64_t hash, dictEntry **existing)
{
    unsigned long idx, table;
    dictEntry *he;
    if (existing) *existing = NULL;

    /* Expand the hash table if needed */
    if (_dictExpandIfNeeded(d) == DICT_ERR)
        return -1;
    for (table = 0; table <= 1; table++) {
        idx = hash & d->ht[table].sizemask;
        /* Search if this slot does not already contain the given key */
        he = d->ht[table].table[idx];
        while(he) {
            if (key==he->key || dictCompareKeys(d, key, he->key)) {
                if (existing) *existing = he;
                return -1;
            }
            he = he->next;
        }
        /* Only probe ht[1] while rehashing; the loop then exits with
         * 'idx' computed against ht[1]'s sizemask, as documented. */
        if (!dictIsRehashing(d)) break;
    }
    return idx;
}
|
|
|
|
void dictEmpty(dict *d, void(callback)(void*)) {
    /* Drop every entry from both tables and reset rehash/iterator
     * state, leaving an empty but still usable dictionary. The callback
     * is forwarded to _dictClear() for progress reporting. */
    int j;

    for (j = 0; j < 2; j++)
        _dictClear(d,&d->ht[j],callback);
    d->rehashidx = -1;
    d->iterators = 0;
}
|
|
|
|
/* Globally allow tables to grow again (see _dictExpandIfNeeded, which
 * consults the dict_can_resize flag). */
void dictEnableResize(void) {
    dict_can_resize = 1;
}
|
|
|
|
/* Globally discourage resizing; _dictExpandIfNeeded still forces an
 * expansion past dict_force_resize_ratio. */
void dictDisableResize(void) {
    dict_can_resize = 0;
}
|
|
|
|
/* Expose the dict's hash function so callers can pre-compute a key's
 * hash (e.g. for dictFindEntryRefByPtrAndHash below). */
uint64_t dictGetHash(dict *d, const void *key) {
    return dictHashKey(d, key);
}
|
|
|
|
/* Finds the dictEntry reference by using pointer and pre-calculated hash.
|
|
* oldkey is a dead pointer and should not be accessed.
|
|
* the hash value should be provided using dictGetHash.
|
|
* no string / key comparison is performed.
|
|
* return value is the reference to the dictEntry if found, or NULL if not found. */
|
|
/* Finds the dictEntry reference by using pointer and pre-calculated hash.
 * oldptr is a dead pointer and should not be accessed (only compared).
 * The hash value should be provided using dictGetHash.
 * No string / key comparison is performed.
 * Return value is the reference to the dictEntry if found, or NULL if not found. */
dictEntry **dictFindEntryRefByPtrAndHash(dict *d, const void *oldptr, uint64_t hash) {
    dictEntry *he, **heref;
    unsigned long idx, table;

    if (dictSize(d) == 0) return NULL; /* dict is empty */
    for (table = 0; table <= 1; table++) {
        idx = hash & d->ht[table].sizemask;
        heref = &d->ht[table].table[idx];
        he = *heref;
        while(he) {
            /* Identity comparison only: the key may point to freed
             * memory, so it must never be dereferenced. */
            if (oldptr==he->key)
                return heref;
            heref = &he->next;
            he = *heref;
        }
        if (!dictIsRehashing(d)) return NULL;
    }
    return NULL;
}
|
|
|
|
/* ------------------------------- Debugging ---------------------------------*/
|
|
|
|
#define DICT_STATS_VECTLEN 50
|
|
/* Render human-readable statistics for one hash table into 'buf'
 * (at most 'bufsize' bytes, always NUL-terminated when bufsize > 0).
 * 'tableid' selects the label: 0 is the main table, 1 the rehashing
 * target. Returns the number of characters actually written, which —
 * unlike a raw snprintf() return — accounts for truncation. */
size_t _dictGetStatsHt(char *buf, size_t bufsize, dictht *ht, int tableid) {
    unsigned long i, slots = 0, chainlen, maxchainlen = 0;
    unsigned long totchainlen = 0;
    unsigned long clvector[DICT_STATS_VECTLEN];
    size_t l = 0;

    if (ht->used == 0) {
        return snprintf(buf,bufsize,
            "No stats available for empty dictionaries\n");
    }

    /* Compute stats: walk every bucket building a chain-length
     * histogram. The last vector slot accumulates all chains of length
     * >= DICT_STATS_VECTLEN-1. */
    for (i = 0; i < DICT_STATS_VECTLEN; i++) clvector[i] = 0;
    for (i = 0; i < ht->size; i++) {
        dictEntry *he;

        if (ht->table[i] == NULL) {
            clvector[0]++;
            continue;
        }
        slots++;
        /* For each hash entry on this slot... */
        chainlen = 0;
        he = ht->table[i];
        while(he) {
            chainlen++;
            he = he->next;
        }
        clvector[(chainlen < DICT_STATS_VECTLEN) ? chainlen : (DICT_STATS_VECTLEN-1)]++;
        if (chainlen > maxchainlen) maxchainlen = chainlen;
        totchainlen += chainlen;
    }

    /* Generate human readable stats. */
    l += snprintf(buf+l,bufsize-l,
        "Hash table %d stats (%s):\n"
        " table size: %ld\n"
        " number of elements: %ld\n"
        " different slots: %ld\n"
        " max chain length: %ld\n"
        " avg chain length (counted): %.02f\n"
        " avg chain length (computed): %.02f\n"
        " Chain length distribution:\n",
        tableid, (tableid == 0) ? "main hash table" : "rehashing target",
        ht->size, ht->used, slots, maxchainlen,
        (float)totchainlen/slots, (float)ht->used/slots);

    /* Fix: iterate up to and including the overflow slot
     * DICT_STATS_VECTLEN-1. The previous bound (DICT_STATS_VECTLEN-1)
     * stopped one short, so chains of length >= DICT_STATS_VECTLEN-1
     * were never reported and the ">= " prefix below was unreachable. */
    for (i = 0; i < DICT_STATS_VECTLEN; i++) {
        if (clvector[i] == 0) continue;
        if (l >= bufsize) break;
        l += snprintf(buf+l,bufsize-l,
            "   %s%ld: %ld (%.02f%%)\n",
            (i == DICT_STATS_VECTLEN-1)?">= ":"",
            i, clvector[i], ((float)clvector[i]/ht->size)*100);
    }

    /* Unlike snprintf(), return the number of characters actually written. */
    if (bufsize) buf[bufsize-1] = '\0';
    return strlen(buf);
}
|
|
|
|
void dictGetStats(char *buf, size_t bufsize, dict *d) {
    /* Write stats for the main table, then — if a rehash is running and
     * space remains — append stats for the rehashing target table. */
    char *orig_buf = buf;
    size_t orig_bufsize = bufsize;
    size_t written;

    written = _dictGetStatsHt(buf,bufsize,&d->ht[0],0);
    buf += written;
    bufsize -= written;
    if (dictIsRehashing(d) && bufsize > 0)
        _dictGetStatsHt(buf,bufsize,&d->ht[1],1);
    /* Make sure there is a NULL term at the end. */
    if (orig_bufsize) orig_buf[orig_bufsize-1] = '\0';
}
|
|
|
|
/* ------------------------------- Benchmark ---------------------------------*/
|
|
|
|
#ifdef DICT_BENCHMARK_MAIN
|
|
|
|
#include "sds.h"
|
|
|
|
/* Benchmark dictType hash callback: hash the sds key's bytes. */
uint64_t hashCallback(const void *key) {
    return dictGenHashFunction((unsigned char*)key, sdslen((char*)key));
}
|
|
|
|
int compareCallback(void *privdata, const void *key1, const void *key2) {
|
|
int l1,l2;
|
|
DICT_NOTUSED(privdata);
|
|
|
|
l1 = sdslen((sds)key1);
|
|
l2 = sdslen((sds)key2);
|
|
if (l1 != l2) return 0;
|
|
return memcmp(key1, key2, l1) == 0;
|
|
}
|
|
|
|
/* Benchmark dictType value destructor: values are sds strings. */
void freeCallback(void *privdata, void *val) {
    DICT_NOTUSED(privdata);

    sdsfree(val);
}
|
|
|
|
/* dictType used by the benchmark main(): sds keys hashed/compared with
 * the callbacks above; keys are not duplicated or freed by the dict
 * (NULL keyDup/keyDestructor slots); values freed with freeCallback.
 * NOTE(review): field order assumed to match the dictType declaration
 * in dict.h — hash, keyDup, valDup, keyCompare, keyDestructor,
 * valDestructor; confirm against the header. */
dictType BenchmarkDictType = {
    hashCallback,
    NULL,
    NULL,
    compareCallback,
    freeCallback,
    NULL
};
|
|
|
|
/* Timing helpers for the benchmark main(). end_benchmark() prints how
 * long the section took for 'count' items. Fix: the trailing ';' was
 * moved out of the end_benchmark() definition — keeping it inside the
 * macro defeats the do { ... } while(0) idiom (it breaks use inside an
 * if/else without braces and doubles the ';' at call sites). */
#define start_benchmark() start = timeInMilliseconds()
#define end_benchmark(msg) do { \
    elapsed = timeInMilliseconds()-start; \
    printf(msg ": %ld items in %lld ms\n", count, elapsed); \
} while(0)
|
|
|
|
/* dict-benchmark [count] */
|
|
/* dict-benchmark [count]
 * Stand-alone micro-benchmark (compiled only under DICT_BENCHMARK_MAIN):
 * inserts 'count' sds keys, then times linear lookups, random lookups,
 * missing-key lookups, and delete+re-add cycles. */
int main(int argc, char **argv) {
    long j;
    long long start, elapsed;
    dict *dict = dictCreate(&BenchmarkDictType,NULL);
    long count = 0;

    if (argc == 2) {
        count = strtol(argv[1],NULL,10);
    } else {
        count = 5000000;
    }

    start_benchmark();
    for (j = 0; j < count; j++) {
        /* Values are the integers themselves stored as pointers. */
        int retval = dictAdd(dict,sdsfromlonglong(j),(void*)j);
        assert(retval == DICT_OK);
    }
    end_benchmark("Inserting");
    assert((long)dictSize(dict) == count);

    /* Wait for rehashing so later timings measure a stable table. */
    while (dictIsRehashing(dict)) {
        dictRehashMilliseconds(dict,100);
    }

    start_benchmark();
    for (j = 0; j < count; j++) {
        sds key = sdsfromlonglong(j);
        dictEntry *de = dictFind(dict,key);
        assert(de != NULL);
        sdsfree(key);
    }
    end_benchmark("Linear access of existing elements");

    /* Second round: same access pattern with warm caches. */
    start_benchmark();
    for (j = 0; j < count; j++) {
        sds key = sdsfromlonglong(j);
        dictEntry *de = dictFind(dict,key);
        assert(de != NULL);
        sdsfree(key);
    }
    end_benchmark("Linear access of existing elements (2nd round)");

    start_benchmark();
    for (j = 0; j < count; j++) {
        sds key = sdsfromlonglong(rand() % count);
        dictEntry *de = dictFind(dict,key);
        assert(de != NULL);
        sdsfree(key);
    }
    end_benchmark("Random access of existing elements");

    start_benchmark();
    for (j = 0; j < count; j++) {
        /* Corrupt the first digit so the key cannot exist. */
        sds key = sdsfromlonglong(rand() % count);
        key[0] = 'X';
        dictEntry *de = dictFind(dict,key);
        assert(de == NULL);
        sdsfree(key);
    }
    end_benchmark("Accessing missing");

    start_benchmark();
    for (j = 0; j < count; j++) {
        sds key = sdsfromlonglong(j);
        int retval = dictDelete(dict,key);
        assert(retval == DICT_OK);
        key[0] += 17; /* Change first number to letter. */
        retval = dictAdd(dict,key,(void*)j);
        assert(retval == DICT_OK);
    }
    end_benchmark("Removing and adding");
}
|
|
#endif
|