// SPDX-FileCopyrightText: 2024 Redict Contributors
// SPDX-FileCopyrightText: 2024 Salvatore Sanfilippo <antirez at gmail dot com>
//
// SPDX-License-Identifier: BSD-3-Clause
// SPDX-License-Identifier: LGPL-3.0-only

#include "redictmodule.h"

#include <string.h>
#include <strings.h>
#include <assert.h>
#include <unistd.h>

#define UNUSED(V) ((void) V)

#define LIST_SIZE 1024

/* The FSL (Fixed-Size List) data type is a low-budget imitation of the
 * native Redict list, in order to test list-like commands implemented
 * by a module.
 * Examples: FSL.PUSH, FSL.BPOP, etc. */

typedef struct {
    long long list[LIST_SIZE];
    long long length;
} fsl_t; /* Fixed-size list */

static RedictModuleType *fsltype = NULL;

fsl_t *fsl_type_create(void) {
    fsl_t *o;
    o = RedictModule_Alloc(sizeof(*o));
    o->length = 0;
    return o;
}

void fsl_type_free(fsl_t *o) {
    RedictModule_Free(o);
}

/* ========================== "fsltype" type methods ======================= */

void *fsl_rdb_load(RedictModuleIO *rdb, int encver) {
    if (encver != 0) {
        return NULL;
    }
    fsl_t *fsl = fsl_type_create();
    fsl->length = RedictModule_LoadUnsigned(rdb);
    for (long long i = 0; i < fsl->length; i++)
        fsl->list[i] = RedictModule_LoadSigned(rdb);
    return fsl;
}

void fsl_rdb_save(RedictModuleIO *rdb, void *value) {
    fsl_t *fsl = value;
    RedictModule_SaveUnsigned(rdb,fsl->length);
    for (long long i = 0; i < fsl->length; i++)
        RedictModule_SaveSigned(rdb, fsl->list[i]);
}

void fsl_aofrw(RedictModuleIO *aof, RedictModuleString *key, void *value) {
    fsl_t *fsl = value;
    for (long long i = 0; i < fsl->length; i++)
        RedictModule_EmitAOF(aof, "FSL.PUSH","sl", key, fsl->list[i]);
}

void fsl_free(void *value) {
    fsl_type_free(value);
}

/* ========================== helper methods ======================= */

/* Wrapper to the boilerplate code of opening a key, checking its type, etc.
 * Returns 0 if `keyname` exists in the dataset but is of the wrong type (i.e. not FSL);
 * returns 1 otherwise. Note that on success *fsl may still be NULL, e.g. when the
 * key is missing or logically empty and `create` is 0. */
int get_fsl(RedictModuleCtx *ctx, RedictModuleString *keyname, int mode, int create, fsl_t **fsl, int reply_on_failure) {
    *fsl = NULL;
    RedictModuleKey *key = RedictModule_OpenKey(ctx, keyname, mode);

    if (RedictModule_KeyType(key) != REDICTMODULE_KEYTYPE_EMPTY) {
        /* Key exists */
        if (RedictModule_ModuleTypeGetType(key) != fsltype) {
            /* Key is not FSL */
            RedictModule_CloseKey(key);
            if (reply_on_failure)
                RedictModule_ReplyWithError(ctx, REDICTMODULE_ERRORMSG_WRONGTYPE);
            RedictModuleCallReply *reply = RedictModule_Call(ctx, "INCR", "c", "fsl_wrong_type");
            RedictModule_FreeCallReply(reply);
            return 0;
        }

        *fsl = RedictModule_ModuleTypeGetValue(key);
        if (*fsl && !(*fsl)->length && mode & REDICTMODULE_WRITE) {
            /* Key exists, but it's logically empty */
            if (create) {
                create = 0; /* No need to create, key exists in its basic state */
            } else {
                RedictModule_DeleteKey(key);
                *fsl = NULL;
            }
        } else {
            /* Key exists, and has elements in it - no need to create anything */
            create = 0;
        }
    }

    if (create) {
        *fsl = fsl_type_create();
        RedictModule_ModuleTypeSetValue(key, fsltype, *fsl);
    }

    RedictModule_CloseKey(key);
    return 1;
}

/* ========================== commands ======================= */

/* FSL.PUSH <key> <int> - Push an integer to the fixed-size list (to the right).
 * It must be greater than the element in the head of the list. */
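/* A hypothetical redict-cli session (key name chosen for illustration):
 *   FSL.PUSH mylist 1   => OK
 *   FSL.PUSH mylist 5   => OK
 *   FSL.PUSH mylist 3   => (error) ERR new element has to be greater than the head element */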
int fsl_push(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    if (argc != 3)
        return RedictModule_WrongArity(ctx);

    long long ele;
    if (RedictModule_StringToLongLong(argv[2],&ele) != REDICTMODULE_OK)
        return RedictModule_ReplyWithError(ctx,"ERR invalid integer");

    fsl_t *fsl;
    if (!get_fsl(ctx, argv[1], REDICTMODULE_WRITE, 1, &fsl, 1))
        return REDICTMODULE_OK;

    if (fsl->length == LIST_SIZE)
        return RedictModule_ReplyWithError(ctx,"ERR list is full");

    if (fsl->length != 0 && fsl->list[fsl->length-1] >= ele)
        return RedictModule_ReplyWithError(ctx,"ERR new element has to be greater than the head element");

    fsl->list[fsl->length++] = ele;
    RedictModule_SignalKeyAsReady(ctx, argv[1]);

    RedictModule_ReplicateVerbatim(ctx);

    return RedictModule_ReplyWithSimpleString(ctx, "OK");
}

typedef struct {
    RedictModuleString *keyname;
    long long ele;
} timer_data_t;

static void timer_callback(RedictModuleCtx *ctx, void *data)
{
    timer_data_t *td = data;

    fsl_t *fsl;
    if (!get_fsl(ctx, td->keyname, REDICTMODULE_WRITE, 1, &fsl, 1))
        return;

    if (fsl->length == LIST_SIZE)
        return; /* list is full */

    if (fsl->length != 0 && fsl->list[fsl->length-1] >= td->ele)
        return; /* new element has to be greater than the head element */

    fsl->list[fsl->length++] = td->ele;
    RedictModule_SignalKeyAsReady(ctx, td->keyname);

    RedictModule_Replicate(ctx, "FSL.PUSH", "sl", td->keyname, td->ele);

    RedictModule_FreeString(ctx, td->keyname);
    RedictModule_Free(td);
}

/* FSL.PUSHTIMER <key> <int> <period-in-ms> - Create a timer that pushes <int> to the
 * fixed-size list (to the right) once <period-in-ms> milliseconds have elapsed.
 * The pushed element must be greater than the element in the head of the list.
 * Replies with the timer ID. */
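/* A hypothetical usage (key name chosen for illustration):
 *   FSL.PUSHTIMER mylist 100 500   => (integer) <timer-id>
 * and roughly 500 ms later, 100 is pushed onto mylist (if still valid). */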
int fsl_pushtimer(RedictModuleCtx *ctx, RedictModuleString **argv, int argc)
{
    if (argc != 4)
        return RedictModule_WrongArity(ctx);

    long long ele;
    if (RedictModule_StringToLongLong(argv[2],&ele) != REDICTMODULE_OK)
        return RedictModule_ReplyWithError(ctx,"ERR invalid integer");

    long long period;
    if (RedictModule_StringToLongLong(argv[3],&period) != REDICTMODULE_OK)
        return RedictModule_ReplyWithError(ctx,"ERR invalid period");

    fsl_t *fsl;
    if (!get_fsl(ctx, argv[1], REDICTMODULE_WRITE, 1, &fsl, 1))
        return REDICTMODULE_OK;

    if (fsl->length == LIST_SIZE)
        return RedictModule_ReplyWithError(ctx,"ERR list is full");

    timer_data_t *td = RedictModule_Alloc(sizeof(*td));
    td->keyname = argv[1];
    RedictModule_RetainString(ctx, td->keyname);
    td->ele = ele;

    RedictModuleTimerID id = RedictModule_CreateTimer(ctx, period, timer_callback, td);
    RedictModule_ReplyWithLongLong(ctx, id);

    return REDICTMODULE_OK;
}

int bpop_reply_callback(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argv);
    REDICTMODULE_NOT_USED(argc);
    RedictModuleString *keyname = RedictModule_GetBlockedClientReadyKey(ctx);

    fsl_t *fsl;
    if (!get_fsl(ctx, keyname, REDICTMODULE_WRITE, 0, &fsl, 0) || !fsl)
        return REDICTMODULE_ERR;

    RedictModule_Assert(fsl->length);
    RedictModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);

    /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
    RedictModule_ReplicateVerbatim(ctx);
    return REDICTMODULE_OK;
}

int bpop_timeout_callback(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argv);
    REDICTMODULE_NOT_USED(argc);
    return RedictModule_ReplyWithSimpleString(ctx, "Request timedout");
}

/* FSL.BPOP <key> <timeout> [NO_TO_CB] - Block clients until the list has an element.
 * When that happens, unblock the client and pop the last element (from the right). */
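/* A hypothetical two-client session (key name chosen for illustration):
 *   client A: FSL.BPOP mylist 0   (blocks; a timeout of 0 means wait forever)
 *   client B: FSL.PUSH mylist 7   => OK
 *   client A is unblocked and replies (integer) 7 */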
int fsl_bpop(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    if (argc < 3)
        return RedictModule_WrongArity(ctx);

    long long timeout;
    if (RedictModule_StringToLongLong(argv[2],&timeout) != REDICTMODULE_OK || timeout < 0)
        return RedictModule_ReplyWithError(ctx,"ERR invalid timeout");

    int to_cb = 1;
    if (argc == 4) {
        if (strcasecmp("NO_TO_CB", RedictModule_StringPtrLen(argv[3], NULL)))
            return RedictModule_ReplyWithError(ctx,"ERR invalid argument");
        to_cb = 0;
    }

    fsl_t *fsl;
    if (!get_fsl(ctx, argv[1], REDICTMODULE_WRITE, 0, &fsl, 1))
        return REDICTMODULE_OK;

    if (!fsl) {
        RedictModule_BlockClientOnKeys(ctx, bpop_reply_callback, to_cb ? bpop_timeout_callback : NULL,
                                       NULL, timeout, &argv[1], 1, NULL);
    } else {
        RedictModule_Assert(fsl->length);
        RedictModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
        /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
        RedictModule_ReplicateVerbatim(ctx);
    }

    return REDICTMODULE_OK;
}

int bpopgt_reply_callback(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argv);
    REDICTMODULE_NOT_USED(argc);
    RedictModuleString *keyname = RedictModule_GetBlockedClientReadyKey(ctx);
    long long *pgt = RedictModule_GetBlockedClientPrivateData(ctx);

    fsl_t *fsl;
    if (!get_fsl(ctx, keyname, REDICTMODULE_WRITE, 0, &fsl, 0) || !fsl)
        return RedictModule_ReplyWithError(ctx,"UNBLOCKED key no longer exists");

    if (fsl->list[fsl->length-1] <= *pgt)
        return REDICTMODULE_ERR;

    RedictModule_Assert(fsl->length);
    RedictModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
    /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
    RedictModule_ReplicateVerbatim(ctx);
    return REDICTMODULE_OK;
}

int bpopgt_timeout_callback(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argv);
    REDICTMODULE_NOT_USED(argc);
    return RedictModule_ReplyWithSimpleString(ctx, "Request timedout");
}

void bpopgt_free_privdata(RedictModuleCtx *ctx, void *privdata) {
    REDICTMODULE_NOT_USED(ctx);
    RedictModule_Free(privdata);
}

/* FSL.BPOPGT <key> <gt> <timeout> - Block clients until list has an element greater than <gt>.
 * When that happens, unblock client and pop the last element (from the right). */
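/* A hypothetical two-client session (key name chosen for illustration):
 *   client A: FSL.BPOPGT mylist 5 0   (blocks while the head element is <= 5; errors if the key is missing)
 *   client B: FSL.PUSH mylist 9       => OK
 *   client A is unblocked and replies (integer) 9 */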
int fsl_bpopgt(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    if (argc != 4)
        return RedictModule_WrongArity(ctx);

    long long gt;
    if (RedictModule_StringToLongLong(argv[2],&gt) != REDICTMODULE_OK)
        return RedictModule_ReplyWithError(ctx,"ERR invalid integer");

    long long timeout;
    if (RedictModule_StringToLongLong(argv[3],&timeout) != REDICTMODULE_OK || timeout < 0)
        return RedictModule_ReplyWithError(ctx,"ERR invalid timeout");

    fsl_t *fsl;
    if (!get_fsl(ctx, argv[1], REDICTMODULE_WRITE, 0, &fsl, 1))
        return REDICTMODULE_OK;

    if (!fsl)
        return RedictModule_ReplyWithError(ctx,"ERR key must exist");

    if (fsl->list[fsl->length-1] <= gt) {
        /* We use a heap allocation so the tests in blockonkeys.tcl can check for memory leaks */
        long long *pgt = RedictModule_Alloc(sizeof(long long));
        *pgt = gt;
        RedictModule_BlockClientOnKeysWithFlags(
            ctx, bpopgt_reply_callback, bpopgt_timeout_callback,
            bpopgt_free_privdata, timeout, &argv[1], 1, pgt,
            REDICTMODULE_BLOCK_UNBLOCK_DELETED);
    } else {
        RedictModule_Assert(fsl->length);
        RedictModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
        /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
        RedictModule_ReplicateVerbatim(ctx);
    }

    return REDICTMODULE_OK;
}

int bpoppush_reply_callback(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argv);
    REDICTMODULE_NOT_USED(argc);
    RedictModuleString *src_keyname = RedictModule_GetBlockedClientReadyKey(ctx);
    RedictModuleString *dst_keyname = RedictModule_GetBlockedClientPrivateData(ctx);

    fsl_t *src;
    if (!get_fsl(ctx, src_keyname, REDICTMODULE_WRITE, 0, &src, 0) || !src)
        return REDICTMODULE_ERR;

    fsl_t *dst;
    if (!get_fsl(ctx, dst_keyname, REDICTMODULE_WRITE, 1, &dst, 0) || !dst)
        return REDICTMODULE_ERR;

    RedictModule_Assert(src->length);
    long long ele = src->list[--src->length];
    dst->list[dst->length++] = ele;
    RedictModule_SignalKeyAsReady(ctx, dst_keyname);
    /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
    RedictModule_ReplicateVerbatim(ctx);
    return RedictModule_ReplyWithLongLong(ctx, ele);
}

int bpoppush_timeout_callback(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argv);
    REDICTMODULE_NOT_USED(argc);
    return RedictModule_ReplyWithSimpleString(ctx, "Request timedout");
}

void bpoppush_free_privdata(RedictModuleCtx *ctx, void *privdata) {
    RedictModule_FreeString(ctx, privdata);
}

/* FSL.BPOPPUSH <src> <dst> <timeout> - Block clients until <src> has an element.
 * When that happens, unblock client, pop the last element from <src> and push it to <dst>
 * (from the right). */
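/* A hypothetical two-client session (key names chosen for illustration):
 *   client A: FSL.BPOPPUSH src dst 0   (blocks while src is empty)
 *   client B: FSL.PUSH src 4           => OK
 *   client A is unblocked, replies (integer) 4, and the element now lives in dst */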
int fsl_bpoppush(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    if (argc != 4)
        return RedictModule_WrongArity(ctx);

    long long timeout;
    if (RedictModule_StringToLongLong(argv[3],&timeout) != REDICTMODULE_OK || timeout < 0)
        return RedictModule_ReplyWithError(ctx,"ERR invalid timeout");

    fsl_t *src;
    if (!get_fsl(ctx, argv[1], REDICTMODULE_WRITE, 0, &src, 1))
        return REDICTMODULE_OK;

    if (!src) {
        /* Retain string for reply callback */
        RedictModule_RetainString(ctx, argv[2]);
        /* Key is empty, we must block */
        RedictModule_BlockClientOnKeys(ctx, bpoppush_reply_callback, bpoppush_timeout_callback,
                                       bpoppush_free_privdata, timeout, &argv[1], 1, argv[2]);
    } else {
        fsl_t *dst;
        if (!get_fsl(ctx, argv[2], REDICTMODULE_WRITE, 1, &dst, 1))
            return REDICTMODULE_OK;

        RedictModule_Assert(src->length);
        long long ele = src->list[--src->length];
        dst->list[dst->length++] = ele;
        RedictModule_SignalKeyAsReady(ctx, argv[2]);
        RedictModule_ReplyWithLongLong(ctx, ele);
        /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
        RedictModule_ReplicateVerbatim(ctx);
    }

    return REDICTMODULE_OK;
}

/* FSL.GETALL <key> - Reply with an array containing all elements. */
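/* E.g. (hypothetical): FSL.GETALL mylist => 1) (integer) 1  2) (integer) 5 */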
int fsl_getall(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    if (argc != 2)
        return RedictModule_WrongArity(ctx);

    fsl_t *fsl;
    if (!get_fsl(ctx, argv[1], REDICTMODULE_READ, 0, &fsl, 1))
        return REDICTMODULE_OK;

    if (!fsl)
        return RedictModule_ReplyWithArray(ctx, 0);

    RedictModule_ReplyWithArray(ctx, fsl->length);
    for (int i = 0; i < fsl->length; i++)
        RedictModule_ReplyWithLongLong(ctx, fsl->list[i]);
    return REDICTMODULE_OK;
}

/* Callback for blockonkeys_popall */
int blockonkeys_popall_reply_callback(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argc);
    RedictModuleKey *key = RedictModule_OpenKey(ctx, argv[1], REDICTMODULE_WRITE);
    if (RedictModule_KeyType(key) == REDICTMODULE_KEYTYPE_LIST) {
        RedictModuleString *elem;
        long len = 0;
        RedictModule_ReplyWithArray(ctx, REDICTMODULE_POSTPONED_ARRAY_LEN);
        while ((elem = RedictModule_ListPop(key, REDICTMODULE_LIST_HEAD)) != NULL) {
            len++;
            RedictModule_ReplyWithString(ctx, elem);
            RedictModule_FreeString(ctx, elem);
        }
        /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
        RedictModule_ReplicateVerbatim(ctx);
        RedictModule_ReplySetArrayLength(ctx, len);
    } else {
        RedictModule_ReplyWithError(ctx, "ERR Not a list");
    }
    RedictModule_CloseKey(key);
    return REDICTMODULE_OK;
}

int blockonkeys_popall_timeout_callback(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argv);
    REDICTMODULE_NOT_USED(argc);
    return RedictModule_ReplyWithError(ctx, "ERR Timeout");
}

/* BLOCKONKEYS.POPALL key
 *
 * Blocks on an empty key for up to 3 seconds. When unblocked by a list
 * operation like LPUSH, all the elements are popped and returned. Fails with an
 * error on timeout. */
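/* A hypothetical two-client session (key name chosen for illustration):
 *   client A: BLOCKONKEYS.POPALL k   (blocks; fails with "ERR Timeout" after 3 seconds)
 *   client B: LPUSH k a b c          => (integer) 3
 *   client A is unblocked and replies 1) "c" 2) "b" 3) "a" */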
int blockonkeys_popall(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    if (argc != 2)
        return RedictModule_WrongArity(ctx);

    RedictModuleKey *key = RedictModule_OpenKey(ctx, argv[1], REDICTMODULE_READ);
    if (RedictModule_KeyType(key) == REDICTMODULE_KEYTYPE_EMPTY) {
        RedictModule_BlockClientOnKeys(ctx, blockonkeys_popall_reply_callback,
                                       blockonkeys_popall_timeout_callback,
                                       NULL, 3000, &argv[1], 1, NULL);
    } else {
        RedictModule_ReplyWithError(ctx, "ERR Key not empty");
    }
    RedictModule_CloseKey(key);
    return REDICTMODULE_OK;
}
|
|
|
|
|
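/* Illustrative two-client interaction (comment only); "k" is an example key:
 *
 *   client A> BLOCKONKEYS.POPALL k              (k does not exist; A blocks)
 *   client B> BLOCKONKEYS.LPUSH_UNBLOCK k 1 2 3
 *   client A< 1) "3"
 *             2) "2"
 *             3) "1"
 *
 * If nothing signals the key within 3 seconds, A gets "ERR Timeout" instead. */
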
/* BLOCKONKEYS.LPUSH key val [val ..]
 * BLOCKONKEYS.LPUSH_UNBLOCK key val [val ..]
 *
 * A module equivalent of LPUSH. If the name LPUSH_UNBLOCK is used,
 * RM_SignalKeyAsReady() is also called. */
int blockonkeys_lpush(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    if (argc < 3)
        return RedictModule_WrongArity(ctx);

    RedictModuleKey *key = RedictModule_OpenKey(ctx, argv[1], REDICTMODULE_WRITE);
    if (RedictModule_KeyType(key) != REDICTMODULE_KEYTYPE_EMPTY &&
        RedictModule_KeyType(key) != REDICTMODULE_KEYTYPE_LIST) {
        RedictModule_ReplyWithError(ctx, REDICTMODULE_ERRORMSG_WRONGTYPE);
    } else {
        for (int i = 2; i < argc; i++) {
            if (RedictModule_ListPush(key, REDICTMODULE_LIST_HEAD,
                                      argv[i]) != REDICTMODULE_OK) {
                RedictModule_CloseKey(key);
                return RedictModule_ReplyWithError(ctx, "ERR Push failed");
            }
        }
    }
    RedictModule_CloseKey(key);

    /* Signal the key as ready if the command is lpush_unblock. */
    size_t len;
    const char *str = RedictModule_StringPtrLen(argv[0], &len);
    if (!strncasecmp(str, "blockonkeys.lpush_unblock", len)) {
        RedictModule_SignalKeyAsReady(ctx, argv[1]);
    }
    RedictModule_ReplicateVerbatim(ctx);
    return RedictModule_ReplyWithSimpleString(ctx, "OK");
}

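/* Design note (an inference from the pair of command names above, not a
 * statement of the server's guarantees): pushing via the low-level list API
 * does not by itself wake clients blocked with
 * RedictModule_BlockClientOnKeys(); that is what the explicit
 * RedictModule_SignalKeyAsReady() call provides. Registering the same handler
 * under both names lets the tests exercise both behaviors. */
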
/* Callback for the BLOCKONKEYS.BLPOPN command */
int blockonkeys_blpopn_reply_callback(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argc);
    long long n;
    RedictModule_StringToLongLong(argv[2], &n);
    RedictModuleKey *key = RedictModule_OpenKey(ctx, argv[1], REDICTMODULE_WRITE);
    int result;
    if (RedictModule_KeyType(key) == REDICTMODULE_KEYTYPE_LIST &&
        RedictModule_ValueLength(key) >= (size_t)n) {
        RedictModule_ReplyWithArray(ctx, n);
        for (long i = 0; i < n; i++) {
            RedictModuleString *elem = RedictModule_ListPop(key, REDICTMODULE_LIST_HEAD);
            RedictModule_ReplyWithString(ctx, elem);
            RedictModule_FreeString(ctx, elem);
        }
        /* Replicating a potentially blocking command is safe here: the list
         * already holds enough elements in this flow, so it cannot block. */
        RedictModule_ReplicateVerbatim(ctx);
        result = REDICTMODULE_OK;
    } else if (RedictModule_KeyType(key) == REDICTMODULE_KEYTYPE_LIST ||
               RedictModule_KeyType(key) == REDICTMODULE_KEYTYPE_EMPTY) {
        const char *module_cmd = RedictModule_StringPtrLen(argv[0], NULL);
        if (!strcasecmp(module_cmd, "blockonkeys.blpopn_or_unblock"))
            RedictModule_UnblockClient(RedictModule_GetBlockedClientHandle(ctx), NULL);

        /* Continue blocking. */
        result = REDICTMODULE_ERR;
    } else {
        result = RedictModule_ReplyWithError(ctx, REDICTMODULE_ERRORMSG_WRONGTYPE);
    }
    RedictModule_CloseKey(key);
    return result;
}

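/* Contract recap, as exercised by the callback above: returning
 * REDICTMODULE_OK from a BlockClientOnKeys reply callback completes the
 * command and unblocks the client, while REDICTMODULE_ERR keeps the client
 * blocked so the callback is retried on the next key-ready signal, or until
 * the timeout fires. */
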
int blockonkeys_blpopn_timeout_callback(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argv);
    REDICTMODULE_NOT_USED(argc);
    return RedictModule_ReplyWithError(ctx, "ERR Timeout");
}

int blockonkeys_blpopn_abort_callback(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argv);
    REDICTMODULE_NOT_USED(argc);
    return RedictModule_ReplyWithSimpleString(ctx, "Action aborted");
}

/* BLOCKONKEYS.BLPOPN key N [timeout]
 *
 * Blocks until the key holds at least N elements, then pops and returns them.
 * Fails with an error once the timeout (in milliseconds, default 3000)
 * expires. With a timeout of 0 the abort callback is registered instead of
 * the timeout callback. */
int blockonkeys_blpopn(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    if (argc < 3) return RedictModule_WrongArity(ctx);

    long long n, timeout = 3000LL;
    if (RedictModule_StringToLongLong(argv[2], &n) != REDICTMODULE_OK) {
        return RedictModule_ReplyWithError(ctx, "ERR Invalid N");
    }

    if (argc > 3) {
        if (RedictModule_StringToLongLong(argv[3], &timeout) != REDICTMODULE_OK) {
            return RedictModule_ReplyWithError(ctx, "ERR Invalid timeout value");
        }
    }

    RedictModuleKey *key = RedictModule_OpenKey(ctx, argv[1], REDICTMODULE_WRITE);
    int keytype = RedictModule_KeyType(key);
    if (keytype != REDICTMODULE_KEYTYPE_EMPTY &&
        keytype != REDICTMODULE_KEYTYPE_LIST) {
        RedictModule_ReplyWithError(ctx, REDICTMODULE_ERRORMSG_WRONGTYPE);
    } else if (keytype == REDICTMODULE_KEYTYPE_LIST &&
               RedictModule_ValueLength(key) >= (size_t)n) {
        RedictModule_ReplyWithArray(ctx, n);
        for (long i = 0; i < n; i++) {
            RedictModuleString *elem = RedictModule_ListPop(key, REDICTMODULE_LIST_HEAD);
            RedictModule_ReplyWithString(ctx, elem);
            RedictModule_FreeString(ctx, elem);
        }
        /* Replicating a potentially blocking command is safe here: the list
         * already holds enough elements in this flow, so it cannot block. */
        RedictModule_ReplicateVerbatim(ctx);
    } else {
        RedictModule_BlockClientOnKeys(ctx, blockonkeys_blpopn_reply_callback,
                                       timeout ? blockonkeys_blpopn_timeout_callback
                                               : blockonkeys_blpopn_abort_callback,
                                       NULL, timeout, &argv[1], 1, NULL);
    }
    RedictModule_CloseKey(key);
    return REDICTMODULE_OK;
}

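/* Illustrative two-client interaction (comment only); "k" is an example key:
 *
 *   client A> BLOCKONKEYS.BLPOPN k 2            (k is empty; A blocks)
 *   client B> BLOCKONKEYS.LPUSH_UNBLOCK k x     (one element: A stays blocked)
 *   client B> BLOCKONKEYS.LPUSH_UNBLOCK k y
 *   client A< 1) "y"
 *             2) "x"
 */
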
int RedictModule_OnLoad(RedictModuleCtx *ctx, RedictModuleString **argv, int argc) {
    REDICTMODULE_NOT_USED(argv);
    REDICTMODULE_NOT_USED(argc);

    if (RedictModule_Init(ctx, "blockonkeys", 1, REDICTMODULE_APIVER_1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    RedictModuleTypeMethods tm = {
        .version = REDICTMODULE_TYPE_METHOD_VERSION,
        .rdb_load = fsl_rdb_load,
        .rdb_save = fsl_rdb_save,
        .aof_rewrite = fsl_aofrw,
        .mem_usage = NULL,
        .free = fsl_free,
        .digest = NULL,
    };

    fsltype = RedictModule_CreateDataType(ctx, "fsltype_t", 0, &tm);
    if (fsltype == NULL)
        return REDICTMODULE_ERR;

    if (RedictModule_CreateCommand(ctx,"fsl.push",fsl_push,"write",1,1,1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    if (RedictModule_CreateCommand(ctx,"fsl.pushtimer",fsl_pushtimer,"write",1,1,1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    if (RedictModule_CreateCommand(ctx,"fsl.bpop",fsl_bpop,"write",1,1,1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    if (RedictModule_CreateCommand(ctx,"fsl.bpopgt",fsl_bpopgt,"write",1,1,1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    if (RedictModule_CreateCommand(ctx,"fsl.bpoppush",fsl_bpoppush,"write",1,2,1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    if (RedictModule_CreateCommand(ctx,"fsl.getall",fsl_getall,"",1,1,1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    if (RedictModule_CreateCommand(ctx, "blockonkeys.popall", blockonkeys_popall,
                                   "write", 1, 1, 1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    if (RedictModule_CreateCommand(ctx, "blockonkeys.lpush", blockonkeys_lpush,
                                   "write", 1, 1, 1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    if (RedictModule_CreateCommand(ctx, "blockonkeys.lpush_unblock", blockonkeys_lpush,
                                   "write", 1, 1, 1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    if (RedictModule_CreateCommand(ctx, "blockonkeys.blpopn", blockonkeys_blpopn,
                                   "write", 1, 1, 1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    if (RedictModule_CreateCommand(ctx, "blockonkeys.blpopn_or_unblock", blockonkeys_blpopn,
                                   "write", 1, 1, 1) == REDICTMODULE_ERR)
        return REDICTMODULE_ERR;

    return REDICTMODULE_OK;
}
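
/* To exercise the module manually (illustrative; assumes it is built as
 * blockonkeys.so and that the server accepts the classic --loadmodule flag):
 *
 *   redict-server --loadmodule ./blockonkeys.so
 */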