Project start
backend/node_modules/bullmq/LICENSE (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2018 BullForce Labs AB and contributors.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
backend/node_modules/bullmq/README.md (generated, vendored, new file, 295 lines)
@@ -0,0 +1,295 @@
<div align="center">
  <br/>
  <img src="https://user-images.githubusercontent.com/95200/143832033-32e868df-f3b0-4251-97fb-c64809a43d36.png" width="800" />
  <br/>
  <br/>
  <p>
    The fastest, most reliable, Redis-based distributed queue for Node. <br/>
    Carefully written for rock solid stability and atomicity.
  </p>
  Read the <a href="https://docs.bullmq.io">documentation</a>
  <br/>
  <br/>
  <p>
    <a href="https://join.slack.com/t/bullmq/shared_invite/zt-1nbtpk6mv-TItWpF9jf3k4yrCaS0PPZA">
      <img src="https://img.shields.io/badge/Slack-4A154B"/>
    </a>
    <a href="https://badge.fury.io/js/bullmq">
      <img src="https://badge.fury.io/js/bullmq.svg"/>
    </a>
    <a href="https://coveralls.io/github/taskforcesh/bullmq?branch=master">
      <img src="https://coveralls.io/repos/github/taskforcesh/bullmq/badge.svg?branch=master"/>
    </a>
    <a href="https://github.com/semantic-release/semantic-release">
      <img src="https://img.shields.io/badge/%20%20%F0%9F%93%A6%F0%9F%9A%80-semantic--release-e10079.svg"/>
    </a>
    <a href="https://github.com/sponsors/taskforcesh">
      <img src="https://img.shields.io/badge/sponsor-30363D?style=flat&logo=GitHub-Sponsors&logoColor=EA4AAA"/>
    </a>
  </p>
  <p>
    <em>Follow <a href="https://twitter.com/manast">@manast</a> for *important* Bull/BullMQ/BullMQ-Pro news and updates!</em>
  </p>
</div>

# 🛠 Tutorials

You can find tutorials and news in this blog: https://blog.taskforce.sh/

# News 🚀

## 🌐 Language agnostic BullMQ

Do you need to work with BullMQ on platforms other than Node.js? If so, check out the [BullMQ Proxy](https://github.com/taskforcesh/bullmq-proxy).

# Official FrontEnd

[<img src="https://taskforce.sh/assets/logo_square.png" width="150" alt="Taskforce.sh, Inc" style="padding: 200px"/>](https://taskforce.sh)

Supercharge your queues with a professional front end:

- Get a complete overview of all your queues.
- Inspect jobs, search, retry, or promote delayed jobs.
- Metrics and statistics.
- And many more features.

Sign up at [Taskforce.sh](https://taskforce.sh)

# 🚀 Sponsors 🚀

<table cellspacing="0" cellpadding="0" border="0">
  <tr>
    <td>
      <a href="https://www.dragonflydb.io/">
        <img src="https://raw.githubusercontent.com/dragonflydb/dragonfly/main/.github/images/logo-full.svg" width=550 alt="Dragonfly" />
      </a>
    </td>
    <td>
      Dragonfly is a new Redis™ drop-in replacement that is fully compatible with BullMQ and brings some important advantages over Redis™, such as massively
      better performance by utilizing all available CPU cores, and faster, more memory-efficient data structures. Read more <a href="https://www.dragonflydb.io/docs/integrations/bullmq">here</a> on how to use it with BullMQ.
    </td>
  </tr>
</table>

# Used by

Some notable organizations using BullMQ:

<table cellspacing="0" cellpadding="0">
  <tr>
    <td valign="center">
      <a href="https://github.com/microsoft/lage">
        <img
          src="https://files.gitbook.com/v0/b/gitbook-x-prod.appspot.com/o/spaces%2F-LUuDmt_xXMfG66Rn1GA%2Fuploads%2FUvwInTAmk7hxAViDwJzU%2Fclipart1565701.png?alt=media"
          width="150"
          alt="Microsoft"
        />
      </a>
    </td>
    <td valign="center">
      <a href="https://github.com/vendure-ecommerce/vendure">
        <img
          src="https://files.gitbook.com/v0/b/gitbook-x-prod.appspot.com/o/spaces%2F-LUuDmt_xXMfG66Rn1GA%2Fuploads%2FvT30DUqsi61gL8edn3R2%2Fwordmark-logo.png?alt=media"
          width="150"
          alt="Vendure"
        />
      </a>
    </td>
    <td valign="center">
      <a href="https://github.com/datawrapper/datawrapper">
        <img
          src="https://files.gitbook.com/v0/b/gitbook-x-prod.appspot.com/o/spaces%2F-LUuDmt_xXMfG66Rn1GA%2Fuploads%2FCJ5XmotpBBsuSgD8CilC%2Fdatawrapper-logo.png?alt=media"
          width="150"
          alt="Datawrapper"
        />
      </a>
    </td>
    <td valign="center">
      <a href="https://github.com/nestjs/bull/tree/master/packages/bullmq">
        <img
          src="https://876297641-files.gitbook.io/~/files/v0/b/gitbook-x-prod.appspot.com/o/spaces%2F-LUuDmt_xXMfG66Rn1GA%2Fuploads%2FfAcGye182utFUtPKdLqJ%2FScreenshot%202022-02-15%20at%2011.32.39.png?alt=media&token=29feb550-f0bc-467d-a290-f700701d7d15"
          width="150"
          alt="Nest"
        />
      </a>
    </td>
    <td valign="center">
      <a href="https://langfuse.com">
        <img
          src="https://langfuse.com/langfuse_logo.svg"
          width="150"
          alt="Langfuse"
        />
      </a>
    </td>
  </tr>
  <tr>
    <td valign="center">
      <a href="https://github.com/teamcurri">
        <img
          src="https://user-images.githubusercontent.com/659829/161662129-ae645bc4-c1e9-48ff-997e-4cee281a964a.png"
          width="150"
          alt="Curri"
        />
      </a>
    </td>
    <td valign="center">
      <a href="https://novu.co">
        <img
          src="https://assets.super.so/1e9f5a51-c4c6-4fca-b6e8-25fa0186f139/images/0f550019-16db-4a65-90d1-1bdb7d3c5f20/novu-logo-gradient-light-background2x.png"
          width="150"
          alt="Novu"
        />
      </a>
    </td>
    <td valign="center">
      <a href="https://www.nocodb.com">
        <img
          src="https://github.com/nocodb/nocodb/raw/develop/packages/nc-gui/assets/img/icons/512x512.png"
          width="50"
          alt="NoCodeDB"
        />
      </a>
    </td>
    <td valign="center">
      <a href="https://infisical.com/">
        <picture>
          <source media="(prefers-color-scheme: dark)" srcset="https://mintlify.s3-us-west-1.amazonaws.com/infisical/logo/dark.svg">
          <img
            src="https://mintlify.s3-us-west-1.amazonaws.com/infisical/logo/light.svg"
            width="150"
            alt="Infisical"
          />
        </picture>
      </a>
    </td>
  </tr>
</table>

# The gist

Install:

```
$ yarn add bullmq
```

Add jobs to the queue:

```ts
import { Queue } from 'bullmq';

const queue = new Queue('Paint');

queue.add('cars', { color: 'blue' });
```

Process the jobs in your workers:

```ts
import { Worker } from 'bullmq';

const worker = new Worker('Paint', async job => {
  if (job.name === 'cars') {
    await paintCar(job.data.color);
  }
});
```

Listen to jobs for completion:

```ts
import { QueueEvents } from 'bullmq';

const queueEvents = new QueueEvents('Paint');

queueEvents.on('completed', ({ jobId }) => {
  console.log('done painting');
});

queueEvents.on(
  'failed',
  ({ jobId, failedReason }: { jobId: string; failedReason: string }) => {
    console.error('error painting', failedReason);
  },
);
```

Add jobs with parent-child relationships:

```ts
import { FlowProducer } from 'bullmq';

const flow = new FlowProducer();

const originalTree = await flow.add({
  name: 'root-job',
  queueName: 'topQueueName',
  data: {},
  children: [
    {
      name: 'child-job',
      data: { idx: 0, foo: 'bar' },
      queueName: 'childrenQueueName',
      children: [
        {
          name: 'grandchild-job',
          data: { idx: 1, foo: 'bah' },
          queueName: 'grandChildrenQueueName',
        },
        {
          name: 'grandchild-job',
          data: { idx: 2, foo: 'baz' },
          queueName: 'grandChildrenQueueName',
        },
      ],
    },
    {
      name: 'child-job',
      data: { idx: 3, foo: 'foo' },
      queueName: 'childrenQueueName',
    },
  ],
});
```

This is just scratching the surface; check out all the features and more in the official <a href="https://docs.bullmq.io">documentation</a>.

# Feature Comparison

Since there are a few job queue solutions, here is a table comparing them:

| Feature | [BullMQ-Pro](https://bullmq.io/#bullmq-pro) | [BullMQ](https://bullmq.io) | Bull | Kue | Bee | Agenda |
| :------------------------- | :---: | :---: | :---: | :---: | :---: | :---: |
| Backend | redis | redis | redis | redis | redis | mongo |
| Observables | ✓ | | | | | |
| Group Rate Limit | ✓ | | | | | |
| Group Support | ✓ | | | | | |
| Batches Support | ✓ | | | | | |
| Parent/Child Dependencies | ✓ | ✓ | | | | |
| Deduplication (Debouncing) | ✓ | ✓ | ✓ | | | |
| Deduplication (Throttling) | ✓ | ✓ | ✓ | | | |
| Priorities | ✓ | ✓ | ✓ | ✓ | | ✓ |
| Concurrency | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| Delayed jobs | ✓ | ✓ | ✓ | ✓ | | ✓ |
| Global events | ✓ | ✓ | ✓ | ✓ | | |
| Rate Limiter | ✓ | ✓ | ✓ | | | |
| Pause/Resume | ✓ | ✓ | ✓ | ✓ | | |
| Sandboxed worker | ✓ | ✓ | ✓ | | | |
| Repeatable jobs | ✓ | ✓ | ✓ | | | ✓ |
| Atomic ops | ✓ | ✓ | ✓ | | ✓ | |
| Persistence | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| UI | ✓ | ✓ | ✓ | ✓ | | ✓ |
| Optimized for | Jobs / Messages | Jobs / Messages | Jobs / Messages | Jobs | Messages | Jobs |

## Contributing

Fork the repo, make some changes, submit a pull request! Here is the [contributing](https://github.com/taskforcesh/bullmq/blob/master/contributing.md) doc that has more details.

# Thanks

Thanks to all the contributors who made this library possible; also a special
mention to Leon van Kammen, who kindly donated his npm bullmq repo.
backend/node_modules/bullmq/dist/cjs/classes/async-fifo-queue.js (generated, vendored, new file, 134 lines)
@@ -0,0 +1,134 @@
"use strict";
/**
 * (c) 2017-2025 BullForce Labs AB, MIT Licensed.
 * @see LICENSE.md
 *
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.AsyncFifoQueue = void 0;
class Node {
    constructor(value) {
        this.value = undefined;
        this.next = null;
        this.value = value;
    }
}
class LinkedList {
    constructor() {
        this.length = 0;
        this.head = null;
        this.tail = null;
    }
    push(value) {
        const newNode = new Node(value);
        if (!this.length) {
            this.head = newNode;
        }
        else {
            this.tail.next = newNode;
        }
        this.tail = newNode;
        this.length += 1;
        return newNode;
    }
    shift() {
        if (!this.length) {
            return null;
        }
        else {
            const head = this.head;
            this.head = this.head.next;
            this.length -= 1;
            return head;
        }
    }
}
/**
 * AsyncFifoQueue
 *
 * A minimal FIFO queue for asynchronous operations. Allows adding asynchronous operations
 * and consuming them in the order they are resolved.
 */
class AsyncFifoQueue {
    constructor(ignoreErrors = false) {
        this.ignoreErrors = ignoreErrors;
        /**
         * A queue of completed promises. As the pending
         * promises are resolved, they are added to this queue.
         */
        this.queue = new LinkedList();
        /**
         * A set of pending promises.
         */
        this.pending = new Set();
        this.newPromise();
    }
    add(promise) {
        this.pending.add(promise);
        promise
            .then(data => {
                this.pending.delete(promise);
                if (this.queue.length === 0) {
                    this.resolvePromise(data);
                }
                this.queue.push(data);
            })
            .catch(err => {
                // Ignore errors
                if (this.ignoreErrors) {
                    this.queue.push(undefined);
                }
                this.pending.delete(promise);
                this.rejectPromise(err);
            });
    }
    async waitAll() {
        await Promise.all(this.pending);
    }
    numTotal() {
        return this.pending.size + this.queue.length;
    }
    numPending() {
        return this.pending.size;
    }
    numQueued() {
        return this.queue.length;
    }
    resolvePromise(data) {
        this.resolve(data);
        this.newPromise();
    }
    rejectPromise(err) {
        this.reject(err);
        this.newPromise();
    }
    newPromise() {
        this.nextPromise = new Promise((resolve, reject) => {
            this.resolve = resolve;
            this.reject = reject;
        });
    }
    async wait() {
        return this.nextPromise;
    }
    async fetch() {
        var _a;
        if (this.pending.size === 0 && this.queue.length === 0) {
            return;
        }
        while (this.queue.length === 0) {
            try {
                await this.wait();
            }
            catch (err) {
                // Ignore errors
                if (!this.ignoreErrors) {
                    console.error('Unexpected Error in AsyncFifoQueue', err);
                }
            }
        }
        return (_a = this.queue.shift()) === null || _a === void 0 ? void 0 : _a.value;
    }
}
exports.AsyncFifoQueue = AsyncFifoQueue;
//# sourceMappingURL=async-fifo-queue.js.map
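For orientation, a minimal sketch of what the class above does (an editor's illustration, not part of this commit): results come out in completion order rather than insertion order. Whether `AsyncFifoQueue` is re-exported from the package root is an assumption; it is an internal helper.

```ts
import { AsyncFifoQueue } from 'bullmq'; // assumption: re-exported from the package root

const delayed = <T>(value: T, ms: number): Promise<T> =>
  new Promise(resolve => setTimeout(() => resolve(value), ms));

async function demo() {
  const fifo = new AsyncFifoQueue<string>();
  fifo.add(delayed('slow', 100));
  fifo.add(delayed('fast', 10));
  console.log(await fifo.fetch()); // 'fast' -- completion order, not insertion order
  console.log(await fifo.fetch()); // 'slow'
}

demo();
```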
backend/node_modules/bullmq/dist/cjs/classes/async-fifo-queue.js.map (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
{"version":3,"file":"async-fifo-queue.js","sourceRoot":"","sources":["../../../src/classes/async-fifo-queue.ts"],"names":[],"mappings":";AAAA;;;;GAIG;;;AAEH,MAAM,IAAI;IAIR,YAAY,KAAQ;QAHpB,UAAK,GAAkB,SAAS,CAAC;QACjC,SAAI,GAAmB,IAAI,CAAC;QAG1B,IAAI,CAAC,KAAK,GAAG,KAAK,CAAC;IACrB,CAAC;CACF;AAED,MAAM,UAAU;IAKd;QAJA,WAAM,GAAG,CAAC,CAAC;QAKT,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC;QACjB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC;IACnB,CAAC;IAED,IAAI,CAAC,KAAQ;QACX,MAAM,OAAO,GAAG,IAAI,IAAI,CAAC,KAAK,CAAC,CAAC;QAChC,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,IAAI,CAAC,IAAI,GAAG,OAAO,CAAC;QACtB,CAAC;aAAM,CAAC;YACN,IAAI,CAAC,IAAI,CAAC,IAAI,GAAG,OAAO,CAAC;QAC3B,CAAC;QAED,IAAI,CAAC,IAAI,GAAG,OAAO,CAAC;QACpB,IAAI,CAAC,MAAM,IAAI,CAAC,CAAC;QACjB,OAAO,OAAO,CAAC;IACjB,CAAC;IAED,KAAK;QACH,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,OAAO,IAAI,CAAC;QACd,CAAC;aAAM,CAAC;YACN,MAAM,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC;YACvB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC;YAC3B,IAAI,CAAC,MAAM,IAAI,CAAC,CAAC;YAEjB,OAAO,IAAI,CAAC;QACd,CAAC;IACH,CAAC;CACF;AAED;;;;;GAKG;AACH,MAAa,cAAc;IAqBzB,YAAoB,eAAe,KAAK;QAApB,iBAAY,GAAZ,YAAY,CAAQ;QApBxC;;;WAGG;QACK,UAAK,GAAkB,IAAI,UAAU,EAAE,CAAC;QAEhD;;WAEG;QACK,YAAO,GAAG,IAAI,GAAG,EAAc,CAAC;QAYtC,IAAI,CAAC,UAAU,EAAE,CAAC;IACpB,CAAC;IAEM,GAAG,CAAC,OAAmB;QAC5B,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC;QAE1B,OAAO;aACJ,IAAI,CAAC,IAAI,CAAC,EAAE;YACX,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;YAE7B,IAAI,IAAI,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;gBAC5B,IAAI,CAAC,cAAc,CAAC,IAAI,CAAC,CAAC;YAC5B,CAAC;YACD,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;QACxB,CAAC,CAAC;aACD,KAAK,CAAC,GAAG,CAAC,EAAE;YACX,gBAAgB;YAChB,IAAI,IAAI,CAAC,YAAY,EAAE,CAAC;gBACtB,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;YAC7B,CAAC;YACD,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;YAC7B,IAAI,CAAC,aAAa,CAAC,GAAG,CAAC,CAAC;QAC1B,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,KAAK,CAAC,OAAO;QAClB,MAAM,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;IAClC,CAAC;IAEM,QAAQ;QACb,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC;IAC/C,CAAC;IAEM,UAAU;QACf,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC;IAC3B,CAAC;IAEM,SAAS;QACd,OAAO,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC;IAC3B,CAAC;IAEO,cAAc,CAAC,IAAO;QAC5B,IAAI,CAAC,OAAQ,CAAC,IAAI,CAAC,CAAC;QACpB,IAAI,CAAC,UAAU,EAAE,CAAC;IACpB,CAAC;IAEO,aAAa,CAAC,GAAQ;QAC5B,IAAI,CAAC,MAAO,CAAC,GAAG,CAAC,CAAC;QAClB,IAAI,CAAC,UAAU,EAAE,CAAC;IACpB,CAAC;IAEO,UAAU;QAChB,IAAI,CAAC,WAAW,GAAG,IAAI,OAAO,CAAgB,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE;YAChE,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;QACvB,CAAC,CAAC,CAAC;IACL,CAAC;IAEO,KAAK,CAAC,IAAI;QAChB,OAAO,IAAI,CAAC,WAAW,CAAC;IAC1B,CAAC;IAEM,KAAK,CAAC,KAAK;;QAChB,IAAI,IAAI,CAAC,OAAO,CAAC,IAAI,KAAK,CAAC,IAAI,IAAI,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YACvD,OAAO;QACT,CAAC;QACD,OAAO,IAAI,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YAC/B,IAAI,CAAC;gBACH,MAAM,IAAI,CAAC,IAAI,EAAE,CAAC;YACpB,CAAC;YAAC,OAAO,GAAG,EAAE,CAAC;gBACb,gBAAgB;gBAChB,IAAI,CAAC,IAAI,CAAC,YAAY,EAAE,CAAC;oBACvB,OAAO,CAAC,KAAK,CAAC,oCAAoC,EAAE,GAAG,CAAC,CAAC;gBAC3D,CAAC;YACH,CAAC;QACH,CAAC;QACD,OAAO,MAAA,IAAI,CAAC,KAAK,CAAC,KAAK,EAAE,0CAAE,KAAK,CAAC;IACnC,CAAC;CACF;AApGD,wCAoGC"}
backend/node_modules/bullmq/dist/cjs/classes/backoffs.js (generated, vendored, new file, 61 lines)
@@ -0,0 +1,61 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Backoffs = void 0;
class Backoffs {
    static normalize(backoff) {
        if (Number.isFinite(backoff)) {
            return {
                type: 'fixed',
                delay: backoff,
            };
        }
        else if (backoff) {
            return backoff;
        }
    }
    static calculate(backoff, attemptsMade, err, job, customStrategy) {
        if (backoff) {
            const strategy = lookupStrategy(backoff, customStrategy);
            return strategy(attemptsMade, backoff.type, err, job);
        }
    }
}
exports.Backoffs = Backoffs;
Backoffs.builtinStrategies = {
    fixed: function (delay, jitter = 0) {
        return function () {
            if (jitter > 0) {
                const minDelay = delay * (1 - jitter);
                return Math.floor(Math.random() * delay * jitter + minDelay);
            }
            else {
                return delay;
            }
        };
    },
    exponential: function (delay, jitter = 0) {
        return function (attemptsMade) {
            if (jitter > 0) {
                const maxDelay = Math.round(Math.pow(2, attemptsMade - 1) * delay);
                const minDelay = maxDelay * (1 - jitter);
                return Math.floor(Math.random() * maxDelay * jitter + minDelay);
            }
            else {
                return Math.round(Math.pow(2, attemptsMade - 1) * delay);
            }
        };
    },
};
function lookupStrategy(backoff, customStrategy) {
    if (backoff.type in Backoffs.builtinStrategies) {
        return Backoffs.builtinStrategies[backoff.type](backoff.delay, backoff.jitter);
    }
    else if (customStrategy) {
        return customStrategy;
    }
    else {
        throw new Error(`Unknown backoff strategy ${backoff.type}.
If a custom backoff strategy is used, specify it when the queue is created.`);
    }
}
//# sourceMappingURL=backoffs.js.map
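The exponential strategy above computes `round(2^(attemptsMade - 1) * delay)`, so with `delay: 1000` the retry delays are 1000, 2000, 4000 ms; with `jitter: 0.5` each delay is drawn uniformly from `[maxDelay * 0.5, maxDelay)`. A minimal sketch of wiring this up through job options, not part of this commit (the queue name and payload are illustrative):

```ts
import { Queue } from 'bullmq';

const queue = new Queue('Paint');

await queue.add(
  'cars',
  { color: 'blue' },
  {
    attempts: 3, // retry up to twice after the first failure
    backoff: { type: 'exponential', delay: 1000, jitter: 0.5 },
  },
);
```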
backend/node_modules/bullmq/dist/cjs/classes/backoffs.js.map (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
{"version":3,"file":"backoffs.js","sourceRoot":"","sources":["../../../src/classes/backoffs.ts"],"names":[],"mappings":";;;AAQA,MAAa,QAAQ;IA4BnB,MAAM,CAAC,SAAS,CACd,OAAgC;QAEhC,IAAI,MAAM,CAAC,QAAQ,CAAS,OAAO,CAAC,EAAE,CAAC;YACrC,OAAO;gBACL,IAAI,EAAE,OAAO;gBACb,KAAK,EAAU,OAAO;aACvB,CAAC;QACJ,CAAC;aAAM,IAAI,OAAO,EAAE,CAAC;YACnB,OAAuB,OAAO,CAAC;QACjC,CAAC;IACH,CAAC;IAED,MAAM,CAAC,SAAS,CACd,OAAuB,EACvB,YAAoB,EACpB,GAAU,EACV,GAAe,EACf,cAAgC;QAEhC,IAAI,OAAO,EAAE,CAAC;YACZ,MAAM,QAAQ,GAAG,cAAc,CAAC,OAAO,EAAE,cAAc,CAAC,CAAC;YAEzD,OAAO,QAAQ,CAAC,YAAY,EAAE,OAAO,CAAC,IAAI,EAAE,GAAG,EAAE,GAAG,CAAC,CAAC;QACxD,CAAC;IACH,CAAC;;AArDH,4BAsDC;AArDQ,0BAAiB,GAAsB;IAC5C,KAAK,EAAE,UAAU,KAAa,EAAE,MAAM,GAAG,CAAC;QACxC,OAAO;YACL,IAAI,MAAM,GAAG,CAAC,EAAE,CAAC;gBACf,MAAM,QAAQ,GAAG,KAAK,GAAG,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC;gBAEtC,OAAO,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,KAAK,GAAG,MAAM,GAAG,QAAQ,CAAC,CAAC;YAC/D,CAAC;iBAAM,CAAC;gBACN,OAAO,KAAK,CAAC;YACf,CAAC;QACH,CAAC,CAAC;IACJ,CAAC;IAED,WAAW,EAAE,UAAU,KAAa,EAAE,MAAM,GAAG,CAAC;QAC9C,OAAO,UAAU,YAAoB;YACnC,IAAI,MAAM,GAAG,CAAC,EAAE,CAAC;gBACf,MAAM,QAAQ,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,YAAY,GAAG,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC;gBACnE,MAAM,QAAQ,GAAG,QAAQ,GAAG,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC;gBAEzC,OAAO,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,QAAQ,GAAG,MAAM,GAAG,QAAQ,CAAC,CAAC;YAClE,CAAC;iBAAM,CAAC;gBACN,OAAO,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,YAAY,GAAG,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC;YAC3D,CAAC;QACH,CAAC,CAAC;IACJ,CAAC;CACF,CAAC;AA8BJ,SAAS,cAAc,CACrB,OAAuB,EACvB,cAAgC;IAEhC,IAAI,OAAO,CAAC,IAAI,IAAI,QAAQ,CAAC,iBAAiB,EAAE,CAAC;QAC/C,OAAO,QAAQ,CAAC,iBAAiB,CAAC,OAAO,CAAC,IAAI,CAAC,CAC7C,OAAO,CAAC,KAAM,EACd,OAAO,CAAC,MAAM,CACf,CAAC;IACJ,CAAC;SAAM,IAAI,cAAc,EAAE,CAAC;QAC1B,OAAO,cAAc,CAAC;IACxB,CAAC;SAAM,CAAC;QACN,MAAM,IAAI,KAAK,CACb,4BAA4B,OAAO,CAAC,IAAI;kFACoC,CAC7E,CAAC;IACJ,CAAC;AACH,CAAC"}
backend/node_modules/bullmq/dist/cjs/classes/child-pool.js (generated, vendored, new file, 83 lines)
@@ -0,0 +1,83 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ChildPool = void 0;
const path = require("path");
const child_1 = require("./child");
const CHILD_KILL_TIMEOUT = 30000;
const supportCJS = () => {
    return (typeof require === 'function' &&
        typeof module === 'object' &&
        typeof module.exports === 'object');
};
class ChildPool {
    constructor({ mainFile = supportCJS()
        ? path.join(process.cwd(), 'dist/cjs/classes/main.js')
        : path.join(process.cwd(), 'dist/esm/classes/main.js'), useWorkerThreads, workerForkOptions, workerThreadsOptions, }) {
        this.retained = {};
        this.free = {};
        this.opts = {
            mainFile,
            useWorkerThreads,
            workerForkOptions,
            workerThreadsOptions,
        };
    }
    async retain(processFile) {
        let child = this.getFree(processFile).pop();
        if (child) {
            this.retained[child.pid] = child;
            return child;
        }
        child = new child_1.Child(this.opts.mainFile, processFile, {
            useWorkerThreads: this.opts.useWorkerThreads,
            workerForkOptions: this.opts.workerForkOptions,
            workerThreadsOptions: this.opts.workerThreadsOptions,
        });
        child.on('exit', this.remove.bind(this, child));
        try {
            await child.init();
            // Check status here as well, in case the child exited before we could
            // retain it.
            if (child.exitCode !== null || child.signalCode !== null) {
                throw new Error('Child exited before it could be retained');
            }
            this.retained[child.pid] = child;
            return child;
        }
        catch (err) {
            console.error(err);
            this.release(child);
            throw err;
        }
    }
    release(child) {
        delete this.retained[child.pid];
        this.getFree(child.processFile).push(child);
    }
    remove(child) {
        delete this.retained[child.pid];
        const free = this.getFree(child.processFile);
        const childIndex = free.indexOf(child);
        if (childIndex > -1) {
            free.splice(childIndex, 1);
        }
    }
    async kill(child, signal = 'SIGKILL') {
        this.remove(child);
        return child.kill(signal, CHILD_KILL_TIMEOUT);
    }
    async clean() {
        const children = Object.values(this.retained).concat(this.getAllFree());
        this.retained = {};
        this.free = {};
        await Promise.all(children.map(c => this.kill(c, 'SIGTERM')));
    }
    getFree(id) {
        return (this.free[id] = this.free[id] || []);
    }
    getAllFree() {
        return Object.values(this.free).reduce((first, second) => first.concat(second), []);
    }
}
exports.ChildPool = ChildPool;
//# sourceMappingURL=child-pool.js.map
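The pool above backs BullMQ's sandboxed processors; application code does not use it directly. A minimal sketch of the user-facing side (not part of this commit), assuming a separate processor file exists at the given hypothetical path:

```ts
import * as path from 'path';
import { Worker } from 'bullmq';

// Passing a file path instead of a function makes the worker run the
// processor in a child process (or worker thread) managed by ChildPool.
const worker = new Worker(
  'Paint',
  path.join(__dirname, 'paint-processor.js'), // hypothetical processor file
  { useWorkerThreads: false },
);
```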
backend/node_modules/bullmq/dist/cjs/classes/child-pool.js.map (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
{"version":3,"file":"child-pool.js","sourceRoot":"","sources":["../../../src/classes/child-pool.ts"],"names":[],"mappings":";;;AAAA,6BAA6B;AAC7B,mCAAgC;AAGhC,MAAM,kBAAkB,GAAG,KAAM,CAAC;AAMlC,MAAM,UAAU,GAAG,GAAG,EAAE;IACtB,OAAO,CACL,OAAO,OAAO,KAAK,UAAU;QAC7B,OAAO,MAAM,KAAK,QAAQ;QAC1B,OAAO,MAAM,CAAC,OAAO,KAAK,QAAQ,CACnC,CAAC;AACJ,CAAC,CAAC;AAEF,MAAa,SAAS;IAKpB,YAAY,EACV,QAAQ,GAAG,UAAU,EAAE;QACrB,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,0BAA0B,CAAC;QACtD,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,0BAA0B,CAAC,EACxD,gBAAgB,EAChB,iBAAiB,EACjB,oBAAoB,GACN;QAXhB,aAAQ,GAA6B,EAAE,CAAC;QACxC,SAAI,GAA+B,EAAE,CAAC;QAWpC,IAAI,CAAC,IAAI,GAAG;YACV,QAAQ;YACR,gBAAgB;YAChB,iBAAiB;YACjB,oBAAoB;SACrB,CAAC;IACJ,CAAC;IAED,KAAK,CAAC,MAAM,CAAC,WAAmB;QAC9B,IAAI,KAAK,GAAG,IAAI,CAAC,OAAO,CAAC,WAAW,CAAC,CAAC,GAAG,EAAE,CAAC;QAE5C,IAAI,KAAK,EAAE,CAAC;YACV,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC;YACjC,OAAO,KAAK,CAAC;QACf,CAAC;QAED,KAAK,GAAG,IAAI,aAAK,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,WAAW,EAAE;YACjD,gBAAgB,EAAE,IAAI,CAAC,IAAI,CAAC,gBAAgB;YAC5C,iBAAiB,EAAE,IAAI,CAAC,IAAI,CAAC,iBAAiB;YAC9C,oBAAoB,EAAE,IAAI,CAAC,IAAI,CAAC,oBAAoB;SACrD,CAAC,CAAC;QAEH,KAAK,CAAC,EAAE,CAAC,MAAM,EAAE,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,EAAE,KAAK,CAAC,CAAC,CAAC;QAEhD,IAAI,CAAC;YACH,MAAM,KAAK,CAAC,IAAI,EAAE,CAAC;YAEnB,sEAAsE;YACtE,aAAa;YACb,IAAI,KAAK,CAAC,QAAQ,KAAK,IAAI,IAAI,KAAK,CAAC,UAAU,KAAK,IAAI,EAAE,CAAC;gBACzD,MAAM,IAAI,KAAK,CAAC,0CAA0C,CAAC,CAAC;YAC9D,CAAC;YAED,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC;YAEjC,OAAO,KAAK,CAAC;QACf,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACb,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;YACnB,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;YACpB,MAAM,GAAG,CAAC;QACZ,CAAC;IACH,CAAC;IAED,OAAO,CAAC,KAAY;QAClB,OAAO,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;QAChC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;IAC9C,CAAC;IAED,MAAM,CAAC,KAAY;QACjB,OAAO,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;QAEhC,MAAM,IAAI,GAAG,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC;QAE7C,MAAM,UAAU,GAAG,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;QACvC,IAAI,UAAU,GAAG,CAAC,CAAC,EAAE,CAAC;YACpB,IAAI,CAAC,MAAM,CAAC,UAAU,EAAE,CAAC,CAAC,CAAC;QAC7B,CAAC;IACH,CAAC;IAED,KAAK,CAAC,IAAI,CACR,KAAY,EACZ,SAAgC,SAAS;QAEzC,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;QACnB,OAAO,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,kBAAkB,CAAC,CAAC;IAChD,CAAC;IAED,KAAK,CAAC,KAAK;QACT,MAAM,QAAQ,GAAG,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,UAAU,EAAE,CAAC,CAAC;QACxE,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAC;QACnB,IAAI,CAAC,IAAI,GAAG,EAAE,CAAC;QAEf,MAAM,OAAO,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,CAAC,CAAC;IAChE,CAAC;IAED,OAAO,CAAC,EAAU;QAChB,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,EAAE,CAAC,CAAC;IAC/C,CAAC;IAED,UAAU;QACR,OAAO,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,MAAM,CACpC,CAAC,KAAK,EAAE,MAAM,EAAE,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,EACvC,EAAE,CACH,CAAC;IACJ,CAAC;CACF;AAlGD,8BAkGC"}
backend/node_modules/bullmq/dist/cjs/classes/child-processor.js (generated, vendored, new file, 220 lines)
@@ -0,0 +1,220 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ChildProcessor = void 0;
const enums_1 = require("../enums");
const utils_1 = require("../utils");
var ChildStatus;
(function (ChildStatus) {
    ChildStatus[ChildStatus["Idle"] = 0] = "Idle";
    ChildStatus[ChildStatus["Started"] = 1] = "Started";
    ChildStatus[ChildStatus["Terminating"] = 2] = "Terminating";
    ChildStatus[ChildStatus["Errored"] = 3] = "Errored";
})(ChildStatus || (ChildStatus = {}));
const RESPONSE_TIMEOUT = process.env.NODE_ENV === 'test' ? 500 : 5000;
/**
 * ChildProcessor
 *
 * This class acts as the interface between a child process and its parent process
 * so that jobs can be processed in different processes.
 *
 */
class ChildProcessor {
    constructor(send, receiver) {
        this.send = send;
        this.receiver = receiver;
    }
    async init(processorFile) {
        let processor;
        try {
            const { default: processorFn } = await import(processorFile);
            processor = processorFn;
            if (processor.default) {
                // support es2015 module.
                processor = processor.default;
            }
            if (typeof processor !== 'function') {
                throw new Error('No function is exported in processor file');
            }
        }
        catch (err) {
            this.status = ChildStatus.Errored;
            return this.send({
                cmd: enums_1.ParentCommand.InitFailed,
                err: (0, utils_1.errorToJSON)(err),
            });
        }
        const origProcessor = processor;
        processor = function (job, token) {
            try {
                return Promise.resolve(origProcessor(job, token));
            }
            catch (err) {
                return Promise.reject(err);
            }
        };
        this.processor = processor;
        this.status = ChildStatus.Idle;
        await this.send({
            cmd: enums_1.ParentCommand.InitCompleted,
        });
    }
    async start(jobJson, token) {
        if (this.status !== ChildStatus.Idle) {
            return this.send({
                cmd: enums_1.ParentCommand.Error,
                err: (0, utils_1.errorToJSON)(new Error('cannot start a not idling child process')),
            });
        }
        this.status = ChildStatus.Started;
        this.currentJobPromise = (async () => {
            try {
                const job = this.wrapJob(jobJson, this.send);
                const result = await this.processor(job, token);
                await this.send({
                    cmd: enums_1.ParentCommand.Completed,
                    value: typeof result === 'undefined' ? null : result,
                });
            }
            catch (err) {
                await this.send({
                    cmd: enums_1.ParentCommand.Failed,
                    value: (0, utils_1.errorToJSON)(!err.message ? new Error(err) : err),
                });
            }
            finally {
                this.status = ChildStatus.Idle;
                this.currentJobPromise = undefined;
            }
        })();
    }
    async stop() { }
    async waitForCurrentJobAndExit() {
        this.status = ChildStatus.Terminating;
        try {
            await this.currentJobPromise;
        }
        finally {
            process.exit(process.exitCode || 0);
        }
    }
    /**
     * Enhance the given job argument with some functions
     * that can be called from the sandboxed job processor.
     *
     * Note, the `job` argument is a JSON deserialized message
     * from the main node process to this forked child process;
     * the functions on the original job object are not intact.
     * The wrapped job adds back some of those original functions.
     */
    wrapJob(job, send) {
        const wrappedJob = Object.assign(Object.assign({}, job), { queueQualifiedName: job.queueQualifiedName, data: JSON.parse(job.data || '{}'), opts: job.opts, returnValue: JSON.parse(job.returnvalue || '{}'),
            /*
             * Proxy `updateProgress` function; should work as the `progress` function.
             */
            async updateProgress(progress) {
                // Locally store reference to new progress value
                // so that we can return it from this process synchronously.
                this.progress = progress;
                // Send message to update job progress.
                await send({
                    cmd: enums_1.ParentCommand.Progress,
                    value: progress,
                });
            },
            /*
             * Proxy job `log` function.
             */
            log: async (row) => {
                await send({
                    cmd: enums_1.ParentCommand.Log,
                    value: row,
                });
            },
            /*
             * Proxy `moveToDelayed` function.
             */
            moveToDelayed: async (timestamp, token) => {
                await send({
                    cmd: enums_1.ParentCommand.MoveToDelayed,
                    value: { timestamp, token },
                });
            },
            /*
             * Proxy `moveToWait` function.
             */
            moveToWait: async (token) => {
                await send({
                    cmd: enums_1.ParentCommand.MoveToWait,
                    value: { token },
                });
            },
            /*
             * Proxy `moveToWaitingChildren` function.
             */
            moveToWaitingChildren: async (token, opts) => {
                const requestId = Math.random().toString(36).substring(2, 15);
                await send({
                    requestId,
                    cmd: enums_1.ParentCommand.MoveToWaitingChildren,
                    value: { token, opts },
                });
                return waitResponse(requestId, this.receiver, RESPONSE_TIMEOUT, 'moveToWaitingChildren');
            },
            /*
             * Proxy `updateData` function.
             */
            updateData: async (data) => {
                await send({
                    cmd: enums_1.ParentCommand.Update,
                    value: data,
                });
                wrappedJob.data = data;
            },
            /**
             * Proxy `getChildrenValues` function.
             */
            getChildrenValues: async () => {
                const requestId = Math.random().toString(36).substring(2, 15);
                await send({
                    requestId,
                    cmd: enums_1.ParentCommand.GetChildrenValues,
                });
                return waitResponse(requestId, this.receiver, RESPONSE_TIMEOUT, 'getChildrenValues');
            },
            /**
             * Proxy `getIgnoredChildrenFailures` function.
             *
             * This method sends a request to retrieve the failures of ignored children
             * and waits for a response from the parent process.
             *
             * @returns - A promise that resolves with the ignored children failures.
             * The exact structure of the returned data depends on the parent process implementation.
             */
            getIgnoredChildrenFailures: async () => {
                const requestId = Math.random().toString(36).substring(2, 15);
                await send({
                    requestId,
                    cmd: enums_1.ParentCommand.GetIgnoredChildrenFailures,
                });
                return waitResponse(requestId, this.receiver, RESPONSE_TIMEOUT, 'getIgnoredChildrenFailures');
            } });
        return wrappedJob;
    }
}
exports.ChildProcessor = ChildProcessor;
const waitResponse = async (requestId, receiver, timeout, cmd) => {
    return new Promise((resolve, reject) => {
        const listener = (msg) => {
            if (msg.requestId === requestId) {
                resolve(msg.value);
                receiver.off('message', listener);
            }
        };
        receiver.on('message', listener);
        setTimeout(() => {
            receiver.off('message', listener);
            reject(new Error(`TimeoutError: ${cmd} timed out in (${timeout}ms)`));
        }, timeout);
    });
};
//# sourceMappingURL=child-processor.js.map
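`wrapJob` above re-attaches proxies such as `updateProgress` and `log` to the JSON-deserialized job, so a sandboxed processor file can call them as if it held a real `Job`. A minimal sketch of such a file, not part of this commit (file name and payload are illustrative):

```ts
// paint-processor.ts -- runs inside the forked child or worker thread
import { SandboxedJob } from 'bullmq';

export default async function (job: SandboxedJob) {
  await job.log(`painting ${job.data.color}`); // proxied to the parent process
  await job.updateProgress(50); // likewise proxied via ParentCommand.Progress
  return { painted: true };
}
```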
backend/node_modules/bullmq/dist/cjs/classes/child-processor.js.map (generated, vendored, new file, 1 line)
File diff suppressed because one or more lines are too long
backend/node_modules/bullmq/dist/cjs/classes/child.js (generated, vendored, new file, 212 lines)
@@ -0,0 +1,212 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Child = void 0;
const child_process_1 = require("child_process");
const net_1 = require("net");
const worker_threads_1 = require("worker_threads");
const enums_1 = require("../enums");
const events_1 = require("events");
/**
 * @see https://nodejs.org/api/process.html#process_exit_codes
 */
const exitCodesErrors = {
    1: 'Uncaught Fatal Exception',
    2: 'Unused',
    3: 'Internal JavaScript Parse Error',
    4: 'Internal JavaScript Evaluation Failure',
    5: 'Fatal Error',
    6: 'Non-function Internal Exception Handler',
    7: 'Internal Exception Handler Run-Time Failure',
    8: 'Unused',
    9: 'Invalid Argument',
    10: 'Internal JavaScript Run-Time Failure',
    12: 'Invalid Debug Argument',
    13: 'Unfinished Top-Level Await',
};
/**
 * Child class
 *
 * This class is used to create a child process or worker thread, and allows using
 * isolated processes or threads for processing jobs.
 *
 */
class Child extends events_1.EventEmitter {
    constructor(mainFile, processFile, opts = {
        useWorkerThreads: false,
    }) {
        super();
        this.mainFile = mainFile;
        this.processFile = processFile;
        this.opts = opts;
        this._exitCode = null;
        this._signalCode = null;
        this._killed = false;
    }
    get pid() {
        if (this.childProcess) {
            return this.childProcess.pid;
        }
        else if (this.worker) {
            // Worker threads pids can become negative when they are terminated
            // so we need to use the absolute value to index the retained object
            return Math.abs(this.worker.threadId);
        }
        else {
            throw new Error('No child process or worker thread');
        }
    }
    get exitCode() {
        return this._exitCode;
    }
    get signalCode() {
        return this._signalCode;
    }
    get killed() {
        if (this.childProcess) {
            return this.childProcess.killed;
        }
        return this._killed;
    }
    async init() {
        const execArgv = await convertExecArgv(process.execArgv);
        let parent;
        if (this.opts.useWorkerThreads) {
            this.worker = parent = new worker_threads_1.Worker(this.mainFile, Object.assign({ execArgv, stdin: true, stdout: true, stderr: true }, (this.opts.workerThreadsOptions
                ? this.opts.workerThreadsOptions
                : {})));
        }
        else {
            this.childProcess = parent = (0, child_process_1.fork)(this.mainFile, [], Object.assign({ execArgv, stdio: 'pipe' }, (this.opts.workerForkOptions ? this.opts.workerForkOptions : {})));
        }
        parent.on('exit', (exitCode, signalCode) => {
            this._exitCode = exitCode;
            // Coerce to null if undefined for backwards compatibility
            signalCode = typeof signalCode === 'undefined' ? null : signalCode;
            this._signalCode = signalCode;
            this._killed = true;
            this.emit('exit', exitCode, signalCode);
            // Clean all listeners, we do not expect any more events after "exit"
            parent.removeAllListeners();
            this.removeAllListeners();
        });
        parent.on('error', (...args) => this.emit('error', ...args));
        parent.on('message', (...args) => this.emit('message', ...args));
        parent.on('close', (...args) => this.emit('close', ...args));
        parent.stdout.pipe(process.stdout);
        parent.stderr.pipe(process.stderr);
        await this.initChild();
    }
    async send(msg) {
        return new Promise((resolve, reject) => {
            if (this.childProcess) {
                this.childProcess.send(msg, (err) => {
                    if (err) {
                        reject(err);
                    }
                    else {
                        resolve();
                    }
                });
            }
            else if (this.worker) {
                resolve(this.worker.postMessage(msg));
            }
            else {
                resolve();
            }
        });
    }
    killProcess(signal = 'SIGKILL') {
        if (this.childProcess) {
            this.childProcess.kill(signal);
        }
        else if (this.worker) {
            this.worker.terminate();
        }
    }
    async kill(signal = 'SIGKILL', timeoutMs) {
        if (this.hasProcessExited()) {
            return;
        }
        const onExit = onExitOnce(this.childProcess || this.worker);
        this.killProcess(signal);
        if (timeoutMs !== undefined && (timeoutMs === 0 || isFinite(timeoutMs))) {
            const timeoutHandle = setTimeout(() => {
                if (!this.hasProcessExited()) {
                    this.killProcess('SIGKILL');
                }
            }, timeoutMs);
            await onExit;
            clearTimeout(timeoutHandle);
        }
        await onExit;
    }
    async initChild() {
        const onComplete = new Promise((resolve, reject) => {
            const onMessageHandler = (msg) => {
                if (msg.cmd === enums_1.ParentCommand.InitCompleted) {
                    resolve();
                }
                else if (msg.cmd === enums_1.ParentCommand.InitFailed) {
                    const err = new Error();
                    err.stack = msg.err.stack;
                    err.message = msg.err.message;
                    reject(err);
                }
                this.off('message', onMessageHandler);
                this.off('close', onCloseHandler);
            };
            const onCloseHandler = (code, signal) => {
                if (code > 128) {
                    code -= 128;
                }
                const msg = exitCodesErrors[code] || `Unknown exit code ${code}`;
                reject(new Error(`Error initializing child: ${msg} and signal ${signal}`));
                this.off('message', onMessageHandler);
                this.off('close', onCloseHandler);
            };
            this.on('message', onMessageHandler);
            this.on('close', onCloseHandler);
        });
        await this.send({
            cmd: enums_1.ChildCommand.Init,
            value: this.processFile,
        });
        await onComplete;
    }
    hasProcessExited() {
        return !!(this.exitCode !== null || this.signalCode);
    }
}
exports.Child = Child;
function onExitOnce(child) {
    return new Promise(resolve => {
        child.once('exit', () => resolve());
    });
}
const getFreePort = async () => {
    return new Promise(resolve => {
        const server = (0, net_1.createServer)();
        server.listen(0, () => {
            const { port } = server.address();
            server.close(() => resolve(port));
        });
    });
};
const convertExecArgv = async (execArgv) => {
    const standard = [];
    const convertedArgs = [];
    for (let i = 0; i < execArgv.length; i++) {
        const arg = execArgv[i];
        if (arg.indexOf('--inspect') === -1) {
            standard.push(arg);
        }
        else {
            const argName = arg.split('=')[0];
            const port = await getFreePort();
            convertedArgs.push(`${argName}=${port}`);
        }
    }
    return standard.concat(convertedArgs);
};
//# sourceMappingURL=child.js.map
backend/node_modules/bullmq/dist/cjs/classes/child.js.map (generated, vendored, new file, 1 line)
File diff suppressed because one or more lines are too long
backend/node_modules/bullmq/dist/cjs/classes/errors/delayed-error.js (generated, vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.DelayedError = exports.DELAYED_ERROR = void 0;
exports.DELAYED_ERROR = 'bullmq:movedToDelayed';
/**
 * DelayedError
 *
 * Error to be thrown when a job in the active state is moved
 * to the delayed state.
 *
 */
class DelayedError extends Error {
    constructor(message = exports.DELAYED_ERROR) {
        super(message);
        this.name = this.constructor.name;
        Object.setPrototypeOf(this, new.target.prototype);
    }
}
exports.DelayedError = DelayedError;
//# sourceMappingURL=delayed-error.js.map
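`DelayedError` pairs with `job.moveToDelayed` inside a processor: after manually moving the active job to the delayed set, the processor throws it so the worker treats the job as relocated rather than failed. A minimal sketch, not part of this commit (the condition and connection setup are illustrative):

```ts
import { Worker, DelayedError } from 'bullmq';

const worker = new Worker('Paint', async (job, token) => {
  if (job.data.paintShopClosed) { // hypothetical condition
    await job.moveToDelayed(Date.now() + 60_000, token);
    throw new DelayedError(); // signals a relocation, not a failure
  }
  // ...normal processing
});
```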
backend/node_modules/bullmq/dist/cjs/classes/errors/delayed-error.js.map (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
{"version":3,"file":"delayed-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/delayed-error.ts"],"names":[],"mappings":";;;AAAa,QAAA,aAAa,GAAG,uBAAuB,CAAC;AAErD;;;;;;GAMG;AACH,MAAa,YAAa,SAAQ,KAAK;IACrC,YAAY,UAAkB,qBAAa;QACzC,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF;AAND,oCAMC"}
backend/node_modules/bullmq/dist/cjs/classes/errors/index.js (generated, vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const tslib_1 = require("tslib");
tslib_1.__exportStar(require("./delayed-error"), exports);
tslib_1.__exportStar(require("./rate-limit-error"), exports);
tslib_1.__exportStar(require("./unrecoverable-error"), exports);
tslib_1.__exportStar(require("./waiting-children-error"), exports);
tslib_1.__exportStar(require("./waiting-error"), exports);
//# sourceMappingURL=index.js.map
backend/node_modules/bullmq/dist/cjs/classes/errors/index.js.map (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../../src/classes/errors/index.ts"],"names":[],"mappings":";;;AAAA,0DAAgC;AAChC,6DAAmC;AACnC,gEAAsC;AACtC,mEAAyC;AACzC,0DAAgC"}
backend/node_modules/bullmq/dist/cjs/classes/errors/rate-limit-error.js (generated, vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.RateLimitError = exports.RATE_LIMIT_ERROR = void 0;
exports.RATE_LIMIT_ERROR = 'bullmq:rateLimitExceeded';
/**
 * RateLimitError
 *
 * Error to be thrown when the queue reaches a rate limit.
 *
 */
class RateLimitError extends Error {
    constructor(message = exports.RATE_LIMIT_ERROR) {
        super(message);
        this.name = this.constructor.name;
        Object.setPrototypeOf(this, new.target.prototype);
    }
}
exports.RateLimitError = RateLimitError;
//# sourceMappingURL=rate-limit-error.js.map
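`RateLimitError` is thrown after calling `worker.rateLimit`, telling the worker to put the job back to wait without consuming an attempt. A minimal sketch, not part of this commit (the throttling check is illustrative):

```ts
import { Worker, RateLimitError } from 'bullmq';

const worker = new Worker('Paint', async job => {
  if (await externalApiIsThrottling()) { // hypothetical check
    await worker.rateLimit(60_000); // pause fetching for one minute
    throw new RateLimitError(); // job returns to wait, attempt not consumed
  }
});
```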
backend/node_modules/bullmq/dist/cjs/classes/errors/rate-limit-error.js.map (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
{"version":3,"file":"rate-limit-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/rate-limit-error.ts"],"names":[],"mappings":";;;AAAa,QAAA,gBAAgB,GAAG,0BAA0B,CAAC;AAE3D;;;;;GAKG;AACH,MAAa,cAAe,SAAQ,KAAK;IACvC,YAAY,UAAkB,wBAAgB;QAC5C,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF;AAND,wCAMC"}
backend/node_modules/bullmq/dist/cjs/classes/errors/unrecoverable-error.js (generated, vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.UnrecoverableError = exports.UNRECOVERABLE_ERROR = void 0;
exports.UNRECOVERABLE_ERROR = 'bullmq:unrecoverable';
/**
 * UnrecoverableError
 *
 * Error that moves a job to the failed state even if attemptsMade
 * is still lower than the configured limit.
 *
 */
class UnrecoverableError extends Error {
    constructor(message = exports.UNRECOVERABLE_ERROR) {
        super(message);
        this.name = this.constructor.name;
        Object.setPrototypeOf(this, new.target.prototype);
    }
}
exports.UnrecoverableError = UnrecoverableError;
//# sourceMappingURL=unrecoverable-error.js.map
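`UnrecoverableError` short-circuits the retry logic: the job goes straight to failed regardless of remaining attempts. A minimal sketch, not part of this commit:

```ts
import { Worker, UnrecoverableError } from 'bullmq';

const worker = new Worker('Paint', async job => {
  if (!job.data.color) {
    // Fail immediately; remaining retry attempts are skipped.
    throw new UnrecoverableError('job has no color to paint');
  }
});
```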
backend/node_modules/bullmq/dist/cjs/classes/errors/unrecoverable-error.js.map (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
{"version":3,"file":"unrecoverable-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/unrecoverable-error.ts"],"names":[],"mappings":";;;AAAa,QAAA,mBAAmB,GAAG,sBAAsB,CAAC;AAE1D;;;;;;GAMG;AACH,MAAa,kBAAmB,SAAQ,KAAK;IAC3C,YAAY,UAAkB,2BAAmB;QAC/C,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF;AAND,gDAMC"}
backend/node_modules/bullmq/dist/cjs/classes/errors/waiting-children-error.js (generated, vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.WaitingChildrenError = exports.WAITING_CHILDREN_ERROR = void 0;
exports.WAITING_CHILDREN_ERROR = 'bullmq:movedToWaitingChildren';
/**
 * WaitingChildrenError
 *
 * Error to be thrown when a job in the active state is moved
 * to the waiting-children state.
 *
 */
class WaitingChildrenError extends Error {
    constructor(message = exports.WAITING_CHILDREN_ERROR) {
        super(message);
        this.name = this.constructor.name;
        Object.setPrototypeOf(this, new.target.prototype);
    }
}
exports.WaitingChildrenError = WaitingChildrenError;
//# sourceMappingURL=waiting-children-error.js.map
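`WaitingChildrenError` supports the parent-job pattern: `job.moveToWaitingChildren(token)` returns true when unfinished children remain, and the processor then throws so the worker parks the job instead of failing it. A minimal sketch, not part of this commit (the queue name is illustrative):

```ts
import { Worker, WaitingChildrenError } from 'bullmq';

const worker = new Worker('renders', async (job, token) => {
  const shouldWait = await job.moveToWaitingChildren(token);
  if (shouldWait) {
    // Unfinished children remain; park the job until they complete.
    throw new WaitingChildrenError();
  }
  // ...all children done; process the parent job
});
```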
backend/node_modules/bullmq/dist/cjs/classes/errors/waiting-children-error.js.map (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
{"version":3,"file":"waiting-children-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/waiting-children-error.ts"],"names":[],"mappings":";;;AAAa,QAAA,sBAAsB,GAAG,+BAA+B,CAAC;AAEtE;;;;;;GAMG;AACH,MAAa,oBAAqB,SAAQ,KAAK;IAC7C,YAAY,UAAkB,8BAAsB;QAClD,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF;AAND,oDAMC"}
backend/node_modules/bullmq/dist/cjs/classes/errors/waiting-error.js (generated, vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.WaitingError = exports.WAITING_ERROR = void 0;
exports.WAITING_ERROR = 'bullmq:movedToWait';
/**
 * WaitingError
 *
 * Error to be thrown when a job in the active state is moved
 * to the wait or prioritized state.
 */
class WaitingError extends Error {
    constructor(message = exports.WAITING_ERROR) {
        super(message);
        this.name = this.constructor.name;
        Object.setPrototypeOf(this, new.target.prototype);
    }
}
exports.WaitingError = WaitingError;
//# sourceMappingURL=waiting-error.js.map
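`WaitingError` is the same relocation signal for `job.moveToWait`: the active job is manually returned to the wait list and the throw tells the worker it was moved, not failed. A minimal sketch, assuming `job.moveToWait` is available in this BullMQ version (the condition is illustrative):

```ts
import { Worker, WaitingError } from 'bullmq';

const worker = new Worker('Paint', async (job, token) => {
  if (job.data.dependenciesMissing) { // hypothetical condition
    await job.moveToWait(token); // assumption: public in this version
    throw new WaitingError(); // signals a relocation, not a failure
  }
});
```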
backend/node_modules/bullmq/dist/cjs/classes/errors/waiting-error.js.map (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
{"version":3,"file":"waiting-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/waiting-error.ts"],"names":[],"mappings":";;;AAAa,QAAA,aAAa,GAAG,oBAAoB,CAAC;AAElD;;;;;GAKG;AACH,MAAa,YAAa,SAAQ,KAAK;IACrC,YAAY,UAAkB,qBAAa;QACzC,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF;AAND,oCAMC"}
backend/node_modules/bullmq/dist/cjs/classes/flow-producer.js (generated, vendored, new file, 354 lines)
@@ -0,0 +1,354 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.FlowProducer = void 0;
const events_1 = require("events");
const uuid_1 = require("uuid");
const utils_1 = require("../utils");
const job_1 = require("./job");
const queue_keys_1 = require("./queue-keys");
const redis_connection_1 = require("./redis-connection");
const enums_1 = require("../enums");
/**
 * This class allows adding jobs with dependencies between them in such
 * a way that it is possible to build complex flows.
 * Note: A flow is a tree-like structure of jobs that depend on each other.
 * Whenever the children of a given parent are completed, the parent
 * will be processed, being able to access the children's result data.
 * All jobs, children or parents, can be in different queues.
 */
class FlowProducer extends events_1.EventEmitter {
    constructor(opts = { connection: {} }, Connection = redis_connection_1.RedisConnection) {
        super();
        this.opts = opts;
        this.opts = Object.assign({ prefix: 'bull' }, opts);
        this.connection = new Connection(opts.connection, {
            shared: (0, utils_1.isRedisInstance)(opts.connection),
            blocking: false,
            skipVersionCheck: opts.skipVersionCheck,
            skipWaitingForReady: opts.skipWaitingForReady,
        });
        this.connection.on('error', (error) => this.emit('error', error));
        this.connection.on('close', () => {
            if (!this.closing) {
                this.emit('ioredis:close');
            }
        });
        this.queueKeys = new queue_keys_1.QueueKeys(opts.prefix);
        if (opts === null || opts === void 0 ? void 0 : opts.telemetry) {
            this.telemetry = opts.telemetry;
        }
    }
    emit(event, ...args) {
        return super.emit(event, ...args);
    }
    off(eventName, listener) {
        super.off(eventName, listener);
        return this;
    }
    on(event, listener) {
        super.on(event, listener);
        return this;
    }
    once(event, listener) {
        super.once(event, listener);
        return this;
    }
    /**
     * Returns a promise that resolves to a redis client. Normally used only by subclasses.
     */
    get client() {
        return this.connection.client;
    }
    /**
     * Helper to easily extend Job class calls.
     */
    get Job() {
        return job_1.Job;
    }
    waitUntilReady() {
        return this.client;
    }
    /**
     * Adds a flow.
     *
     * This call is atomic: either it fails and no jobs are
     * added to the queues, or it succeeds and all jobs are added.
     *
     * @param flow - an object with a tree-like structure where children jobs
     * will be processed before their parents.
     * @param opts - options that will be applied to the flow object.
     */
    async add(flow, opts) {
        var _a;
        if (this.closing) {
            return;
        }
        const client = await this.connection.client;
        const multi = client.multi();
        const parentOpts = (_a = flow === null || flow === void 0 ? void 0 : flow.opts) === null || _a === void 0 ? void 0 : _a.parent;
        const parentKey = (0, utils_1.getParentKey)(parentOpts);
        const parentDependenciesKey = parentKey
            ? `${parentKey}:dependencies`
            : undefined;
        return (0, utils_1.trace)(this.telemetry, enums_1.SpanKind.PRODUCER, flow.queueName, 'addFlow', flow.queueName, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.FlowName]: flow.name,
            });
            const jobsTree = await this.addNode({
                multi,
                node: flow,
                queuesOpts: opts === null || opts === void 0 ? void 0 : opts.queuesOptions,
                parent: {
                    parentOpts,
                    parentDependenciesKey,
                },
            });
            await multi.exec();
            return jobsTree;
        });
    }
    /**
     * Gets a flow.
     *
     * @param opts - an object with options for getting a JobNode.
     */
    async getFlow(opts) {
        if (this.closing) {
            return;
        }
        const client = await this.connection.client;
        const updatedOpts = Object.assign({
            depth: 10,
            maxChildren: 20,
            prefix: this.opts.prefix,
        }, opts);
        const jobsTree = this.getNode(client, updatedOpts);
        return jobsTree;
    }
    /**
     * Adds multiple flows.
     *
     * A flow is a tree-like structure of jobs that depend on each other.
     * Whenever the children of a given parent are completed, the parent
     * will be processed, being able to access the children's result data.
     *
     * All jobs can be in different queues, either children or parents;
     * however, this call is atomic: either it fails and no jobs are
     * added to the queues, or it succeeds and all jobs are added.
     *
     * @param flows - an array of objects with a tree-like structure where children jobs
     * will be processed before their parents.
     */
    async addBulk(flows) {
        if (this.closing) {
            return;
        }
        const client = await this.connection.client;
        const multi = client.multi();
        return (0, utils_1.trace)(this.telemetry, enums_1.SpanKind.PRODUCER, '', 'addBulkFlows', '', async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.BulkCount]: flows.length,
                [enums_1.TelemetryAttributes.BulkNames]: flows
                    .map(flow => flow.name)
                    .join(','),
            });
            const jobsTrees = await this.addNodes(multi, flows);
            await multi.exec();
            return jobsTrees;
        });
    }
    /**
     * Adds a node (job) of a flow to the queue. This method will recursively
     * add all its children as well. Note that a given job can potentially be
     * a parent and a child job at the same time, depending on where it is located
     * in the tree hierarchy.
     *
     * @param multi - ioredis ChainableCommander
     * @param node - the node representing a job to be added to some queue
     * @param parent - parent data sent to children to create the "links" to their parent
     * @returns
     */
    async addNode({ multi, node, parent, queuesOpts, }) {
        var _a, _b;
        const prefix = node.prefix || this.opts.prefix;
        const queue = this.queueFromNode(node, new queue_keys_1.QueueKeys(prefix), prefix);
        const queueOpts = queuesOpts && queuesOpts[node.queueName];
        const jobsOpts = (_a = queueOpts === null || queueOpts === void 0 ? void 0 : queueOpts.defaultJobOptions) !== null && _a !== void 0 ? _a : {};
        const jobId = ((_b = node.opts) === null || _b === void 0 ? void 0 : _b.jobId) || (0, uuid_1.v4)();
        return (0, utils_1.trace)(this.telemetry, enums_1.SpanKind.PRODUCER, node.queueName, 'addNode', node.queueName, async (span, srcPropagationMedatada) => {
            var _a, _b;
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.JobName]: node.name,
                [enums_1.TelemetryAttributes.JobId]: jobId,
            });
            const opts = node.opts;
            let telemetry = opts === null || opts === void 0 ? void 0 : opts.telemetry;
            if (srcPropagationMedatada && opts) {
                const omitContext = (_a = opts.telemetry) === null || _a === void 0 ? void 0 : _a.omitContext;
                const telemetryMetadata = ((_b = opts.telemetry) === null || _b === void 0 ? void 0 : _b.metadata) ||
                    (!omitContext && srcPropagationMedatada);
                if (telemetryMetadata || omitContext) {
                    telemetry = {
                        metadata: telemetryMetadata,
                        omitContext,
                    };
                }
            }
            const job = new this.Job(queue, node.name, node.data, Object.assign(Object.assign(Object.assign({}, jobsOpts), opts), { parent: parent === null || parent === void 0 ? void 0 : parent.parentOpts, telemetry }), jobId);
            const parentKey = (0, utils_1.getParentKey)(parent === null || parent === void 0 ? void 0 : parent.parentOpts);
            if (node.children && node.children.length > 0) {
                // Create the parent job. It will be a job in status "waiting-children".
                const parentId = jobId;
                const queueKeysParent = new queue_keys_1.QueueKeys(node.prefix || this.opts.prefix);
                await job.addJob(multi, {
                    parentDependenciesKey: parent === null || parent === void 0 ? void 0 : parent.parentDependenciesKey,
                    addToWaitingChildren: true,
                    parentKey,
                });
                const parentDependenciesKey = `${queueKeysParent.toKey(node.queueName, parentId)}:dependencies`;
                const children = await this.addChildren({
                    multi,
                    nodes: node.children,
                    parent: {
                        parentOpts: {
                            id: parentId,
                            queue: queueKeysParent.getQueueQualifiedName(node.queueName),
                        },
                        parentDependenciesKey,
                    },
                    queuesOpts,
                });
                return { job, children };
            }
            else {
                await job.addJob(multi, {
                    parentDependenciesKey: parent === null || parent === void 0 ? void 0 : parent.parentDependenciesKey,
                    parentKey,
                });
                return { job };
            }
        });
    }
    /**
     * Adds nodes (jobs) of multiple flows to the queue. This method will recursively
     * add all its children as well. Note that a given job can potentially be
     * a parent and a child job at the same time, depending on where it is located
     * in the tree hierarchy.
     *
     * @param multi - ioredis ChainableCommander
     * @param nodes - the nodes representing jobs to be added to some queue
     * @returns
     */
    addNodes(multi, nodes) {
        return Promise.all(nodes.map(node => {
            var _a;
            const parentOpts = (_a = node === null || node === void 0 ? void 0 : node.opts) === null || _a === void 0 ? void 0 : _a.parent;
            const parentKey = (0, utils_1.getParentKey)(parentOpts);
            const parentDependenciesKey = parentKey
                ? `${parentKey}:dependencies`
                : undefined;
            return this.addNode({
                multi,
                node,
                parent: {
                    parentOpts,
                    parentDependenciesKey,
                },
            });
        }));
    }
    async getNode(client, node) {
        const queue = this.queueFromNode(node, new queue_keys_1.QueueKeys(node.prefix), node.prefix);
        const job = await this.Job.fromId(queue, node.id);
        if (job) {
            const { processed = {}, unprocessed = [], failed = [], ignored = {}, } = await job.getDependencies({
                failed: {
                    count: node.maxChildren,
                },
                processed: {
                    count: node.maxChildren,
                },
                unprocessed: {
                    count: node.maxChildren,
                },
                ignored: {
                    count: node.maxChildren,
                },
            });
            const processedKeys = Object.keys(processed);
            const ignoredKeys = Object.keys(ignored);
            const childrenCount = processedKeys.length +
                unprocessed.length +
                ignoredKeys.length +
                failed.length;
            const newDepth = node.depth - 1;
            if (childrenCount > 0 && newDepth) {
                const children = await this.getChildren(client, [...processedKeys, ...unprocessed, ...failed, ...ignoredKeys], newDepth, node.maxChildren);
                return { job, children };
            }
            else {
                return { job };
            }
        }
    }
    addChildren({ multi, nodes, parent, queuesOpts }) {
        return Promise.all(nodes.map(node => this.addNode({ multi, node, parent, queuesOpts })));
    }
    getChildren(client, childrenKeys, depth, maxChildren) {
        const getChild = (key) => {
            const [prefix, queueName, id] = key.split(':');
            return this.getNode(client, {
                id,
                queueName,
                prefix,
                depth,
                maxChildren,
            });
        };
        return Promise.all([...childrenKeys.map(getChild)]);
    }
    /**
     * Helper factory method that creates a queue-like object
     * required to create jobs in any queue.
     *
     * @param node -
     * @param queueKeys -
     * @returns
     */
    queueFromNode(node, queueKeys, prefix) {
        return {
            client: this.connection.client,
            name: node.queueName,
            keys: queueKeys.getKeys(node.queueName),
            toKey: (type) => queueKeys.toKey(node.queueName, type),
            opts: { prefix, connection: {} },
            qualifiedName: queueKeys.getQueueQualifiedName(node.queueName),
            closing: this.closing,
            waitUntilReady: async () => this.connection.client,
            removeListener: this.removeListener.bind(this),
            emit: this.emit.bind(this),
            on: this.on.bind(this),
            redisVersion: this.connection.redisVersion,
            trace: async () => { },
        };
    }
    /**
     * Closes the connection and returns a promise that resolves when the connection is closed.
     */
    async close() {
        if (!this.closing) {
            this.closing = this.connection.close();
        }
        await this.closing;
    }
    /**
     * Force disconnects a connection.
     */
    disconnect() {
        return this.connection.disconnect();
    }
}
exports.FlowProducer = FlowProducer;
//# sourceMappingURL=flow-producer.js.map
1
backend/node_modules/bullmq/dist/cjs/classes/flow-producer.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
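For orientation, the flow API vendored above (`FlowProducer`) is typically used as in the following sketch. This example is not part of the commit; the queue name, job names, and Redis connection settings are illustrative assumptions.

```ts
import { FlowProducer } from 'bullmq';

async function main() {
  // Assumed local Redis instance; adjust to your environment.
  const flowProducer = new FlowProducer({
    connection: { host: 'localhost', port: 6379 },
  });

  // The parent stays in "waiting-children" until both children complete;
  // the whole tree is added atomically in one Redis MULTI, as add() above shows.
  const tree = await flowProducer.add({
    name: 'assemble-video',
    queueName: 'renderQueue', // hypothetical queue
    data: { videoId: 42 },
    children: [
      { name: 'render-frames', queueName: 'renderQueue', data: { step: 'frames' } },
      { name: 'encode-audio', queueName: 'renderQueue', data: { step: 'audio' } },
    ],
  });

  console.log('parent job id:', tree.job.id);
  await flowProducer.close();
}

main().catch(console.error);
```

The processor of the parent job can then read its children's results, for example via `job.getChildrenValues()`, which is how flows pass data upwards.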
27
backend/node_modules/bullmq/dist/cjs/classes/index.js
generated
vendored
Normal file
@@ -0,0 +1,27 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const tslib_1 = require("tslib");
tslib_1.__exportStar(require("./async-fifo-queue"), exports);
tslib_1.__exportStar(require("./backoffs"), exports);
tslib_1.__exportStar(require("./child"), exports);
tslib_1.__exportStar(require("./child-pool"), exports);
tslib_1.__exportStar(require("./child-processor"), exports);
tslib_1.__exportStar(require("./errors"), exports);
tslib_1.__exportStar(require("./flow-producer"), exports);
tslib_1.__exportStar(require("./job"), exports);
tslib_1.__exportStar(require("./job-scheduler"), exports);
// export * from './main'; this file must not be exported
// export * from './main-worker'; this file must not be exported
tslib_1.__exportStar(require("./lock-manager"), exports);
tslib_1.__exportStar(require("./queue-base"), exports);
tslib_1.__exportStar(require("./queue-events"), exports);
tslib_1.__exportStar(require("./queue-events-producer"), exports);
tslib_1.__exportStar(require("./queue-getters"), exports);
tslib_1.__exportStar(require("./queue-keys"), exports);
tslib_1.__exportStar(require("./queue"), exports);
tslib_1.__exportStar(require("./redis-connection"), exports);
tslib_1.__exportStar(require("./repeat"), exports);
tslib_1.__exportStar(require("./sandbox"), exports);
tslib_1.__exportStar(require("./scripts"), exports);
tslib_1.__exportStar(require("./worker"), exports);
//# sourceMappingURL=index.js.map
1
backend/node_modules/bullmq/dist/cjs/classes/index.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../src/classes/index.ts"],"names":[],"mappings":";;;AAAA,6DAAmC;AACnC,qDAA2B;AAC3B,kDAAwB;AACxB,uDAA6B;AAC7B,4DAAkC;AAClC,mDAAyB;AACzB,0DAAgC;AAChC,gDAAsB;AACtB,0DAAgC;AAChC,yDAAyD;AACzD,gEAAgE;AAChE,yDAA+B;AAC/B,uDAA6B;AAC7B,yDAA+B;AAC/B,kEAAwC;AACxC,0DAAgC;AAChC,uDAA6B;AAC7B,kDAAwB;AACxB,6DAAmC;AACnC,mDAAyB;AACzB,oDAA0B;AAC1B,oDAA0B;AAC1B,mDAAyB"}
247
backend/node_modules/bullmq/dist/cjs/classes/job-scheduler.js
generated
vendored
Normal file
@@ -0,0 +1,247 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.defaultRepeatStrategy = exports.JobScheduler = void 0;
const tslib_1 = require("tslib");
const cron_parser_1 = require("cron-parser");
const job_1 = require("./job");
const queue_base_1 = require("./queue-base");
const enums_1 = require("../enums");
const utils_1 = require("../utils");
class JobScheduler extends queue_base_1.QueueBase {
    constructor(name, opts, Connection) {
        super(name, opts, Connection);
        this.repeatStrategy =
            (opts.settings && opts.settings.repeatStrategy) || exports.defaultRepeatStrategy;
    }
    async upsertJobScheduler(jobSchedulerId, repeatOpts, jobName, jobData, opts, { override, producerId }) {
        const { every, limit, pattern, offset } = repeatOpts;
        if (pattern && every) {
            throw new Error('Both .pattern and .every options are defined for this repeatable job');
        }
        if (!pattern && !every) {
            throw new Error('Either .pattern or .every options must be defined for this repeatable job');
        }
        if (repeatOpts.immediately && repeatOpts.startDate) {
            throw new Error('Both .immediately and .startDate options are defined for this repeatable job');
        }
        if (repeatOpts.immediately && repeatOpts.every) {
            console.warn("Using option immediately with every does not affect the job's schedule. Job will run immediately anyway.");
        }
        // Check if we reached the limit of the repeatable job's iterations
        const iterationCount = repeatOpts.count ? repeatOpts.count + 1 : 1;
        if (typeof repeatOpts.limit !== 'undefined' &&
            iterationCount > repeatOpts.limit) {
            return;
        }
        // Check if we reached the end date of the repeatable job
        let now = Date.now();
        const { endDate } = repeatOpts;
        if (endDate && now > new Date(endDate).getTime()) {
            return;
        }
        const prevMillis = opts.prevMillis || 0;
        now = prevMillis < now ? now : prevMillis;
        // Check if we have a start date for the repeatable job
        const { immediately } = repeatOpts, filteredRepeatOpts = tslib_1.__rest(repeatOpts, ["immediately"]);
        let nextMillis;
        const newOffset = null;
        if (pattern) {
            nextMillis = await this.repeatStrategy(now, repeatOpts, jobName);
            if (nextMillis < now) {
                nextMillis = now;
            }
        }
        if (nextMillis || every) {
            return this.trace(enums_1.SpanKind.PRODUCER, 'add', `${this.name}.${jobName}`, async (span, srcPropagationMedatada) => {
                var _a, _b;
                let telemetry = opts.telemetry;
                if (srcPropagationMedatada) {
                    const omitContext = (_a = opts.telemetry) === null || _a === void 0 ? void 0 : _a.omitContext;
                    const telemetryMetadata = ((_b = opts.telemetry) === null || _b === void 0 ? void 0 : _b.metadata) ||
                        (!omitContext && srcPropagationMedatada);
                    if (telemetryMetadata || omitContext) {
                        telemetry = {
                            metadata: telemetryMetadata,
                            omitContext,
                        };
                    }
                }
                const mergedOpts = this.getNextJobOpts(nextMillis, jobSchedulerId, Object.assign(Object.assign({}, opts), { repeat: filteredRepeatOpts, telemetry }), iterationCount, newOffset);
                if (override) {
                    // Clamp nextMillis to now if it's in the past
                    if (nextMillis < now) {
                        nextMillis = now;
                    }
                    const [jobId, delay] = await this.scripts.addJobScheduler(jobSchedulerId, nextMillis, JSON.stringify(typeof jobData === 'undefined' ? {} : jobData), job_1.Job.optsAsJSON(opts), {
                        name: jobName,
                        startDate: repeatOpts.startDate
                            ? new Date(repeatOpts.startDate).getTime()
                            : undefined,
                        endDate: endDate ? new Date(endDate).getTime() : undefined,
                        tz: repeatOpts.tz,
                        pattern,
                        every,
                        limit,
                        offset: newOffset,
                    }, job_1.Job.optsAsJSON(mergedOpts), producerId);
                    // Ensure delay is a number (Dragonflydb may return it as a string)
                    const numericDelay = typeof delay === 'string' ? parseInt(delay, 10) : delay;
                    const job = new this.Job(this, jobName, jobData, Object.assign(Object.assign({}, mergedOpts), { delay: numericDelay }), jobId);
                    job.id = jobId;
                    span === null || span === void 0 ? void 0 : span.setAttributes({
                        [enums_1.TelemetryAttributes.JobSchedulerId]: jobSchedulerId,
                        [enums_1.TelemetryAttributes.JobId]: job.id,
                    });
                    return job;
                }
                else {
                    const jobId = await this.scripts.updateJobSchedulerNextMillis(jobSchedulerId, nextMillis, JSON.stringify(typeof jobData === 'undefined' ? {} : jobData), job_1.Job.optsAsJSON(mergedOpts), producerId);
                    if (jobId) {
                        const job = new this.Job(this, jobName, jobData, mergedOpts, jobId);
                        job.id = jobId;
                        span === null || span === void 0 ? void 0 : span.setAttributes({
                            [enums_1.TelemetryAttributes.JobSchedulerId]: jobSchedulerId,
                            [enums_1.TelemetryAttributes.JobId]: job.id,
                        });
                        return job;
                    }
                }
            });
        }
    }
    getNextJobOpts(nextMillis, jobSchedulerId, opts, currentCount, offset) {
        var _a, _b;
        //
        // Generate a unique job id for this iteration.
        //
        const jobId = this.getSchedulerNextJobId({
            jobSchedulerId,
            nextMillis,
        });
        const now = Date.now();
        const delay = nextMillis + offset - now;
        const mergedOpts = Object.assign(Object.assign({}, opts), { jobId, delay: delay < 0 ? 0 : delay, timestamp: now, prevMillis: nextMillis, repeatJobKey: jobSchedulerId });
        mergedOpts.repeat = Object.assign(Object.assign({}, opts.repeat), { offset, count: currentCount, startDate: ((_a = opts.repeat) === null || _a === void 0 ? void 0 : _a.startDate)
                ? new Date(opts.repeat.startDate).getTime()
                : undefined, endDate: ((_b = opts.repeat) === null || _b === void 0 ? void 0 : _b.endDate)
                ? new Date(opts.repeat.endDate).getTime()
                : undefined });
        return mergedOpts;
    }
    async removeJobScheduler(jobSchedulerId) {
        return this.scripts.removeJobScheduler(jobSchedulerId);
    }
    async getSchedulerData(client, key, next) {
        const jobData = await client.hgetall(this.toKey('repeat:' + key));
        return this.transformSchedulerData(key, jobData, next);
    }
    transformSchedulerData(key, jobData, next) {
        if (jobData) {
            const jobSchedulerData = {
                key,
                name: jobData.name,
                next,
            };
            if (jobData.ic) {
                jobSchedulerData.iterationCount = parseInt(jobData.ic);
            }
            if (jobData.limit) {
                jobSchedulerData.limit = parseInt(jobData.limit);
            }
            if (jobData.startDate) {
                jobSchedulerData.startDate = parseInt(jobData.startDate);
            }
            if (jobData.endDate) {
                jobSchedulerData.endDate = parseInt(jobData.endDate);
            }
            if (jobData.tz) {
                jobSchedulerData.tz = jobData.tz;
            }
            if (jobData.pattern) {
                jobSchedulerData.pattern = jobData.pattern;
            }
            if (jobData.every) {
                jobSchedulerData.every = parseInt(jobData.every);
            }
            if (jobData.offset) {
                jobSchedulerData.offset = parseInt(jobData.offset);
            }
            if (jobData.data || jobData.opts) {
                jobSchedulerData.template = this.getTemplateFromJSON(jobData.data, jobData.opts);
            }
            return jobSchedulerData;
        }
        // TODO: remove this check and keyToData as it is here only to support legacy code
        if (key.includes(':')) {
            return this.keyToData(key, next);
        }
    }
    keyToData(key, next) {
        const data = key.split(':');
        const pattern = data.slice(4).join(':') || null;
        return {
            key,
            name: data[0],
            id: data[1] || null,
            endDate: parseInt(data[2]) || null,
            tz: data[3] || null,
            pattern,
            next,
        };
    }
    async getScheduler(id) {
        const [rawJobData, next] = await this.scripts.getJobScheduler(id);
        return this.transformSchedulerData(id, rawJobData ? (0, utils_1.array2obj)(rawJobData) : null, next ? parseInt(next) : null);
    }
    getTemplateFromJSON(rawData, rawOpts) {
        const template = {};
        if (rawData) {
            template.data = JSON.parse(rawData);
        }
        if (rawOpts) {
            template.opts = job_1.Job.optsFromJSON(rawOpts);
        }
        return template;
    }
    async getJobSchedulers(start = 0, end = -1, asc = false) {
        const client = await this.client;
        const jobSchedulersKey = this.keys.repeat;
        const result = asc
            ? await client.zrange(jobSchedulersKey, start, end, 'WITHSCORES')
            : await client.zrevrange(jobSchedulersKey, start, end, 'WITHSCORES');
        const jobs = [];
        for (let i = 0; i < result.length; i += 2) {
            jobs.push(this.getSchedulerData(client, result[i], parseInt(result[i + 1])));
        }
        return Promise.all(jobs);
    }
    async getSchedulersCount() {
        const jobSchedulersKey = this.keys.repeat;
        const client = await this.client;
        return client.zcard(jobSchedulersKey);
    }
    getSchedulerNextJobId({ nextMillis, jobSchedulerId, }) {
        return `repeat:${jobSchedulerId}:${nextMillis}`;
    }
}
exports.JobScheduler = JobScheduler;
const defaultRepeatStrategy = (millis, opts) => {
    const { pattern } = opts;
    const dateFromMillis = new Date(millis);
    const startDate = opts.startDate && new Date(opts.startDate);
    const currentDate = startDate > dateFromMillis ? startDate : dateFromMillis;
    const interval = (0, cron_parser_1.parseExpression)(pattern, Object.assign(Object.assign({}, opts), { currentDate }));
    try {
        if (opts.immediately) {
            return new Date().getTime();
        }
        else {
            return interval.next().getTime();
        }
    }
    catch (e) {
        // Ignore error
    }
};
exports.defaultRepeatStrategy = defaultRepeatStrategy;
//# sourceMappingURL=job-scheduler.js.map
1
backend/node_modules/bullmq/dist/cjs/classes/job-scheduler.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
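The `JobScheduler` above backs repeatable jobs: exactly one of `pattern` (a cron expression parsed by cron-parser) or `every` (an interval in milliseconds) must be set, and `startDate`, `endDate`, `limit`, and `tz` bound the schedule. In recent BullMQ releases it is normally driven through the `Queue` API rather than instantiated directly; a minimal sketch, with an assumed queue, scheduler id, and connection (verify the exact signature against your installed version):

```ts
import { Queue } from 'bullmq';

async function main() {
  const queue = new Queue('reports', {
    connection: { host: 'localhost', port: 6379 }, // assumed connection
  });

  // Setting both `pattern` and `every`, or neither, throws, as the
  // validation at the top of upsertJobScheduler above shows.
  await queue.upsertJobScheduler(
    'nightly-report',                    // hypothetical scheduler id
    { pattern: '0 3 * * *', tz: 'UTC' }, // next run computed via cron-parser
    { name: 'generate-report', data: { format: 'pdf' } }, // job template
  );

  await queue.close();
}

main().catch(console.error);
```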
1053
backend/node_modules/bullmq/dist/cjs/classes/job.js
generated
vendored
Normal file
File diff suppressed because it is too large
1
backend/node_modules/bullmq/dist/cjs/classes/job.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
165
backend/node_modules/bullmq/dist/cjs/classes/lock-manager.js
generated
vendored
Normal file
@@ -0,0 +1,165 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.LockManager = void 0;
const node_abort_controller_1 = require("node-abort-controller");
const enums_1 = require("../enums");
/**
 * Manages lock renewal for BullMQ workers.
 * It periodically extends locks for active jobs to prevent them from being
 * considered stalled by other workers.
 */
class LockManager {
    constructor(worker, opts) {
        this.worker = worker;
        this.opts = opts;
        // Maps job ids with their tokens, timestamps, and abort controllers
        this.trackedJobs = new Map();
        this.closed = false;
    }
    /**
     * Starts the lock manager timers for lock renewal.
     */
    start() {
        if (this.closed) {
            return;
        }
        // Start lock renewal timer if not disabled
        if (this.opts.lockRenewTime > 0) {
            this.startLockExtenderTimer();
        }
    }
    async extendLocks(jobIds) {
        await this.worker.trace(enums_1.SpanKind.INTERNAL, 'extendLocks', this.worker.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.WorkerId]: this.opts.workerId,
                [enums_1.TelemetryAttributes.WorkerName]: this.opts.workerName,
                [enums_1.TelemetryAttributes.WorkerJobsToExtendLocks]: jobIds,
            });
            try {
                const jobTokens = jobIds.map(id => { var _a; return ((_a = this.trackedJobs.get(id)) === null || _a === void 0 ? void 0 : _a.token) || ''; });
                const erroredJobIds = await this.worker.extendJobLocks(jobIds, jobTokens, this.opts.lockDuration);
                if (erroredJobIds.length > 0) {
                    this.worker.emit('lockRenewalFailed', erroredJobIds);
                    for (const jobId of erroredJobIds) {
                        this.worker.emit('error', new Error(`could not renew lock for job ${jobId}`));
                    }
                }
                const succeededJobIds = jobIds.filter(id => !erroredJobIds.includes(id));
                if (succeededJobIds.length > 0) {
                    this.worker.emit('locksRenewed', {
                        count: succeededJobIds.length,
                        jobIds: succeededJobIds,
                    });
                }
            }
            catch (err) {
                this.worker.emit('error', err);
            }
        });
    }
    startLockExtenderTimer() {
        clearTimeout(this.lockRenewalTimer);
        if (!this.closed) {
            this.lockRenewalTimer = setTimeout(async () => {
                // Get all the jobs whose locks expire in less than 1/2 of the lockRenewTime
                const now = Date.now();
                const jobsToExtend = [];
                for (const jobId of this.trackedJobs.keys()) {
                    const tracked = this.trackedJobs.get(jobId);
                    const { ts, token, abortController } = tracked;
                    if (!ts) {
                        this.trackedJobs.set(jobId, { token, ts: now, abortController });
                        continue;
                    }
                    if (ts + this.opts.lockRenewTime / 2 < now) {
                        this.trackedJobs.set(jobId, { token, ts: now, abortController });
                        jobsToExtend.push(jobId);
                    }
                }
                if (jobsToExtend.length) {
                    await this.extendLocks(jobsToExtend);
                }
                this.startLockExtenderTimer();
            }, this.opts.lockRenewTime / 2);
        }
    }
    /**
     * Stops the lock manager and clears all timers.
     */
    async close() {
        if (this.closed) {
            return;
        }
        this.closed = true;
        if (this.lockRenewalTimer) {
            clearTimeout(this.lockRenewalTimer);
            this.lockRenewalTimer = undefined;
        }
        this.trackedJobs.clear();
    }
    /**
     * Adds a job to be tracked for lock renewal.
     * Returns an AbortController if shouldCreateController is true, undefined otherwise.
     */
    trackJob(jobId, token, ts, shouldCreateController = false) {
        const abortController = shouldCreateController
            ? new node_abort_controller_1.AbortController()
            : undefined;
        if (!this.closed && jobId) {
            this.trackedJobs.set(jobId, { token, ts, abortController });
        }
        return abortController;
    }
    /**
     * Removes a job from lock renewal tracking.
     */
    untrackJob(jobId) {
        this.trackedJobs.delete(jobId);
    }
    /**
     * Gets the number of jobs currently being tracked.
     */
    getActiveJobCount() {
        return this.trackedJobs.size;
    }
    /**
     * Checks if the lock manager is running.
     */
    isRunning() {
        return !this.closed && this.lockRenewalTimer !== undefined;
    }
    /**
     * Cancels a specific job by aborting its signal.
     * @param jobId - The ID of the job to cancel
     * @param reason - Optional reason for the cancellation
     * @returns true if the job was found and cancelled, false otherwise
     */
    cancelJob(jobId, reason) {
        const tracked = this.trackedJobs.get(jobId);
        if (tracked === null || tracked === void 0 ? void 0 : tracked.abortController) {
            tracked.abortController.abort(reason);
            return true;
        }
        return false;
    }
    /**
     * Cancels all tracked jobs by aborting their signals.
     * @param reason - Optional reason for the cancellation
     */
    cancelAllJobs(reason) {
        for (const tracked of this.trackedJobs.values()) {
            if (tracked.abortController) {
                tracked.abortController.abort(reason);
            }
        }
    }
    /**
     * Gets a list of all tracked job IDs.
     * @returns Array of job IDs currently being tracked
     */
    getTrackedJobIds() {
        return Array.from(this.trackedJobs.keys());
    }
}
exports.LockManager = LockManager;
//# sourceMappingURL=lock-manager.js.map
1
backend/node_modules/bullmq/dist/cjs/classes/lock-manager.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"lock-manager.js","sourceRoot":"","sources":["../../../src/classes/lock-manager.ts"],"names":[],"mappings":";;;AAAA,iEAAwD;AACxD,oCAAyD;AAUzD;;;;GAIG;AACH,MAAa,WAAW;IAUtB,YACY,MAAgC,EAChC,IAAwB;QADxB,WAAM,GAAN,MAAM,CAA0B;QAChC,SAAI,GAAJ,IAAI,CAAoB;QATpC,oEAAoE;QAC1D,gBAAW,GAAG,IAAI,GAAG,EAG5B,CAAC;QACM,WAAM,GAAG,KAAK,CAAC;IAKtB,CAAC;IAEJ;;OAEG;IACH,KAAK;QACH,IAAI,IAAI,CAAC,MAAM,EAAE,CAAC;YAChB,OAAO;QACT,CAAC;QAED,2CAA2C;QAC3C,IAAI,IAAI,CAAC,IAAI,CAAC,aAAa,GAAG,CAAC,EAAE,CAAC;YAChC,IAAI,CAAC,sBAAsB,EAAE,CAAC;QAChC,CAAC;IACH,CAAC;IAES,KAAK,CAAC,WAAW,CAAC,MAAgB;QAC1C,MAAM,IAAI,CAAC,MAAM,CAAC,KAAK,CACrB,gBAAQ,CAAC,QAAQ,EACjB,aAAa,EACb,IAAI,CAAC,MAAM,CAAC,IAAI,EAChB,KAAK,EAAE,IAAW,EAAE,EAAE;YACpB,IAAI,aAAJ,IAAI,uBAAJ,IAAI,CAAE,aAAa,CAAC;gBAClB,CAAC,2BAAmB,CAAC,QAAQ,CAAC,EAAE,IAAI,CAAC,IAAI,CAAC,QAAQ;gBAClD,CAAC,2BAAmB,CAAC,UAAU,CAAC,EAAE,IAAI,CAAC,IAAI,CAAC,UAAU;gBACtD,CAAC,2BAAmB,CAAC,uBAAuB,CAAC,EAAE,MAAM;aACtD,CAAC,CAAC;YAEH,IAAI,CAAC;gBACH,MAAM,SAAS,GAAG,MAAM,CAAC,GAAG,CAC1B,EAAE,CAAC,EAAE,WAAC,OAAA,CAAA,MAAA,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,EAAE,CAAC,0CAAE,KAAK,KAAI,EAAE,CAAA,EAAA,CAC5C,CAAC;gBAEF,MAAM,aAAa,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC,cAAc,CACpD,MAAM,EACN,SAAS,EACT,IAAI,CAAC,IAAI,CAAC,YAAY,CACvB,CAAC;gBAEF,IAAI,aAAa,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;oBAC7B,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,mBAAmB,EAAE,aAAa,CAAC,CAAC;oBAErD,KAAK,MAAM,KAAK,IAAI,aAAa,EAAE,CAAC;wBAClC,IAAI,CAAC,MAAM,CAAC,IAAI,CACd,OAAO,EACP,IAAI,KAAK,CAAC,gCAAgC,KAAK,EAAE,CAAC,CACnD,CAAC;oBACJ,CAAC;gBACH,CAAC;gBAED,MAAM,eAAe,GAAG,MAAM,CAAC,MAAM,CACnC,EAAE,CAAC,EAAE,CAAC,CAAC,aAAa,CAAC,QAAQ,CAAC,EAAE,CAAC,CAClC,CAAC;gBAEF,IAAI,eAAe,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;oBAC/B,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,cAAc,EAAE;wBAC/B,KAAK,EAAE,eAAe,CAAC,MAAM;wBAC7B,MAAM,EAAE,eAAe;qBACxB,CAAC,CAAC;gBACL,CAAC;YACH,CAAC;YAAC,OAAO,GAAG,EAAE,CAAC;gBACb,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,OAAO,EAAE,GAAY,CAAC,CAAC;YAC1C,CAAC;QACH,CAAC,CACF,CAAC;IACJ,CAAC;IAEO,sBAAsB;QAC5B,YAAY,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC;QAEpC,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,IAAI,CAAC,gBAAgB,GAAG,UAAU,CAAC,KAAK,IAAI,EAAE;gBAC5C,4EAA4E;gBAC5E,MAAM,GAAG,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;gBACvB,MAAM,YAAY,GAAa,EAAE,CAAC;gBAElC,KAAK,MAAM,KAAK,IAAI,IAAI,CAAC,WAAW,CAAC,IAAI,EAAE,EAAE,CAAC;oBAC5C,MAAM,OAAO,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,CAAE,CAAC;oBAC7C,MAAM,EAAE,EAAE,EAAE,KAAK,EAAE,eAAe,EAAE,GAAG,OAAO,CAAC;oBAC/C,IAAI,CAAC,EAAE,EAAE,CAAC;wBACR,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,EAAE,KAAK,EAAE,EAAE,EAAE,GAAG,EAAE,eAAe,EAAE,CAAC,CAAC;wBACjE,SAAS;oBACX,CAAC;oBAED,IAAI,EAAE,GAAG,IAAI,CAAC,IAAI,CAAC,aAAa,GAAG,CAAC,GAAG,GAAG,EAAE,CAAC;wBAC3C,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,EAAE,KAAK,EAAE,EAAE,EAAE,GAAG,EAAE,eAAe,EAAE,CAAC,CAAC;wBACjE,YAAY,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;oBAC3B,CAAC;gBACH,CAAC;gBAED,IAAI,YAAY,CAAC,MAAM,EAAE,CAAC;oBACxB,MAAM,IAAI,CAAC,WAAW,CAAC,YAAY,CAAC,CAAC;gBACvC,CAAC;gBAED,IAAI,CAAC,sBAAsB,EAAE,CAAC;YAChC,CAAC,EAAE,IAAI,CAAC,IAAI,CAAC,aAAa,GAAG,CAAC,CAAC,CAAC;QAClC,CAAC;IACH,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,IAAI,CAAC,MAAM,EAAE,CAAC;YAChB,OAAO;QACT,CAAC;QAED,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;QAEnB,IAAI,IAAI,CAAC,gBAAgB,EAAE,CAAC;YAC1B,YAAY,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC;YACpC,IAAI,CAAC,gBAAgB,GAAG,SAAS,CAAC;QACpC,CAAC;QAED,IAAI,CAAC,WAAW,CAAC,KAAK,EAAE,CAAC;IAC3B,CAAC;IAED;;;OAGG;IACH,QAAQ,CACN,KAAa,EACb,KAAa,EACb,EAAU,EACV,sBAAsB,GAAG,KAAK;QAE9B,MAAM,eAAe,GAAG,sBAAsB;YAC5C,CAAC,CAAC,IAAI,uCAAe,EAAE;YACvB,CAAC,CAAC,SAAS,CAAC;QACd,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,KAAK,EAAE,CAAC;YAC1B,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,EAAE,KAAK,EAAE,EAAE,EAAE,eAAe,EAAE,CAAC,CAAC,CAAC;QAC9D,CAAC;QACD,OAAO,eAAe,CAAC;IACzB,CAAC;IAED;;OAEG;IACH,UAAU,CAAC,KAAa;QACtB,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;IACjC,CAAC;IAED;;OAEG;IACH,iBAAiB;QACf,OAAO,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;IAC/B,CAAC;IAED;;OAEG;IACH,SAAS;QACP,OAAO,CAAC,IAAI,CAAC,MAAM,IAAI,IAAI,CAAC,gBAAgB,KAAK,SAAS,CAAC;IAC7D,CAAC;IAED;;;;;OAKG;IACH,SAAS,CAAC,KAAa,EAAE,MAAe;QACtC,MAAM,OAAO,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;QAC5C,IAAI,OAAO,aAAP,OAAO,uBAAP,OAAO,CAAE,eAAe,EAAE,CAAC;YAC7B,OAAO,CAAC,eAAe,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;YACtC,OAAO,IAAI,CAAC;QACd,CAAC;QACD,OAAO,KAAK,CAAC;IACf,CAAC;IAED;;;OAGG;IACH,aAAa,CAAC,MAAe;QAC3B,KAAK,MAAM,OAAO,IAAI,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,EAAE,CAAC;YAChD,IAAI,OAAO,CAAC,eAAe,EAAE,CAAC;gBAC5B,OAAO,CAAC,eAAe,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;YACxC,CAAC;QACH,CAAC;IACH,CAAC;IAED;;;OAGG;IACH,gBAAgB;QACd,OAAO,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,IAAI,EAAE,CAAC,CAAC;IAC7C,CAAC;CACF;AA5MD,kCA4MC"}
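`LockManager` is internal to the `Worker`: its timer fires every `lockRenewTime / 2` milliseconds and re-extends the lock of each tracked job so that other workers do not treat it as stalled. The relevant knobs are ordinary worker options; a sketch with assumed values and names:

```ts
import { Worker } from 'bullmq';

const worker = new Worker(
  'renderQueue', // assumed queue name
  async job => {
    // long-running processing; the lock is renewed in the background
    return 'done';
  },
  {
    connection: { host: 'localhost', port: 6379 }, // assumed connection
    lockDuration: 60_000,  // TTL granted on each lock extension
    lockRenewTime: 30_000, // renewal timer fires roughly every 15 s (half of this)
  },
);

// The LockManager above emits an 'error' per job whose lock could not be renewed.
worker.on('error', err => console.error(err));
```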
44
backend/node_modules/bullmq/dist/cjs/classes/main-base.js
generated
vendored
Normal file
@@ -0,0 +1,44 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
/**
 * Wrapper for sandboxing.
 */
const child_processor_1 = require("./child-processor");
const enums_1 = require("../enums");
const utils_1 = require("../utils");
exports.default = (send, receiver) => {
    const childProcessor = new child_processor_1.ChildProcessor(send, receiver);
    receiver === null || receiver === void 0 ? void 0 : receiver.on('message', async (msg) => {
        try {
            switch (msg.cmd) {
                case enums_1.ChildCommand.Init:
                    await childProcessor.init(msg.value);
                    break;
                case enums_1.ChildCommand.Start:
                    await childProcessor.start(msg.job, msg === null || msg === void 0 ? void 0 : msg.token);
                    break;
                case enums_1.ChildCommand.Stop:
                    break;
            }
        }
        catch (err) {
            console.error('Error handling child message');
        }
    });
    process.on('SIGTERM', () => childProcessor.waitForCurrentJobAndExit());
    process.on('SIGINT', () => childProcessor.waitForCurrentJobAndExit());
    process.on('uncaughtException', async (err) => {
        if (typeof err !== 'object') {
            err = new Error((0, utils_1.toString)(err));
        }
        await send({
            cmd: enums_1.ParentCommand.Failed,
            value: (0, utils_1.errorToJSON)(err),
        });
        // An uncaught exception leaves this process in a potentially undetermined
        // state, so we must exit.
        process.exit();
    });
};
//# sourceMappingURL=main-base.js.map
1
backend/node_modules/bullmq/dist/cjs/classes/main-base.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"main-base.js","sourceRoot":"","sources":["../../../src/classes/main-base.ts"],"names":[],"mappings":";;AAAA;;;GAGG;AACH,uDAAmD;AACnD,oCAAuD;AACvD,oCAAiD;AAGjD,kBAAe,CAAC,IAAiC,EAAE,QAAkB,EAAE,EAAE;IACvE,MAAM,cAAc,GAAG,IAAI,gCAAc,CAAC,IAAI,EAAE,QAAQ,CAAC,CAAC;IAE1D,QAAQ,aAAR,QAAQ,uBAAR,QAAQ,CAAE,EAAE,CAAC,SAAS,EAAE,KAAK,EAAC,GAAG,EAAC,EAAE;QAClC,IAAI,CAAC;YACH,QAAQ,GAAG,CAAC,GAAmB,EAAE,CAAC;gBAChC,KAAK,oBAAY,CAAC,IAAI;oBACpB,MAAM,cAAc,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;oBACrC,MAAM;gBACR,KAAK,oBAAY,CAAC,KAAK;oBACrB,MAAM,cAAc,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,EAAE,GAAG,aAAH,GAAG,uBAAH,GAAG,CAAE,KAAK,CAAC,CAAC;oBAChD,MAAM;gBACR,KAAK,oBAAY,CAAC,IAAI;oBACpB,MAAM;YACV,CAAC;QACH,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACb,OAAO,CAAC,KAAK,CAAC,8BAA8B,CAAC,CAAC;QAChD,CAAC;IACH,CAAC,CAAC,CAAC;IAEH,OAAO,CAAC,EAAE,CAAC,SAAS,EAAE,GAAG,EAAE,CAAC,cAAc,CAAC,wBAAwB,EAAE,CAAC,CAAC;IACvE,OAAO,CAAC,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,cAAc,CAAC,wBAAwB,EAAE,CAAC,CAAC;IAEtE,OAAO,CAAC,EAAE,CAAC,mBAAmB,EAAE,KAAK,EAAE,GAAQ,EAAE,EAAE;QACjD,IAAI,OAAO,GAAG,KAAK,QAAQ,EAAE,CAAC;YAC5B,GAAG,GAAG,IAAI,KAAK,CAAC,IAAA,gBAAQ,EAAC,GAAG,CAAC,CAAC,CAAC;QACjC,CAAC;QAED,MAAM,IAAI,CAAC;YACT,GAAG,EAAE,qBAAa,CAAC,MAAM;YACzB,KAAK,EAAE,IAAA,mBAAW,EAAC,GAAG,CAAC;SACxB,CAAC,CAAC;QAEH,iFAAiF;QACjF,eAAe;QACf,OAAO,CAAC,IAAI,EAAE,CAAC;IACjB,CAAC,CAAC,CAAC;AACL,CAAC,CAAC"}
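`main-base.js`, together with `main.js` (child process) and `main-worker.js` (worker thread) below, is the code BullMQ loads on the far side of a sandboxed processor: it reacts to `Init`/`Start`/`Stop` commands, reports failures to the parent, and exits on an uncaught exception. Sandboxing is enabled by passing a file path instead of a function to the `Worker`; a sketch with assumed paths and names:

```ts
import { Worker } from 'bullmq';
import path from 'path';

// processor.js (hypothetical) exports the job handler:
//   module.exports = async job => { /* heavy CPU work */ };
const worker = new Worker(
  'renderQueue', // assumed queue name
  path.join(__dirname, 'processor.js'),
  {
    connection: { host: 'localhost', port: 6379 }, // assumed connection
    useWorkerThreads: true, // the default (false) forks a child process instead
  },
);
```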
1
backend/node_modules/bullmq/dist/cjs/classes/main-worker.d.ts
generated
vendored
Normal file
@@ -0,0 +1 @@
export {};
10
backend/node_modules/bullmq/dist/cjs/classes/main-worker.js
generated
vendored
Normal file
@@ -0,0 +1,10 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
/**
 * Worker thread wrapper for sandboxing.
 */
const worker_threads_1 = require("worker_threads");
const main_base_1 = require("./main-base");
(0, main_base_1.default)(async (msg) => worker_threads_1.parentPort.postMessage(msg), worker_threads_1.parentPort);
//# sourceMappingURL=main-worker.js.map
1
backend/node_modules/bullmq/dist/cjs/classes/main-worker.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"main-worker.js","sourceRoot":"","sources":["../../../src/classes/main-worker.ts"],"names":[],"mappings":";;AAAA;;;GAGG;AACH,mDAA4C;AAC5C,2CAAmC;AAEnC,IAAA,mBAAQ,EAAC,KAAK,EAAE,GAAQ,EAAE,EAAE,CAAC,2BAAU,CAAC,WAAW,CAAC,GAAG,CAAC,EAAE,2BAAU,CAAC,CAAC"}
1
backend/node_modules/bullmq/dist/cjs/classes/main.d.ts
generated
vendored
Normal file
@@ -0,0 +1 @@
export {};
10
backend/node_modules/bullmq/dist/cjs/classes/main.js
generated
vendored
Normal file
@@ -0,0 +1,10 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
/**
 * Child process wrapper for sandboxing.
 */
const utils_1 = require("../utils");
const main_base_1 = require("./main-base");
(0, main_base_1.default)((msg) => (0, utils_1.childSend)(process, msg), process);
//# sourceMappingURL=main.js.map
1
backend/node_modules/bullmq/dist/cjs/classes/main.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"main.js","sourceRoot":"","sources":["../../../src/classes/main.ts"],"names":[],"mappings":";;AAAA;;;GAGG;AACH,oCAAqC;AACrC,2CAAmC;AAEnC,IAAA,mBAAQ,EAAC,CAAC,GAAQ,EAAE,EAAE,CAAC,IAAA,iBAAS,EAAC,OAAO,EAAE,GAAG,CAAC,EAAE,OAAO,CAAC,CAAC"}
157
backend/node_modules/bullmq/dist/cjs/classes/queue-base.js
generated
vendored
Normal file
@@ -0,0 +1,157 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.QueueBase = void 0;
const events_1 = require("events");
const utils_1 = require("../utils");
const create_scripts_1 = require("../utils/create-scripts");
const redis_connection_1 = require("./redis-connection");
const job_1 = require("./job");
const queue_keys_1 = require("./queue-keys");
/**
 * Base class for all classes that need to interact with queues.
 * This class is normally not used directly, but extended by the other classes.
 */
class QueueBase extends events_1.EventEmitter {
    /**
     * @param name - The name of the queue.
     * @param opts - Options for the queue.
     * @param Connection - An optional "Connection" class used to instantiate a Connection. This is useful for
     * testing with mockups and/or extending the Connection class and passing an alternate implementation.
     */
    constructor(name, opts = { connection: {} }, Connection = redis_connection_1.RedisConnection, hasBlockingConnection = false) {
        super();
        this.name = name;
        this.opts = opts;
        this.closed = false;
        this.hasBlockingConnection = false;
        this.hasBlockingConnection = hasBlockingConnection;
        this.opts = Object.assign({ prefix: 'bull' }, opts);
        if (!name) {
            throw new Error('Queue name must be provided');
        }
        if (name.includes(':')) {
            throw new Error('Queue name cannot contain :');
        }
        this.connection = new Connection(opts.connection, {
            shared: (0, utils_1.isRedisInstance)(opts.connection),
            blocking: hasBlockingConnection,
            skipVersionCheck: opts.skipVersionCheck,
            skipWaitingForReady: opts.skipWaitingForReady,
        });
        this.connection.on('error', (error) => this.emit('error', error));
        this.connection.on('close', () => {
            if (!this.closing) {
                this.emit('ioredis:close');
            }
        });
        const queueKeys = new queue_keys_1.QueueKeys(opts.prefix);
        this.qualifiedName = queueKeys.getQueueQualifiedName(name);
        this.keys = queueKeys.getKeys(name);
        this.toKey = (type) => queueKeys.toKey(name, type);
        this.createScripts();
    }
    /**
     * Returns a promise that resolves to a redis client. Normally used only by subclasses.
     */
    get client() {
        return this.connection.client;
    }
    createScripts() {
        this.scripts = (0, create_scripts_1.createScripts)(this);
    }
    /**
     * Returns the version of the Redis instance the client is connected to.
     */
    get redisVersion() {
        return this.connection.redisVersion;
    }
    /**
     * Helper to easily extend Job class calls.
     */
    get Job() {
        return job_1.Job;
    }
    /**
     * Emits an event. Normally used by subclasses to emit events.
     *
     * @param event - The emitted event.
     * @param args -
     * @returns
     */
    emit(event, ...args) {
        try {
            return super.emit(event, ...args);
        }
        catch (err) {
            try {
                return super.emit('error', err);
            }
            catch (err) {
                // We give up if the error event also throws an exception.
                console.error(err);
                return false;
            }
        }
    }
    waitUntilReady() {
        return this.client;
    }
    base64Name() {
        return Buffer.from(this.name).toString('base64');
    }
    clientName(suffix = '') {
        const queueNameBase64 = this.base64Name();
        return `${this.opts.prefix}:${queueNameBase64}${suffix}`;
    }
    /**
     * Closes the connection and returns a promise that resolves when the connection is closed.
     */
    async close() {
        if (!this.closing) {
            this.closing = this.connection.close();
        }
        await this.closing;
        this.closed = true;
    }
    /**
     * Force disconnects a connection.
     */
    disconnect() {
        return this.connection.disconnect();
    }
    async checkConnectionError(fn, delayInMs = utils_1.DELAY_TIME_5) {
        try {
            return await fn();
        }
        catch (error) {
            if ((0, utils_1.isNotConnectionError)(error)) {
                this.emit('error', error);
            }
            if (!this.closing && delayInMs) {
                await (0, utils_1.delay)(delayInMs);
            }
            else {
                return;
            }
        }
    }
    /**
     * Wraps the code with telemetry and provides a span for configuration.
     *
     * @param spanKind - kind of the span: Producer, Consumer, Internal
     * @param operation - operation name (such as add, process, etc.)
     * @param destination - destination name (normally the queue name)
     * @param callback - code to wrap with telemetry
     * @param srcPropagationMetadata -
     * @returns
     */
    trace(spanKind, operation, destination, callback, srcPropagationMetadata) {
        return (0, utils_1.trace)(this.opts.telemetry, spanKind, this.name, operation, destination, callback, srcPropagationMetadata);
    }
}
exports.QueueBase = QueueBase;
//# sourceMappingURL=queue-base.js.map
1
backend/node_modules/bullmq/dist/cjs/classes/queue-base.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"queue-base.js","sourceRoot":"","sources":["../../../src/classes/queue-base.ts"],"names":[],"mappings":";;;AAAA,mCAAsC;AAQtC,oCAMkB;AAClB,4DAAwD;AACxD,yDAAqD;AACrD,+BAA4B;AAC5B,6CAAkD;AAIlD;;;;GAIG;AACH,MAAa,SAAU,SAAQ,qBAAY;IAWzC;;;;;;OAMG;IACH,YACkB,IAAY,EACrB,OAAyB,EAAE,UAAU,EAAE,EAAE,EAAE,EAClD,aAAqC,kCAAe,EACpD,qBAAqB,GAAG,KAAK;QAE7B,KAAK,EAAE,CAAC;QALQ,SAAI,GAAJ,IAAI,CAAQ;QACrB,SAAI,GAAJ,IAAI,CAAuC;QAf1C,WAAM,GAAG,KAAK,CAAC;QACf,0BAAqB,GAAG,KAAK,CAAC;QAoBtC,IAAI,CAAC,qBAAqB,GAAG,qBAAqB,CAAC;QACnD,IAAI,CAAC,IAAI,mBACP,MAAM,EAAE,MAAM,IACX,IAAI,CACR,CAAC;QAEF,IAAI,CAAC,IAAI,EAAE,CAAC;YACV,MAAM,IAAI,KAAK,CAAC,6BAA6B,CAAC,CAAC;QACjD,CAAC;QAED,IAAI,IAAI,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE,CAAC;YACvB,MAAM,IAAI,KAAK,CAAC,6BAA6B,CAAC,CAAC;QACjD,CAAC;QAED,IAAI,CAAC,UAAU,GAAG,IAAI,UAAU,CAAC,IAAI,CAAC,UAAU,EAAE;YAChD,MAAM,EAAE,IAAA,uBAAe,EAAC,IAAI,CAAC,UAAU,CAAC;YACxC,QAAQ,EAAE,qBAAqB;YAC/B,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,mBAAmB,EAAE,IAAI,CAAC,mBAAmB;SAC9C,CAAC,CAAC;QAEH,IAAI,CAAC,UAAU,CAAC,EAAE,CAAC,OAAO,EAAE,CAAC,KAAY,EAAE,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,KAAK,CAAC,CAAC,CAAC;QACzE,IAAI,CAAC,UAAU,CAAC,EAAE,CAAC,OAAO,EAAE,GAAG,EAAE;YAC/B,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;gBAClB,IAAI,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;YAC7B,CAAC;QACH,CAAC,CAAC,CAAC;QAEH,MAAM,SAAS,GAAG,IAAI,sBAAS,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;QAC7C,IAAI,CAAC,aAAa,GAAG,SAAS,CAAC,qBAAqB,CAAC,IAAI,CAAC,CAAC;QAC3D,IAAI,CAAC,IAAI,GAAG,SAAS,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC;QACpC,IAAI,CAAC,KAAK,GAAG,CAAC,IAAY,EAAE,EAAE,CAAC,SAAS,CAAC,KAAK,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC;QAC3D,IAAI,CAAC,aAAa,EAAE,CAAC;IACvB,CAAC;IAED;;OAEG;IACH,IAAI,MAAM;QACR,OAAO,IAAI,CAAC,UAAU,CAAC,MAAM,CAAC;IAChC,CAAC;IAES,aAAa;QACrB,IAAI,CAAC,OAAO,GAAG,IAAA,8BAAa,EAAC,IAAI,CAAC,CAAC;IACrC,CAAC;IAED;;OAEG;IACH,IAAI,YAAY;QACd,OAAO,IAAI,CAAC,UAAU,CAAC,YAAY,CAAC;IACtC,CAAC;IAED;;OAEG;IACH,IAAc,GAAG;QACf,OAAO,SAAG,CAAC;IACb,CAAC;IAED;;;;;;OAMG;IACH,IAAI,CAAC,KAAsB,EAAE,GAAG,IAAW;QACzC,IAAI,CAAC;YACH,OAAO,KAAK,CAAC,IAAI,CAAC,KAAK,EAAE,GAAG,IAAI,CAAC,CAAC;QACpC,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACb,IAAI,CAAC;gBACH,OAAO,KAAK,CAAC,IAAI,CAAC,OAAO,EAAE,GAAG,CAAC,CAAC;YAClC,CAAC;YAAC,OAAO,GAAG,EAAE,CAAC;gBACb,0DAA0D;gBAC1D,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;gBACnB,OAAO,KAAK,CAAC;YACf,CAAC;QACH,CAAC;IACH,CAAC;IAED,cAAc;QACZ,OAAO,IAAI,CAAC,MAAM,CAAC;IACrB,CAAC;IAES,UAAU;QAClB,OAAO,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;IACnD,CAAC;IAES,UAAU,CAAC,MAAM,GAAG,EAAE;QAC9B,MAAM,eAAe,GAAG,IAAI,CAAC,UAAU,EAAE,CAAC;QAC1C,OAAO,GAAG,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,eAAe,GAAG,MAAM,EAAE,CAAC;IAC3D,CAAC;IAED;;;OAGG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,UAAU,CAAC,KAAK,EAAE,CAAC;QACzC,CAAC;QACD,MAAM,IAAI,CAAC,OAAO,CAAC;QACnB,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;IACrB,CAAC;IAED;;;OAGG;IACH,UAAU;QACR,OAAO,IAAI,CAAC,UAAU,CAAC,UAAU,EAAE,CAAC;IACtC,CAAC;IAES,KAAK,CAAC,oBAAoB,CAClC,EAAoB,EACpB,SAAS,GAAG,oBAAY;QAExB,IAAI,CAAC;YACH,OAAO,MAAM,EAAE,EAAE,CAAC;QACpB,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,IAAI,IAAA,4BAAoB,EAAC,KAAc,CAAC,EAAE,CAAC;gBACzC,IAAI,CAAC,IAAI,CAAC,OAAO,EAAS,KAAK,CAAC,CAAC;YACnC,CAAC;YAED,IAAI,CAAC,IAAI,CAAC,OAAO,IAAI,SAAS,EAAE,CAAC;gBAC/B,MAAM,IAAA,aAAK,EAAC,SAAS,CAAC,CAAC;YACzB,CAAC;iBAAM,CAAC;gBACN,OAAO;YACT,CAAC;QACH,CAAC;IACH,CAAC;IAED;;;;;;;;;OASG;IACH,KAAK,CACH,QAAkB,EAClB,SAAiB,EACjB,WAAmB,EACnB,QAA0E,EAC1E,sBAA+B;QAE/B,OAAO,IAAA,aAAK,EACV,IAAI,CAAC,IAAI,CAAC,SAAS,EACnB,QAAQ,EACR,IAAI,CAAC,IAAI,EACT,SAAS,EACT,WAAW,EACX,QAAQ,EACR,sBAAsB,CACvB,CAAC;IACJ,CAAC;CACF;AA1LD,8BA0LC"}
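`QueueBase.trace` above wires the optional telemetry interface: when a `telemetry` implementation is passed in the options, every operation is wrapped in a span. A hedged sketch using the companion `bullmq-otel` package (an assumption here; any object implementing BullMQ's telemetry interface should work the same way):

```ts
import { Queue } from 'bullmq';
import { BullMQOtel } from 'bullmq-otel'; // optional package, assumed installed

// With this option set, QueueBase.trace above creates a span around
// each queue operation instead of calling the callback directly.
const queue = new Queue('renderQueue', {
  connection: { host: 'localhost', port: 6379 }, // assumed connection
  telemetry: new BullMQOtel('backend'),
});
```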
42
backend/node_modules/bullmq/dist/cjs/classes/queue-events-producer.js
generated
vendored
Normal file
@@ -0,0 +1,42 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.QueueEventsProducer = void 0;
const tslib_1 = require("tslib");
const queue_base_1 = require("./queue-base");
/**
 * The QueueEventsProducer class is used for publishing custom events.
 */
class QueueEventsProducer extends queue_base_1.QueueBase {
    constructor(name, opts = {
        connection: {},
    }, Connection) {
        super(name, Object.assign({ blockingConnection: false }, opts), Connection);
        this.opts = opts;
    }
    /**
     * Publish custom event to be processed in QueueEvents.
     * @param argsObj - Event payload
     * @param maxEvents - Max quantity of events to be saved
     */
    async publishEvent(argsObj, maxEvents = 1000) {
        const client = await this.client;
        const key = this.keys.events;
        const { eventName } = argsObj, restArgs = tslib_1.__rest(argsObj, ["eventName"]);
        const args = ['MAXLEN', '~', maxEvents, '*', 'event', eventName];
        for (const [key, value] of Object.entries(restArgs)) {
            args.push(key, value);
        }
        await client.xadd(key, ...args);
    }
    /**
     * Closes the connection and returns a promise that resolves when the connection is closed.
     */
    async close() {
        if (!this.closing) {
            this.closing = this.connection.close();
        }
        await this.closing;
    }
}
exports.QueueEventsProducer = QueueEventsProducer;
//# sourceMappingURL=queue-events-producer.js.map
1
backend/node_modules/bullmq/dist/cjs/classes/queue-events-producer.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"queue-events-producer.js","sourceRoot":"","sources":["../../../src/classes/queue-events-producer.ts"],"names":[],"mappings":";;;;AACA,6CAAyC;AAGzC;;GAEG;AACH,MAAa,mBAAoB,SAAQ,sBAAS;IAChD,YACE,IAAY,EACZ,OAAmC;QACjC,UAAU,EAAE,EAAE;KACf,EACD,UAAmC;QAEnC,KAAK,CACH,IAAI,kBAEF,kBAAkB,EAAE,KAAK,IACtB,IAAI,GAET,UAAU,CACX,CAAC;QAEF,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC;IACnB,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,YAAY,CAChB,OAAU,EACV,SAAS,GAAG,IAAI;QAEhB,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC;QACjC,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC;QAC7B,MAAM,EAAE,SAAS,KAAkB,OAAO,EAApB,QAAQ,kBAAK,OAAO,EAApC,aAA0B,CAAU,CAAC;QAC3C,MAAM,IAAI,GAAU,CAAC,QAAQ,EAAE,GAAG,EAAE,SAAS,EAAE,GAAG,EAAE,OAAO,EAAE,SAAS,CAAC,CAAC;QAExE,KAAK,MAAM,CAAC,GAAG,EAAE,KAAK,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC;YACpD,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE,KAAK,CAAC,CAAC;QACxB,CAAC;QAED,MAAM,MAAM,CAAC,IAAI,CAAC,GAAG,EAAE,GAAG,IAAI,CAAC,CAAC;IAClC,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,UAAU,CAAC,KAAK,EAAE,CAAC;QACzC,CAAC;QACD,MAAM,IAAI,CAAC,OAAO,CAAC;IACrB,CAAC;CACF;AAlDD,kDAkDC"}
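`QueueEventsProducer.publishEvent` above appends a custom entry to the queue's event stream with `XADD`, capped via `MAXLEN ~ maxEvents`; any `QueueEvents` instance on the same queue receives it. A sketch with an assumed event name and payload (event typing loosened for brevity):

```ts
import { QueueEvents, QueueEventsProducer } from 'bullmq';

async function main() {
  const connection = { host: 'localhost', port: 6379 }; // assumed connection

  const producer = new QueueEventsProducer('renderQueue', { connection });
  const listener = new QueueEvents('renderQueue', { connection });
  await listener.waitUntilReady();

  // Custom events travel through the same stream as the built-in ones.
  listener.on('deploymentDone' as any, (args: any, id: string) => {
    console.log('custom event', id, args);
  });

  await producer.publishEvent({ eventName: 'deploymentDone', version: '1.2.3' });
}

main().catch(console.error);
```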
139
backend/node_modules/bullmq/dist/cjs/classes/queue-events.js
generated
vendored
Normal file
@@ -0,0 +1,139 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.QueueEvents = void 0;
const tslib_1 = require("tslib");
const utils_1 = require("../utils");
const queue_base_1 = require("./queue-base");
/**
 * The QueueEvents class is used for listening to the global events
 * emitted by a given queue.
 *
 * This class requires a dedicated redis connection.
 */
class QueueEvents extends queue_base_1.QueueBase {
    constructor(name, _a = {
        connection: {},
    }, Connection) {
        var { connection, autorun = true } = _a, opts = tslib_1.__rest(_a, ["connection", "autorun"]);
        super(name, Object.assign(Object.assign({}, opts), { connection: (0, utils_1.isRedisInstance)(connection)
                ? connection.duplicate()
                : connection }), Connection, true);
        this.running = false;
        this.blocking = false;
        this.opts = Object.assign({
            blockingTimeout: 10000,
        }, this.opts);
        if (autorun) {
            this.run().catch(error => this.emit('error', error));
        }
    }
    emit(event, ...args) {
        return super.emit(event, ...args);
    }
    off(eventName, listener) {
        super.off(eventName, listener);
        return this;
    }
    on(event, listener) {
        super.on(event, listener);
        return this;
    }
    once(event, listener) {
        super.once(event, listener);
        return this;
    }
    /**
     * Manually starts running the event consuming loop. This shall be used if you do not
     * use the default "autorun" option on the constructor.
     */
    async run() {
        if (!this.running) {
            try {
                this.running = true;
                const client = await this.client;
                // TODO: Planned for deprecation as it does not really have a use case
                try {
                    await client.client('SETNAME', this.clientName(utils_1.QUEUE_EVENT_SUFFIX));
                }
                catch (err) {
                    if (!utils_1.clientCommandMessageReg.test(err.message)) {
                        throw err;
                    }
                }
                await this.consumeEvents(client);
            }
            catch (error) {
                this.running = false;
                throw error;
            }
        }
        else {
            throw new Error('Queue Events is already running.');
        }
    }
    async consumeEvents(client) {
        const opts = this.opts;
        const key = this.keys.events;
        let id = opts.lastEventId || '$';
        while (!this.closing) {
            this.blocking = true;
            // Cast to the actual return type, see: https://github.com/DefinitelyTyped/DefinitelyTyped/issues/44301
            const data = await this.checkConnectionError(() => client.xread('BLOCK', opts.blockingTimeout, 'STREAMS', key, id));
            this.blocking = false;
            if (data) {
                const stream = data[0];
                const events = stream[1];
                for (let i = 0; i < events.length; i++) {
                    id = events[i][0];
                    const args = (0, utils_1.array2obj)(events[i][1]);
                    //
                    // TODO: we may need a separate stream for progress data
                    // to avoid this hack.
                    switch (args.event) {
                        case 'progress':
                            args.data = JSON.parse(args.data);
                            break;
                        case 'completed':
                            args.returnvalue = JSON.parse(args.returnvalue);
                            break;
                    }
                    const { event } = args, restArgs = tslib_1.__rest(args, ["event"]);
                    if (event === 'drained') {
                        this.emit(event, id);
                    }
                    else {
                        this.emit(event, restArgs, id);
                        if (restArgs.jobId) {
                            this.emit(`${event}:${restArgs.jobId}`, restArgs, id);
                        }
                    }
                }
            }
        }
    }
    /**
     * Stops consuming events and closes the underlying Redis connection if necessary.
     *
     * @returns
     */
    async close() {
        if (!this.closing) {
            this.closing = (async () => {
                try {
                    // As the connection has been wrongly marked as "shared" by QueueBase,
                    // we need to forcibly close it here. We should fix QueueBase to avoid this in the future.
                    const client = await this.client;
                    client.disconnect();
                    await this.connection.close(this.blocking);
                }
                finally {
                    this.closed = true;
                }
            })();
        }
        return this.closing;
    }
}
exports.QueueEvents = QueueEvents;
//# sourceMappingURL=queue-events.js.map
1
backend/node_modules/bullmq/dist/cjs/classes/queue-events.js.map
generated
vendored
Normal file
1
backend/node_modules/bullmq/dist/cjs/classes/queue-events.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"queue-events.js","sourceRoot":"","sources":["../../../src/classes/queue-events.ts"],"names":[],"mappings":";;;;AAOA,oCAKkB;AAClB,6CAAyC;AA8PzC;;;;;;GAMG;AACH,MAAa,WAAY,SAAQ,sBAAS;IAIxC,YACE,IAAY,EACZ,KAA8D;QAC5D,UAAU,EAAE,EAAE;KACf,EACD,UAAmC;YAHnC,EAAE,UAAU,EAAE,OAAO,GAAG,IAAI,OAE3B,EAFgC,IAAI,sBAArC,yBAAuC,CAAF;QAKrC,KAAK,CACH,IAAI,kCAEC,IAAI,KACP,UAAU,EAAE,IAAA,uBAAe,EAAC,UAAU,CAAC;gBACrC,CAAC,CAAe,UAAW,CAAC,SAAS,EAAE;gBACvC,CAAC,CAAC,UAAU,KAEhB,UAAU,EACV,IAAI,CACL,CAAC;QApBI,YAAO,GAAG,KAAK,CAAC;QAChB,aAAQ,GAAG,KAAK,CAAC;QAqBvB,IAAI,CAAC,IAAI,GAAG,MAAM,CAAC,MAAM,CACvB;YACE,eAAe,EAAE,KAAK;SACvB,EACD,IAAI,CAAC,IAAI,CACV,CAAC;QAEF,IAAI,OAAO,EAAE,CAAC;YACZ,IAAI,CAAC,GAAG,EAAE,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,KAAK,CAAC,CAAC,CAAC;QACvD,CAAC;IACH,CAAC;IAED,IAAI,CAGF,KAAQ,EAAE,GAAG,IAA8B;QAC3C,OAAO,KAAK,CAAC,IAAI,CAAC,KAAK,EAAE,GAAG,IAAI,CAAC,CAAC;IACpC,CAAC;IAED,GAAG,CAGD,SAAY,EAAE,QAAgB;QAC9B,KAAK,CAAC,GAAG,CAAC,SAAS,EAAE,QAAoC,CAAC,CAAC;QAC3D,OAAO,IAAI,CAAC;IACd,CAAC;IAED,EAAE,CAGA,KAAQ,EAAE,QAAgB;QAC1B,KAAK,CAAC,EAAE,CAAC,KAAK,EAAE,QAAoC,CAAC,CAAC;QACtD,OAAO,IAAI,CAAC;IACd,CAAC;IAED,IAAI,CAGF,KAAQ,EAAE,QAAgB;QAC1B,KAAK,CAAC,IAAI,CAAC,KAAK,EAAE,QAAoC,CAAC,CAAC;QACxD,OAAO,IAAI,CAAC;IACd,CAAC;IAED;;;OAGG;IACH,KAAK,CAAC,GAAG;QACP,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC;gBACH,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC;gBACpB,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC;gBAEjC,8DAA8D;gBAC9D,IAAI,CAAC;oBACH,MAAM,MAAM,CAAC,MAAM,CAAC,SAAS,EAAE,IAAI,CAAC,UAAU,CAAC,0BAAkB,CAAC,CAAC,CAAC;gBACtE,CAAC;gBAAC,OAAO,GAAG,EAAE,CAAC;oBACb,IAAI,CAAC,+BAAuB,CAAC,IAAI,CAAS,GAAI,CAAC,OAAO,CAAC,EAAE,CAAC;wBACxD,MAAM,GAAG,CAAC;oBACZ,CAAC;gBACH,CAAC;gBAED,MAAM,IAAI,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC;YACnC,CAAC;YAAC,OAAO,KAAK,EAAE,CAAC;gBACf,IAAI,CAAC,OAAO,GAAG,KAAK,CAAC;gBACrB,MAAM,KAAK,CAAC;YACd,CAAC;QACH,CAAC;aAAM,CAAC;YACN,MAAM,IAAI,KAAK,CAAC,kCAAkC,CAAC,CAAC;QACtD,CAAC;IACH,CAAC;IAEO,KAAK,CAAC,aAAa,CAAC,MAAmB;QAC7C,MAAM,IAAI,GAAuB,IAAI,CAAC,IAAI,CAAC;QAE3C,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC;QAC7B,IAAI,EAAE,GAAG,IAAI,CAAC,WAAW,IAAI,GAAG,CAAC;QAEjC,OAAO,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YACrB,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC;YACrB,mGAAmG;YACnG,MAAM,IAAI,GAAkB,MAAM,IAAI,CAAC,oBAAoB,CAAC,GAAG,EAAE,CAC/D,MAAM,CAAC,KAAK,CAAC,OAAO,EAAE,IAAI,CAAC,eAAgB,EAAE,SAAS,EAAE,GAAG,EAAE,EAAE,CAAC,CACjE,CAAC;YACF,IAAI,CAAC,QAAQ,GAAG,KAAK,CAAC;YACtB,IAAI,IAAI,EAAE,CAAC;gBACT,MAAM,MAAM,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;gBACvB,MAAM,MAAM,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;gBAEzB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;oBACvC,EAAE,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;oBAClB,MAAM,IAAI,GAAG,IAAA,iBAAS,EAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;oBAErC,EAAE;oBACF,gEAAgE;oBAChE,sBAAsB;oBACtB,QAAQ,IAAI,CAAC,KAAK,EAAE,CAAC;wBACnB,KAAK,UAAU;4BACb,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;4BAClC,MAAM;wBACR,KAAK,WAAW;4BACd,IAAI,CAAC,WAAW,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;4BAChD,MAAM;oBACV,CAAC;oBAED,MAAM,EAAE,KAAK,KAAkB,IAAI,EAAjB,QAAQ,kBAAK,IAAI,EAA7B,SAAsB,CAAO,CAAC;oBAEpC,IAAI,KAAK,KAAK,SAAS,EAAE,CAAC;wBACxB,IAAI,CAAC,IAAI,CAAC,KAAK,EAAE,EAAE,CAAC,CAAC;oBACvB,CAAC;yBAAM,CAAC;wBACN,IAAI,CAAC,IAAI,CAAC,KAAY,EAAE,QAAQ,EAAE,EAAE,CAAC,CAAC;wBACtC,IAAI,QAAQ,CAAC,KAAK,EAAE,CAAC;4BACnB,IAAI,CAAC,IAAI,CAAC,GAAG,KAAK,IAAI,QAAQ,CAAC,KAAK,EAAS,EAAE,QAAQ,EAAE,EAAE,CAAC,CAAC;wBAC/D,CAAC;oBACH,CAAC;gBACH,CAAC;YACH,CAAC;QACH,CAAC;IACH,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC,OAAO,G
AAG,CAAC,KAAK,IAAI,EAAE;gBACzB,IAAI,CAAC;oBACH,sEAAsE;oBACtE,0FAA0F;oBAC1F,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC;oBACjC,MAAM,CAAC,UAAU,EAAE,CAAC;oBACpB,MAAM,IAAI,CAAC,UAAU,CAAC,KAAK,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;gBAC7C,CAAC;wBAAS,CAAC;oBACT,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;gBACrB,CAAC;YACH,CAAC,CAAC,EAAE,CAAC;QACP,CAAC;QACD,OAAO,IAAI,CAAC,OAAO,CAAC;IACtB,CAAC;CACF;AApKD,kCAoKC"}
|
||||
509
backend/node_modules/bullmq/dist/cjs/classes/queue-getters.js
generated
vendored
Normal file
509
backend/node_modules/bullmq/dist/cjs/classes/queue-getters.js
generated
vendored
Normal file
@@ -0,0 +1,509 @@
|
||||
/*eslint-env node */
|
||||
'use strict';
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.QueueGetters = void 0;
|
||||
const tslib_1 = require("tslib");
|
||||
const queue_base_1 = require("./queue-base");
|
||||
const utils_1 = require("../utils");
|
||||
/**
|
||||
* Provides different getters for different aspects of a queue.
|
||||
*/
|
||||
class QueueGetters extends queue_base_1.QueueBase {
|
||||
getJob(jobId) {
|
||||
return this.Job.fromId(this, jobId);
|
||||
}
|
||||
commandByType(types, count, callback) {
|
||||
return types.map((type) => {
|
||||
type = type === 'waiting' ? 'wait' : type; // alias
|
||||
const key = this.toKey(type);
|
||||
switch (type) {
|
||||
case 'completed':
|
||||
case 'failed':
|
||||
case 'delayed':
|
||||
case 'prioritized':
|
||||
case 'repeat':
|
||||
case 'waiting-children':
|
||||
return callback(key, count ? 'zcard' : 'zrange');
|
||||
case 'active':
|
||||
case 'wait':
|
||||
case 'paused':
|
||||
return callback(key, count ? 'llen' : 'lrange');
|
||||
}
|
||||
});
|
||||
}
|
||||
sanitizeJobTypes(types) {
|
||||
const currentTypes = typeof types === 'string' ? [types] : types;
|
||||
if (Array.isArray(currentTypes) && currentTypes.length > 0) {
|
||||
const sanitizedTypes = [...currentTypes];
|
||||
if (sanitizedTypes.indexOf('waiting') !== -1) {
|
||||
sanitizedTypes.push('paused');
|
||||
}
|
||||
return [...new Set(sanitizedTypes)];
|
||||
}
|
||||
return [
|
||||
'active',
|
||||
'completed',
|
||||
'delayed',
|
||||
'failed',
|
||||
'paused',
|
||||
'prioritized',
|
||||
'waiting',
|
||||
'waiting-children',
|
||||
];
|
||||
}
|
||||
/**
|
||||
Returns the number of jobs waiting to be processed. This includes jobs that are
|
||||
"waiting" or "delayed" or "prioritized" or "waiting-children".
|
||||
*/
|
||||
async count() {
|
||||
const count = await this.getJobCountByTypes('waiting', 'paused', 'delayed', 'prioritized', 'waiting-children');
|
||||
return count;
|
||||
}
|
||||
/**
|
||||
* Returns the time to live for a rate limited key in milliseconds.
|
||||
* @param maxJobs - max jobs to be considered in rate limit state. If not passed
|
||||
* it will return the remaining ttl without considering if max jobs is excedeed.
|
||||
* @returns -2 if the key does not exist.
|
||||
* -1 if the key exists but has no associated expire.
|
||||
* @see {@link https://redis.io/commands/pttl/}
|
||||
*/
|
||||
async getRateLimitTtl(maxJobs) {
|
||||
return this.scripts.getRateLimitTtl(maxJobs);
|
||||
}
|
||||
/**
|
||||
* Get jobId that starts debounced state.
|
||||
* @deprecated use getDeduplicationJobId method
|
||||
*
|
||||
* @param id - debounce identifier
|
||||
*/
|
||||
async getDebounceJobId(id) {
|
||||
const client = await this.client;
|
||||
return client.get(`${this.keys.de}:${id}`);
|
||||
}
|
||||
/**
|
||||
* Get jobId from deduplicated state.
|
||||
*
|
||||
* @param id - deduplication identifier
|
||||
*/
|
||||
async getDeduplicationJobId(id) {
|
||||
const client = await this.client;
|
||||
return client.get(`${this.keys.de}:${id}`);
|
||||
}
|
||||
/**
|
||||
* Get global concurrency value.
|
||||
* Returns null in case no value is set.
|
||||
*/
|
||||
async getGlobalConcurrency() {
|
||||
const client = await this.client;
|
||||
const concurrency = await client.hget(this.keys.meta, 'concurrency');
|
||||
if (concurrency) {
|
||||
return Number(concurrency);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
/**
|
||||
* Get global rate limit values.
|
||||
* Returns null in case no value is set.
|
||||
*/
|
||||
async getGlobalRateLimit() {
|
||||
const client = await this.client;
|
||||
const [max, duration] = await client.hmget(this.keys.meta, 'max', 'duration');
|
||||
if (max && duration) {
|
||||
return {
|
||||
max: Number(max),
|
||||
duration: Number(duration),
|
||||
};
|
||||
}
|
||||
return null;
|
||||
}
|
||||
/**
|
||||
* Job counts by type
|
||||
*
|
||||
* Queue#getJobCountByTypes('completed') =\> completed count
|
||||
* Queue#getJobCountByTypes('completed', 'failed') =\> completed + failed count
|
||||
* Queue#getJobCountByTypes('completed', 'waiting', 'failed') =\> completed + waiting + failed count
|
||||
*/
|
||||
async getJobCountByTypes(...types) {
|
||||
const result = await this.getJobCounts(...types);
|
||||
return Object.values(result).reduce((sum, count) => sum + count, 0);
|
||||
}
|
||||
/**
|
||||
* Returns the job counts for each type specified or every list/set in the queue by default.
|
||||
*
|
||||
* @returns An object, key (type) and value (count)
|
||||
*/
|
||||
async getJobCounts(...types) {
|
||||
const currentTypes = this.sanitizeJobTypes(types);
|
||||
const responses = await this.scripts.getCounts(currentTypes);
|
||||
const counts = {};
|
||||
responses.forEach((res, index) => {
|
||||
counts[currentTypes[index]] = res || 0;
|
||||
});
|
||||
return counts;
|
||||
}
|
||||
/**
|
||||
* Get current job state.
|
||||
*
|
||||
* @param jobId - job identifier.
|
||||
* @returns Returns one of these values:
|
||||
* 'completed', 'failed', 'delayed', 'active', 'waiting', 'waiting-children', 'unknown'.
|
||||
*/
|
||||
getJobState(jobId) {
|
||||
return this.scripts.getState(jobId);
|
||||
}
|
||||
/**
|
||||
* Get global queue configuration.
|
||||
*
|
||||
* @returns Returns the global queue configuration.
|
||||
*/
|
||||
async getMeta() {
|
||||
const client = await this.client;
|
||||
const config = await client.hgetall(this.keys.meta);
|
||||
const { concurrency, max, duration, paused, 'opts.maxLenEvents': maxLenEvents } = config, rest = tslib_1.__rest(config, ["concurrency", "max", "duration", "paused", 'opts.maxLenEvents']);
|
||||
const parsedConfig = rest;
|
||||
if (concurrency) {
|
||||
parsedConfig['concurrency'] = Number(concurrency);
|
||||
}
|
||||
if (maxLenEvents) {
|
||||
parsedConfig['maxLenEvents'] = Number(maxLenEvents);
|
||||
}
|
||||
if (max) {
|
||||
parsedConfig['max'] = Number(max);
|
||||
}
|
||||
if (duration) {
|
||||
parsedConfig['duration'] = Number(duration);
|
||||
}
|
||||
parsedConfig['paused'] = paused === '1';
|
||||
return parsedConfig;
|
||||
}
|
||||
/**
|
||||
* @returns Returns the number of jobs in completed status.
|
||||
*/
|
||||
getCompletedCount() {
|
||||
return this.getJobCountByTypes('completed');
|
||||
}
|
||||
/**
|
||||
* Returns the number of jobs in failed status.
|
||||
*/
|
||||
getFailedCount() {
|
||||
return this.getJobCountByTypes('failed');
|
||||
}
|
||||
/**
|
||||
* Returns the number of jobs in delayed status.
|
||||
*/
|
||||
getDelayedCount() {
|
||||
return this.getJobCountByTypes('delayed');
|
||||
}
|
||||
/**
|
||||
* Returns the number of jobs in active status.
|
||||
*/
|
||||
getActiveCount() {
|
||||
return this.getJobCountByTypes('active');
|
||||
}
|
||||
/**
|
||||
* Returns the number of jobs in prioritized status.
|
||||
*/
|
||||
getPrioritizedCount() {
|
||||
return this.getJobCountByTypes('prioritized');
|
||||
}
|
||||
/**
|
||||
* Returns the number of jobs per priority.
|
||||
*/
|
||||
async getCountsPerPriority(priorities) {
|
||||
const uniquePriorities = [...new Set(priorities)];
|
||||
const responses = await this.scripts.getCountsPerPriority(uniquePriorities);
|
||||
const counts = {};
|
||||
responses.forEach((res, index) => {
|
||||
counts[`${uniquePriorities[index]}`] = res || 0;
|
||||
});
|
||||
return counts;
|
||||
}
|
||||
/**
|
||||
* Returns the number of jobs in waiting or paused statuses.
|
||||
*/
|
||||
getWaitingCount() {
|
||||
return this.getJobCountByTypes('waiting');
|
||||
}
|
||||
/**
|
||||
* Returns the number of jobs in waiting-children status.
|
||||
*/
|
||||
getWaitingChildrenCount() {
|
||||
return this.getJobCountByTypes('waiting-children');
|
||||
}
|
||||
/**
|
||||
* Returns the jobs that are in the "waiting" status.
|
||||
* @param start - zero based index from where to start returning jobs.
|
||||
* @param end - zero based index where to stop returning jobs.
|
||||
*/
|
||||
getWaiting(start = 0, end = -1) {
|
||||
return this.getJobs(['waiting'], start, end, true);
|
||||
}
|
||||
/**
|
||||
* Returns the jobs that are in the "waiting-children" status.
|
||||
* I.E. parent jobs that have at least one child that has not completed yet.
|
||||
* @param start - zero based index from where to start returning jobs.
|
||||
* @param end - zero based index where to stop returning jobs.
|
||||
*/
|
||||
getWaitingChildren(start = 0, end = -1) {
|
||||
return this.getJobs(['waiting-children'], start, end, true);
|
||||
}
|
||||
/**
|
||||
* Returns the jobs that are in the "active" status.
|
||||
* @param start - zero based index from where to start returning jobs.
|
||||
* @param end - zero based index where to stop returning jobs.
|
||||
*/
|
||||
getActive(start = 0, end = -1) {
|
||||
return this.getJobs(['active'], start, end, true);
|
||||
}
|
||||
/**
|
||||
* Returns the jobs that are in the "delayed" status.
|
||||
* @param start - zero based index from where to start returning jobs.
|
||||
* @param end - zero based index where to stop returning jobs.
|
||||
*/
|
||||
getDelayed(start = 0, end = -1) {
|
||||
return this.getJobs(['delayed'], start, end, true);
|
||||
}
|
||||
/**
|
||||
* Returns the jobs that are in the "prioritized" status.
|
||||
* @param start - zero based index from where to start returning jobs.
|
||||
* @param end - zero based index where to stop returning jobs.
|
||||
*/
|
||||
getPrioritized(start = 0, end = -1) {
|
||||
return this.getJobs(['prioritized'], start, end, true);
|
||||
}
|
||||
/**
|
||||
* Returns the jobs that are in the "completed" status.
|
||||
* @param start - zero based index from where to start returning jobs.
|
||||
* @param end - zero based index where to stop returning jobs.
|
||||
*/
|
||||
getCompleted(start = 0, end = -1) {
|
||||
return this.getJobs(['completed'], start, end, false);
|
||||
}
|
||||
/**
|
||||
* Returns the jobs that are in the "failed" status.
|
||||
* @param start - zero based index from where to start returning jobs.
|
||||
* @param end - zero based index where to stop returning jobs.
|
||||
*/
|
||||
getFailed(start = 0, end = -1) {
|
||||
return this.getJobs(['failed'], start, end, false);
|
||||
}
|
||||
/**
|
||||
* Returns the qualified job ids and the raw job data (if available) of the
|
||||
* children jobs of the given parent job.
|
||||
* It is possible to get either the already processed children, in this case
|
||||
* an array of qualified job ids and their result values will be returned,
|
||||
* or the pending children, in this case an array of qualified job ids will
|
||||
* be returned.
|
||||
* A qualified job id is a string representing the job id in a given queue,
|
||||
* for example: "bull:myqueue:jobid".
|
||||
*
|
||||
* @param parentId - The id of the parent job
|
||||
* @param type - "processed" | "pending"
|
||||
* @param opts - Options for the query.
|
||||
*
|
||||
* @returns an object with the following shape:
|
||||
* `{ items: { id: string, v?: any, err?: string } [], jobs: JobJsonRaw[], total: number}`
|
||||
*/
|
||||
async getDependencies(parentId, type, start, end) {
|
||||
const key = this.toKey(type == 'processed'
|
||||
? `${parentId}:processed`
|
||||
: `${parentId}:dependencies`);
|
||||
const { items, total, jobs } = await this.scripts.paginate(key, {
|
||||
start,
|
||||
end,
|
||||
fetchJobs: true,
|
||||
});
|
||||
return {
|
||||
items,
|
||||
jobs,
|
||||
total,
|
||||
};
|
||||
}
|
||||
async getRanges(types, start = 0, end = 1, asc = false) {
|
||||
const multiCommands = [];
|
||||
this.commandByType(types, false, (key, command) => {
|
||||
switch (command) {
|
||||
case 'lrange':
|
||||
multiCommands.push('lrange');
|
||||
break;
|
||||
case 'zrange':
|
||||
multiCommands.push('zrange');
|
||||
break;
|
||||
}
|
||||
});
|
||||
const responses = await this.scripts.getRanges(types, start, end, asc);
|
||||
let results = [];
|
||||
responses.forEach((response, index) => {
|
||||
const result = response || [];
|
||||
if (asc && multiCommands[index] === 'lrange') {
|
||||
results = results.concat(result.reverse());
|
||||
}
|
||||
else {
|
||||
results = results.concat(result);
|
||||
}
|
||||
});
|
||||
return [...new Set(results)];
|
||||
}
|
||||
/**
|
||||
* Returns the jobs that are on the given statuses (note that JobType is synonym for job status)
|
||||
* @param types - the statuses of the jobs to return.
|
||||
* @param start - zero based index from where to start returning jobs.
|
||||
* @param end - zero based index where to stop returning jobs.
|
||||
* @param asc - if true, the jobs will be returned in ascending order.
|
||||
*/
|
||||
async getJobs(types, start = 0, end = -1, asc = false) {
|
||||
const currentTypes = this.sanitizeJobTypes(types);
|
||||
const jobIds = await this.getRanges(currentTypes, start, end, asc);
|
||||
return Promise.all(jobIds.map(jobId => this.Job.fromId(this, jobId)));
|
||||
}
|
||||
/**
|
||||
* Returns the logs for a given Job.
|
||||
* @param jobId - the id of the job to get the logs for.
|
||||
* @param start - zero based index from where to start returning jobs.
|
||||
* @param end - zero based index where to stop returning jobs.
|
||||
* @param asc - if true, the jobs will be returned in ascending order.
|
||||
*/
|
||||
async getJobLogs(jobId, start = 0, end = -1, asc = true) {
|
||||
const client = await this.client;
|
||||
const multi = client.multi();
|
||||
const logsKey = this.toKey(jobId + ':logs');
|
||||
if (asc) {
|
||||
multi.lrange(logsKey, start, end);
|
||||
}
|
||||
else {
|
||||
multi.lrange(logsKey, -(end + 1), -(start + 1));
|
||||
}
|
||||
multi.llen(logsKey);
|
||||
const result = (await multi.exec());
|
||||
if (!asc) {
|
||||
result[0][1].reverse();
|
||||
}
|
||||
return {
|
||||
logs: result[0][1],
|
||||
count: result[1][1],
|
||||
};
|
||||
}
|
||||
async baseGetClients(matcher) {
|
||||
const client = await this.client;
|
||||
try {
|
||||
const clients = (await client.client('LIST'));
|
||||
const list = this.parseClientList(clients, matcher);
|
||||
return list;
|
||||
}
|
||||
catch (err) {
|
||||
if (!utils_1.clientCommandMessageReg.test(err.message)) {
|
||||
throw err;
|
||||
}
|
||||
return [{ name: 'GCP does not support client list' }];
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Get the worker list related to the queue. i.e. all the known
|
||||
* workers that are available to process jobs for this queue.
|
||||
* Note: GCP does not support SETNAME, so this call will not work
|
||||
*
|
||||
* @returns - Returns an array with workers info.
|
||||
*/
|
||||
getWorkers() {
|
||||
const unnamedWorkerClientName = `${this.clientName()}`;
|
||||
const namedWorkerClientName = `${this.clientName()}:w:`;
|
||||
const matcher = (name) => name &&
|
||||
(name === unnamedWorkerClientName ||
|
||||
name.startsWith(namedWorkerClientName));
|
||||
return this.baseGetClients(matcher);
|
||||
}
|
||||
/**
|
||||
* Returns the current count of workers for the queue.
|
||||
*
|
||||
* getWorkersCount(): Promise<number>
|
||||
*
|
||||
*/
|
||||
async getWorkersCount() {
|
||||
const workers = await this.getWorkers();
|
||||
return workers.length;
|
||||
}
|
||||
/**
|
||||
* Get queue events list related to the queue.
|
||||
* Note: GCP does not support SETNAME, so this call will not work
|
||||
*
|
||||
* @deprecated do not use this method, it will be removed in the future.
|
||||
*
|
||||
* @returns - Returns an array with queue events info.
|
||||
*/
|
||||
async getQueueEvents() {
|
||||
const clientName = `${this.clientName()}${utils_1.QUEUE_EVENT_SUFFIX}`;
|
||||
return this.baseGetClients((name) => name === clientName);
|
||||
}
|
||||
/**
|
||||
* Get queue metrics related to the queue.
|
||||
*
|
||||
* This method returns the gathered metrics for the queue.
|
||||
* The metrics are represented as an array of job counts
|
||||
* per unit of time (1 minute).
|
||||
*
|
||||
* @param start - Start point of the metrics, where 0
|
||||
* is the newest point to be returned.
|
||||
* @param end - End point of the metrics, where -1 is the
|
||||
* oldest point to be returned.
|
||||
*
|
||||
* @returns - Returns an object with queue metrics.
|
||||
*/
|
||||
async getMetrics(type, start = 0, end = -1) {
|
||||
const [meta, data, count] = await this.scripts.getMetrics(type, start, end);
|
||||
return {
|
||||
meta: {
|
||||
count: parseInt(meta[0] || '0', 10),
|
||||
prevTS: parseInt(meta[1] || '0', 10),
|
||||
prevCount: parseInt(meta[2] || '0', 10),
|
||||
},
|
||||
data: data.map(point => +point || 0),
|
||||
count,
|
||||
};
|
||||
}
|
||||
parseClientList(list, matcher) {
|
||||
const lines = list.split(/\r?\n/);
|
||||
const clients = [];
|
||||
lines.forEach((line) => {
|
||||
const client = {};
|
||||
const keyValues = line.split(' ');
|
||||
keyValues.forEach(function (keyValue) {
|
||||
const index = keyValue.indexOf('=');
|
||||
const key = keyValue.substring(0, index);
|
||||
const value = keyValue.substring(index + 1);
|
||||
client[key] = value;
|
||||
});
|
||||
const name = client['name'];
|
||||
if (matcher(name)) {
|
||||
client['name'] = this.name;
|
||||
client['rawname'] = name;
|
||||
clients.push(client);
|
||||
}
|
||||
});
|
||||
return clients;
|
||||
}
|
||||
/**
|
||||
* Export the metrics for the queue in the Prometheus format.
|
||||
* Automatically exports all the counts returned by getJobCounts().
|
||||
*
|
||||
* @returns - Returns a string with the metrics in the Prometheus format.
|
||||
*
|
||||
* @see {@link https://prometheus.io/docs/instrumenting/exposition_formats/}
|
||||
*
|
||||
**/
|
||||
async exportPrometheusMetrics(globalVariables) {
|
||||
const counts = await this.getJobCounts();
|
||||
const metrics = [];
|
||||
// Match the test's expected HELP text
|
||||
metrics.push('# HELP bullmq_job_count Number of jobs in the queue by state');
|
||||
metrics.push('# TYPE bullmq_job_count gauge');
|
||||
const variables = !globalVariables
|
||||
? ''
|
||||
: Object.keys(globalVariables).reduce((acc, curr) => `${acc}, ${curr}="${globalVariables[curr]}"`, '');
|
||||
for (const [state, count] of Object.entries(counts)) {
|
||||
metrics.push(`bullmq_job_count{queue="${this.name}", state="${state}"${variables}} ${count}`);
|
||||
}
|
||||
return metrics.join('\n');
|
||||
}
|
||||
}
|
||||
exports.QueueGetters = QueueGetters;
|
||||
//# sourceMappingURL=queue-getters.js.map
|
||||
1
backend/node_modules/bullmq/dist/cjs/classes/queue-getters.js.map
generated
vendored
Normal file
1
backend/node_modules/bullmq/dist/cjs/classes/queue-getters.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
43
backend/node_modules/bullmq/dist/cjs/classes/queue-keys.js
generated
vendored
Normal file
43
backend/node_modules/bullmq/dist/cjs/classes/queue-keys.js
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.QueueKeys = void 0;
|
||||
class QueueKeys {
|
||||
constructor(prefix = 'bull') {
|
||||
this.prefix = prefix;
|
||||
}
|
||||
getKeys(name) {
|
||||
const keys = {};
|
||||
[
|
||||
'',
|
||||
'active',
|
||||
'wait',
|
||||
'waiting-children',
|
||||
'paused',
|
||||
'id',
|
||||
'delayed',
|
||||
'prioritized',
|
||||
'stalled-check',
|
||||
'completed',
|
||||
'failed',
|
||||
'stalled',
|
||||
'repeat',
|
||||
'limiter',
|
||||
'meta',
|
||||
'events',
|
||||
'pc', // priority counter key
|
||||
'marker', // marker key
|
||||
'de', // deduplication key
|
||||
].forEach(key => {
|
||||
keys[key] = this.toKey(name, key);
|
||||
});
|
||||
return keys;
|
||||
}
|
||||
toKey(name, type) {
|
||||
return `${this.getQueueQualifiedName(name)}:${type}`;
|
||||
}
|
||||
getQueueQualifiedName(name) {
|
||||
return `${this.prefix}:${name}`;
|
||||
}
|
||||
}
|
||||
exports.QueueKeys = QueueKeys;
|
||||
//# sourceMappingURL=queue-keys.js.map
|
||||
1
backend/node_modules/bullmq/dist/cjs/classes/queue-keys.js.map
generated
vendored
Normal file
1
backend/node_modules/bullmq/dist/cjs/classes/queue-keys.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"queue-keys.js","sourceRoot":"","sources":["../../../src/classes/queue-keys.ts"],"names":[],"mappings":";;;AAEA,MAAa,SAAS;IACpB,YAA4B,SAAS,MAAM;QAAf,WAAM,GAAN,MAAM,CAAS;IAAG,CAAC;IAE/C,OAAO,CAAC,IAAY;QAClB,MAAM,IAAI,GAAgC,EAAE,CAAC;QAC7C;YACE,EAAE;YACF,QAAQ;YACR,MAAM;YACN,kBAAkB;YAClB,QAAQ;YACR,IAAI;YACJ,SAAS;YACT,aAAa;YACb,eAAe;YACf,WAAW;YACX,QAAQ;YACR,SAAS;YACT,QAAQ;YACR,SAAS;YACT,MAAM;YACN,QAAQ;YACR,IAAI,EAAE,uBAAuB;YAC7B,QAAQ,EAAE,aAAa;YACvB,IAAI,EAAE,oBAAoB;SAC3B,CAAC,OAAO,CAAC,GAAG,CAAC,EAAE;YACd,IAAI,CAAC,GAAG,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,EAAE,GAAG,CAAC,CAAC;QACpC,CAAC,CAAC,CAAC;QAEH,OAAO,IAAI,CAAC;IACd,CAAC;IAED,KAAK,CAAC,IAAY,EAAE,IAAY;QAC9B,OAAO,GAAG,IAAI,CAAC,qBAAqB,CAAC,IAAI,CAAC,IAAI,IAAI,EAAE,CAAC;IACvD,CAAC;IAED,qBAAqB,CAAC,IAAY;QAChC,OAAO,GAAG,IAAI,CAAC,MAAM,IAAI,IAAI,EAAE,CAAC;IAClC,CAAC;CACF;AAvCD,8BAuCC"}
|
||||
652
backend/node_modules/bullmq/dist/cjs/classes/queue.js
generated
vendored
Normal file
652
backend/node_modules/bullmq/dist/cjs/classes/queue.js
generated
vendored
Normal file
@@ -0,0 +1,652 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.Queue = void 0;
|
||||
const uuid_1 = require("uuid");
|
||||
const job_1 = require("./job");
|
||||
const queue_getters_1 = require("./queue-getters");
|
||||
const repeat_1 = require("./repeat");
|
||||
const enums_1 = require("../enums");
|
||||
const job_scheduler_1 = require("./job-scheduler");
|
||||
const version_1 = require("../version");
|
||||
/**
|
||||
* Queue
|
||||
*
|
||||
* This class provides methods to add jobs to a queue and some other high-level
|
||||
* administration such as pausing or deleting queues.
|
||||
*
|
||||
* @typeParam DataType - The type of the data that the job will process.
|
||||
* @typeParam ResultType - The type of the result of the job.
|
||||
* @typeParam NameType - The type of the name of the job.
|
||||
*
|
||||
* @example
|
||||
*
|
||||
* ```typescript
|
||||
* import { Queue } from 'bullmq';
|
||||
*
|
||||
* interface MyDataType {
|
||||
* foo: string;
|
||||
* }
|
||||
*
|
||||
* interface MyResultType {
|
||||
* bar: string;
|
||||
* }
|
||||
*
|
||||
* const queue = new Queue<MyDataType, MyResultType, "blue" | "brown">('myQueue');
|
||||
* ```
|
||||
*/
|
||||
class Queue extends queue_getters_1.QueueGetters {
|
||||
constructor(name, opts, Connection) {
|
||||
var _a;
|
||||
super(name, Object.assign({}, opts), Connection);
|
||||
this.token = (0, uuid_1.v4)();
|
||||
this.libName = 'bullmq';
|
||||
this.jobsOpts = (_a = opts === null || opts === void 0 ? void 0 : opts.defaultJobOptions) !== null && _a !== void 0 ? _a : {};
|
||||
this.waitUntilReady()
|
||||
.then(client => {
|
||||
if (!this.closing && !(opts === null || opts === void 0 ? void 0 : opts.skipMetasUpdate)) {
|
||||
return client.hmset(this.keys.meta, this.metaValues);
|
||||
}
|
||||
})
|
||||
.catch(err => {
|
||||
// We ignore this error to avoid warnings. The error can still
|
||||
// be received by listening to event 'error'
|
||||
});
|
||||
}
|
||||
emit(event, ...args) {
|
||||
return super.emit(event, ...args);
|
||||
}
|
||||
off(eventName, listener) {
|
||||
super.off(eventName, listener);
|
||||
return this;
|
||||
}
|
||||
on(event, listener) {
|
||||
super.on(event, listener);
|
||||
return this;
|
||||
}
|
||||
once(event, listener) {
|
||||
super.once(event, listener);
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Returns this instance current default job options.
|
||||
*/
|
||||
get defaultJobOptions() {
|
||||
return Object.assign({}, this.jobsOpts);
|
||||
}
|
||||
get metaValues() {
|
||||
var _a, _b, _c, _d;
|
||||
return {
|
||||
'opts.maxLenEvents': (_d = (_c = (_b = (_a = this.opts) === null || _a === void 0 ? void 0 : _a.streams) === null || _b === void 0 ? void 0 : _b.events) === null || _c === void 0 ? void 0 : _c.maxLen) !== null && _d !== void 0 ? _d : 10000,
|
||||
version: `${this.libName}:${version_1.version}`,
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Get library version.
|
||||
*
|
||||
* @returns the content of the meta.library field.
|
||||
*/
|
||||
async getVersion() {
|
||||
const client = await this.client;
|
||||
return await client.hget(this.keys.meta, 'version');
|
||||
}
|
||||
get repeat() {
|
||||
return new Promise(async (resolve) => {
|
||||
if (!this._repeat) {
|
||||
this._repeat = new repeat_1.Repeat(this.name, Object.assign(Object.assign({}, this.opts), { connection: await this.client }));
|
||||
this._repeat.on('error', e => this.emit.bind(this, e));
|
||||
}
|
||||
resolve(this._repeat);
|
||||
});
|
||||
}
|
||||
get jobScheduler() {
|
||||
return new Promise(async (resolve) => {
|
||||
if (!this._jobScheduler) {
|
||||
this._jobScheduler = new job_scheduler_1.JobScheduler(this.name, Object.assign(Object.assign({}, this.opts), { connection: await this.client }));
|
||||
this._jobScheduler.on('error', e => this.emit.bind(this, e));
|
||||
}
|
||||
resolve(this._jobScheduler);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Enable and set global concurrency value.
|
||||
* @param concurrency - Maximum number of simultaneous jobs that the workers can handle.
|
||||
* For instance, setting this value to 1 ensures that no more than one job
|
||||
* is processed at any given time. If this limit is not defined, there will be no
|
||||
* restriction on the number of concurrent jobs.
|
||||
*/
|
||||
async setGlobalConcurrency(concurrency) {
|
||||
const client = await this.client;
|
||||
return client.hset(this.keys.meta, 'concurrency', concurrency);
|
||||
}
|
||||
/**
|
||||
* Enable and set rate limit.
|
||||
* @param max - Max number of jobs to process in the time period specified in `duration`
|
||||
* @param duration - Time in milliseconds. During this time, a maximum of `max` jobs will be processed.
|
||||
*/
|
||||
async setGlobalRateLimit(max, duration) {
|
||||
const client = await this.client;
|
||||
return client.hset(this.keys.meta, 'max', max, 'duration', duration);
|
||||
}
|
||||
/**
|
||||
* Remove global concurrency value.
|
||||
*/
|
||||
async removeGlobalConcurrency() {
|
||||
const client = await this.client;
|
||||
return client.hdel(this.keys.meta, 'concurrency');
|
||||
}
|
||||
/**
|
||||
* Remove global rate limit values.
|
||||
*/
|
||||
async removeGlobalRateLimit() {
|
||||
const client = await this.client;
|
||||
return client.hdel(this.keys.meta, 'max', 'duration');
|
||||
}
|
||||
/**
|
||||
* Adds a new job to the queue.
|
||||
*
|
||||
* @param name - Name of the job to be added to the queue.
|
||||
* @param data - Arbitrary data to append to the job.
|
||||
* @param opts - Job options that affects how the job is going to be processed.
|
||||
*/
|
||||
async add(name, data, opts) {
|
||||
return this.trace(enums_1.SpanKind.PRODUCER, 'add', `${this.name}.${name}`, async (span, srcPropagationMedatada) => {
|
||||
var _a;
|
||||
if (srcPropagationMedatada && !((_a = opts === null || opts === void 0 ? void 0 : opts.telemetry) === null || _a === void 0 ? void 0 : _a.omitContext)) {
|
||||
const telemetry = {
|
||||
metadata: srcPropagationMedatada,
|
||||
};
|
||||
opts = Object.assign(Object.assign({}, opts), { telemetry });
|
||||
}
|
||||
const job = await this.addJob(name, data, opts);
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.JobName]: name,
|
||||
[enums_1.TelemetryAttributes.JobId]: job.id,
|
||||
});
|
||||
return job;
|
||||
});
|
||||
}
|
||||
/**
|
||||
* addJob is a telemetry free version of the add method, useful in order to wrap it
|
||||
* with custom telemetry on subclasses.
|
||||
*
|
||||
* @param name - Name of the job to be added to the queue.
|
||||
* @param data - Arbitrary data to append to the job.
|
||||
* @param opts - Job options that affects how the job is going to be processed.
|
||||
*
|
||||
* @returns Job
|
||||
*/
|
||||
async addJob(name, data, opts) {
|
||||
if (opts && opts.repeat) {
|
||||
if (opts.repeat.endDate) {
|
||||
if (+new Date(opts.repeat.endDate) < Date.now()) {
|
||||
throw new Error('End date must be greater than current timestamp');
|
||||
}
|
||||
}
|
||||
return (await this.repeat).updateRepeatableJob(name, data, Object.assign(Object.assign({}, this.jobsOpts), opts), { override: true });
|
||||
}
|
||||
else {
|
||||
const jobId = opts === null || opts === void 0 ? void 0 : opts.jobId;
|
||||
if (jobId == '0' || (jobId === null || jobId === void 0 ? void 0 : jobId.startsWith('0:'))) {
|
||||
throw new Error("JobId cannot be '0' or start with 0:");
|
||||
}
|
||||
const job = await this.Job.create(this, name, data, Object.assign(Object.assign(Object.assign({}, this.jobsOpts), opts), { jobId }));
|
||||
this.emit('waiting', job);
|
||||
return job;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Adds an array of jobs to the queue. This method may be faster than adding
|
||||
* one job at a time in a sequence.
|
||||
*
|
||||
* @param jobs - The array of jobs to add to the queue. Each job is defined by 3
|
||||
* properties, 'name', 'data' and 'opts'. They follow the same signature as 'Queue.add'.
|
||||
*/
|
||||
async addBulk(jobs) {
|
||||
return this.trace(enums_1.SpanKind.PRODUCER, 'addBulk', this.name, async (span, srcPropagationMedatada) => {
|
||||
if (span) {
|
||||
span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.BulkNames]: jobs.map(job => job.name),
|
||||
[enums_1.TelemetryAttributes.BulkCount]: jobs.length,
|
||||
});
|
||||
}
|
||||
return await this.Job.createBulk(this, jobs.map(job => {
|
||||
var _a, _b, _c, _d, _e, _f;
|
||||
let telemetry = (_a = job.opts) === null || _a === void 0 ? void 0 : _a.telemetry;
|
||||
if (srcPropagationMedatada) {
|
||||
const omitContext = (_c = (_b = job.opts) === null || _b === void 0 ? void 0 : _b.telemetry) === null || _c === void 0 ? void 0 : _c.omitContext;
|
||||
const telemetryMetadata = ((_e = (_d = job.opts) === null || _d === void 0 ? void 0 : _d.telemetry) === null || _e === void 0 ? void 0 : _e.metadata) ||
|
||||
(!omitContext && srcPropagationMedatada);
|
||||
if (telemetryMetadata || omitContext) {
|
||||
telemetry = {
|
||||
metadata: telemetryMetadata,
|
||||
omitContext,
|
||||
};
|
||||
}
|
||||
}
|
||||
return {
|
||||
name: job.name,
|
||||
data: job.data,
|
||||
opts: Object.assign(Object.assign(Object.assign({}, this.jobsOpts), job.opts), { jobId: (_f = job.opts) === null || _f === void 0 ? void 0 : _f.jobId, telemetry }),
|
||||
};
|
||||
}));
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Upserts a scheduler.
|
||||
*
|
||||
* A scheduler is a job factory that creates jobs at a given interval.
|
||||
* Upserting a scheduler will create a new job scheduler or update an existing one.
|
||||
* It will also create the first job based on the repeat options and delayed accordingly.
|
||||
*
|
||||
* @param key - Unique key for the repeatable job meta.
|
||||
* @param repeatOpts - Repeat options
|
||||
* @param jobTemplate - Job template. If provided it will be used for all the jobs
|
||||
* created by the scheduler.
|
||||
*
|
||||
* @returns The next job to be scheduled (would normally be in delayed state).
|
||||
*/
|
||||
async upsertJobScheduler(jobSchedulerId, repeatOpts, jobTemplate) {
|
||||
var _a, _b;
|
||||
if (repeatOpts.endDate) {
|
||||
if (+new Date(repeatOpts.endDate) < Date.now()) {
|
||||
throw new Error('End date must be greater than current timestamp');
|
||||
}
|
||||
}
|
||||
return (await this.jobScheduler).upsertJobScheduler(jobSchedulerId, repeatOpts, (_a = jobTemplate === null || jobTemplate === void 0 ? void 0 : jobTemplate.name) !== null && _a !== void 0 ? _a : jobSchedulerId, (_b = jobTemplate === null || jobTemplate === void 0 ? void 0 : jobTemplate.data) !== null && _b !== void 0 ? _b : {}, Object.assign(Object.assign({}, this.jobsOpts), jobTemplate === null || jobTemplate === void 0 ? void 0 : jobTemplate.opts), { override: true });
|
||||
}
|
||||
/**
|
||||
* Pauses the processing of this queue globally.
|
||||
*
|
||||
* We use an atomic RENAME operation on the wait queue. Since
|
||||
* we have blocking calls with BRPOPLPUSH on the wait queue, as long as the queue
|
||||
* is renamed to 'paused', no new jobs will be processed (the current ones
|
||||
* will run until finalized).
|
||||
*
|
||||
* Adding jobs requires a LUA script to check first if the paused list exist
|
||||
* and in that case it will add it there instead of the wait list.
|
||||
*/
|
||||
async pause() {
|
||||
await this.trace(enums_1.SpanKind.INTERNAL, 'pause', this.name, async () => {
|
||||
await this.scripts.pause(true);
|
||||
this.emit('paused');
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Close the queue instance.
|
||||
*
|
||||
*/
|
||||
async close() {
|
||||
await this.trace(enums_1.SpanKind.INTERNAL, 'close', this.name, async () => {
|
||||
if (!this.closing) {
|
||||
if (this._repeat) {
|
||||
await this._repeat.close();
|
||||
}
|
||||
}
|
||||
await super.close();
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Overrides the rate limit to be active for the next jobs.
|
||||
*
|
||||
* @param expireTimeMs - expire time in ms of this rate limit.
|
||||
*/
|
||||
async rateLimit(expireTimeMs) {
|
||||
await this.trace(enums_1.SpanKind.INTERNAL, 'rateLimit', this.name, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.QueueRateLimit]: expireTimeMs,
|
||||
});
|
||||
await this.client.then(client => client.set(this.keys.limiter, Number.MAX_SAFE_INTEGER, 'PX', expireTimeMs));
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Resumes the processing of this queue globally.
|
||||
*
|
||||
* The method reverses the pause operation by resuming the processing of the
|
||||
* queue.
|
||||
*/
|
||||
async resume() {
|
||||
await this.trace(enums_1.SpanKind.INTERNAL, 'resume', this.name, async () => {
|
||||
await this.scripts.pause(false);
|
||||
this.emit('resumed');
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Returns true if the queue is currently paused.
|
||||
*/
|
||||
async isPaused() {
|
||||
const client = await this.client;
|
||||
const pausedKeyExists = await client.hexists(this.keys.meta, 'paused');
|
||||
return pausedKeyExists === 1;
|
||||
}
|
||||
/**
|
||||
* Returns true if the queue is currently maxed.
|
||||
*/
|
||||
isMaxed() {
|
||||
return this.scripts.isMaxed();
|
||||
}
|
||||
/**
|
||||
* Get all repeatable meta jobs.
|
||||
*
|
||||
* @deprecated This method is deprecated and will be removed in v6. Use getJobSchedulers instead.
|
||||
*
|
||||
* @param start - Offset of first job to return.
|
||||
* @param end - Offset of last job to return.
|
||||
* @param asc - Determine the order in which jobs are returned based on their
|
||||
* next execution time.
|
||||
*/
|
||||
async getRepeatableJobs(start, end, asc) {
|
||||
return (await this.repeat).getRepeatableJobs(start, end, asc);
|
||||
}
|
||||
/**
|
||||
* Get Job Scheduler by id
|
||||
*
|
||||
* @param id - identifier of scheduler.
|
||||
*/
|
||||
async getJobScheduler(id) {
|
||||
return (await this.jobScheduler).getScheduler(id);
|
||||
}
|
||||
/**
|
||||
* Get all Job Schedulers
|
||||
*
|
||||
* @param start - Offset of first scheduler to return.
|
||||
* @param end - Offset of last scheduler to return.
|
||||
* @param asc - Determine the order in which schedulers are returned based on their
|
||||
* next execution time.
|
||||
*/
|
||||
async getJobSchedulers(start, end, asc) {
|
||||
return (await this.jobScheduler).getJobSchedulers(start, end, asc);
|
||||
}
|
||||
/**
|
||||
*
|
||||
* Get the number of job schedulers.
|
||||
*
|
||||
* @returns The number of job schedulers.
|
||||
*/
|
||||
async getJobSchedulersCount() {
|
||||
return (await this.jobScheduler).getSchedulersCount();
|
||||
}
|
||||
/**
|
||||
* Removes a repeatable job.
|
||||
*
|
||||
* Note: you need to use the exact same repeatOpts when deleting a repeatable job
|
||||
* than when adding it.
|
||||
*
|
||||
* @deprecated This method is deprecated and will be removed in v6. Use removeJobScheduler instead.
|
||||
*
|
||||
* @see removeRepeatableByKey
|
||||
*
|
||||
* @param name - Job name
|
||||
* @param repeatOpts - Repeat options
|
||||
* @param jobId - Job id to remove. If not provided, all jobs with the same repeatOpts
|
||||
* @returns
|
||||
*/
|
||||
async removeRepeatable(name, repeatOpts, jobId) {
|
||||
return this.trace(enums_1.SpanKind.INTERNAL, 'removeRepeatable', `${this.name}.${name}`, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.JobName]: name,
|
||||
[enums_1.TelemetryAttributes.JobId]: jobId,
|
||||
});
|
||||
const repeat = await this.repeat;
|
||||
const removed = await repeat.removeRepeatable(name, repeatOpts, jobId);
|
||||
return !removed;
|
||||
});
|
||||
}
|
||||
/**
|
||||
*
|
||||
* Removes a job scheduler.
|
||||
*
|
||||
* @param jobSchedulerId - identifier of the job scheduler.
|
||||
*
|
||||
* @returns
|
||||
*/
|
||||
async removeJobScheduler(jobSchedulerId) {
|
||||
const jobScheduler = await this.jobScheduler;
|
||||
const removed = await jobScheduler.removeJobScheduler(jobSchedulerId);
|
||||
return !removed;
|
||||
}
|
||||
/**
|
||||
* Removes a debounce key.
|
||||
* @deprecated use removeDeduplicationKey
|
||||
*
|
||||
* @param id - debounce identifier
|
||||
*/
|
||||
async removeDebounceKey(id) {
|
||||
return this.trace(enums_1.SpanKind.INTERNAL, 'removeDebounceKey', `${this.name}`, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.JobKey]: id,
|
||||
});
|
||||
const client = await this.client;
|
||||
return await client.del(`${this.keys.de}:${id}`);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Removes a deduplication key.
|
||||
*
|
||||
* @param id - identifier
|
||||
*/
|
||||
async removeDeduplicationKey(id) {
|
||||
return this.trace(enums_1.SpanKind.INTERNAL, 'removeDeduplicationKey', `${this.name}`, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.DeduplicationKey]: id,
|
||||
});
|
||||
const client = await this.client;
|
||||
return client.del(`${this.keys.de}:${id}`);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Removes rate limit key.
|
||||
*/
|
||||
async removeRateLimitKey() {
|
||||
const client = await this.client;
|
||||
return client.del(this.keys.limiter);
|
||||
}
|
||||
/**
|
||||
* Removes a repeatable job by its key. Note that the key is the one used
|
||||
* to store the repeatable job metadata and not one of the job iterations
|
||||
* themselves. You can use "getRepeatableJobs" in order to get the keys.
|
||||
*
|
||||
* @see getRepeatableJobs
|
||||
*
|
||||
* @deprecated This method is deprecated and will be removed in v6. Use removeJobScheduler instead.
|
||||
*
|
||||
* @param repeatJobKey - To the repeatable job.
|
||||
* @returns
|
||||
*/
|
||||
async removeRepeatableByKey(key) {
|
||||
return this.trace(enums_1.SpanKind.INTERNAL, 'removeRepeatableByKey', `${this.name}`, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.JobKey]: key,
|
||||
});
|
||||
const repeat = await this.repeat;
|
||||
const removed = await repeat.removeRepeatableByKey(key);
|
||||
return !removed;
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Removes the given job from the queue as well as all its
|
||||
* dependencies.
|
||||
*
|
||||
* @param jobId - The id of the job to remove
|
||||
* @param opts - Options to remove a job
|
||||
* @returns 1 if it managed to remove the job or 0 if the job or
|
||||
* any of its dependencies were locked.
|
||||
*/
|
||||
async remove(jobId, { removeChildren = true } = {}) {
|
||||
return this.trace(enums_1.SpanKind.INTERNAL, 'remove', this.name, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.JobId]: jobId,
|
||||
[enums_1.TelemetryAttributes.JobOptions]: JSON.stringify({
|
||||
removeChildren,
|
||||
}),
|
||||
});
|
||||
const code = await this.scripts.remove(jobId, removeChildren);
|
||||
if (code === 1) {
|
||||
this.emit('removed', jobId);
|
||||
}
|
||||
return code;
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Updates the given job's progress.
|
||||
*
|
||||
* @param jobId - The id of the job to update
|
||||
* @param progress - Number or object to be saved as progress.
|
||||
*/
|
||||
async updateJobProgress(jobId, progress) {
|
||||
await this.trace(enums_1.SpanKind.INTERNAL, 'updateJobProgress', this.name, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.JobId]: jobId,
|
||||
[enums_1.TelemetryAttributes.JobProgress]: JSON.stringify(progress),
|
||||
});
|
||||
await this.scripts.updateProgress(jobId, progress);
|
||||
this.emit('progress', jobId, progress);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Logs one row of job's log data.
|
||||
*
|
||||
* @param jobId - The job id to log against.
|
||||
* @param logRow - String with log data to be logged.
|
||||
* @param keepLogs - Max number of log entries to keep (0 for unlimited).
|
||||
*
|
||||
* @returns The total number of log entries for this job so far.
|
||||
*/
|
||||
async addJobLog(jobId, logRow, keepLogs) {
|
||||
return job_1.Job.addJobLog(this, jobId, logRow, keepLogs);
|
||||
}
|
||||
/**
|
||||
* Drains the queue, i.e., removes all jobs that are waiting
|
||||
* or delayed, but not active, completed or failed.
|
||||
*
|
||||
* @param delayed - Pass true if it should also clean the
|
||||
* delayed jobs.
|
||||
*/
|
||||
async drain(delayed = false) {
|
||||
await this.trace(enums_1.SpanKind.INTERNAL, 'drain', this.name, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.QueueDrainDelay]: delayed,
|
||||
});
|
||||
await this.scripts.drain(delayed);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Cleans jobs from a queue. Similar to drain but keeps jobs within a certain
|
||||
* grace period.
|
||||
*
|
||||
* @param grace - The grace period in milliseconds
|
||||
* @param limit - Max number of jobs to clean
|
||||
* @param type - The type of job to clean
|
||||
* Possible values are completed, wait, active, paused, delayed, failed. Defaults to completed.
|
||||
* @returns Id jobs from the deleted records
|
||||
*/
|
||||
async clean(grace, limit, type = 'completed') {
|
||||
return this.trace(enums_1.SpanKind.INTERNAL, 'clean', this.name, async (span) => {
|
||||
const maxCount = limit || Infinity;
|
||||
const maxCountPerCall = Math.min(10000, maxCount);
|
||||
const timestamp = Date.now() - grace;
|
||||
let deletedCount = 0;
|
||||
const deletedJobsIds = [];
|
||||
// Normalize 'waiting' to 'wait' for consistency with internal Redis keys
|
||||
const normalizedType = type === 'waiting' ? 'wait' : type;
|
||||
while (deletedCount < maxCount) {
|
||||
const jobsIds = await this.scripts.cleanJobsInSet(normalizedType, timestamp, maxCountPerCall);
|
||||
this.emit('cleaned', jobsIds, normalizedType);
|
||||
deletedCount += jobsIds.length;
|
||||
deletedJobsIds.push(...jobsIds);
|
||||
if (jobsIds.length < maxCountPerCall) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.QueueGrace]: grace,
|
||||
[enums_1.TelemetryAttributes.JobType]: type,
|
||||
[enums_1.TelemetryAttributes.QueueCleanLimit]: maxCount,
|
||||
[enums_1.TelemetryAttributes.JobIds]: deletedJobsIds,
|
||||
});
|
||||
return deletedJobsIds;
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Completely destroys the queue and all of its contents irreversibly.
|
||||
* This method will *pause* the queue and requires that there are no
|
||||
* active jobs. It is possible to bypass this requirement, i.e. not
|
||||
* having active jobs using the "force" option.
|
||||
*
|
||||
* Note: This operation requires to iterate on all the jobs stored in the queue
|
||||
* and can be slow for very large queues.
|
||||
*
|
||||
* @param opts - Obliterate options.
|
||||
*/
|
||||
async obliterate(opts) {
|
||||
await this.trace(enums_1.SpanKind.INTERNAL, 'obliterate', this.name, async () => {
|
||||
await this.pause();
|
||||
let cursor = 0;
|
||||
do {
|
||||
cursor = await this.scripts.obliterate(Object.assign({ force: false, count: 1000 }, opts));
|
||||
} while (cursor);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Retry all the failed or completed jobs.
|
||||
*
|
||||
* @param opts - An object with the following properties:
|
||||
* - count number to limit how many jobs will be moved to wait status per iteration,
|
||||
* - state failed by default or completed.
|
||||
* - timestamp from which timestamp to start moving jobs to wait status, default Date.now().
|
||||
*
|
||||
* @returns
|
||||
*/
|
||||
async retryJobs(opts = {}) {
|
||||
await this.trace(enums_1.SpanKind.PRODUCER, 'retryJobs', this.name, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.QueueOptions]: JSON.stringify(opts),
|
||||
});
|
||||
let cursor = 0;
|
||||
do {
|
||||
cursor = await this.scripts.retryJobs(opts.state, opts.count, opts.timestamp);
|
||||
} while (cursor);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Promote all the delayed jobs.
|
||||
*
|
||||
* @param opts - An object with the following properties:
|
||||
* - count number to limit how many jobs will be moved to wait status per iteration
|
||||
*
|
||||
* @returns
|
||||
*/
|
||||
async promoteJobs(opts = {}) {
|
||||
await this.trace(enums_1.SpanKind.INTERNAL, 'promoteJobs', this.name, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.QueueOptions]: JSON.stringify(opts),
|
||||
});
|
||||
let cursor = 0;
|
||||
do {
|
||||
cursor = await this.scripts.promoteJobs(opts.count);
|
||||
} while (cursor);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Trim the event stream to an approximately maxLength.
|
||||
*
|
||||
* @param maxLength -
|
||||
*/
|
||||
async trimEvents(maxLength) {
|
||||
return this.trace(enums_1.SpanKind.INTERNAL, 'trimEvents', this.name, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.QueueEventMaxLength]: maxLength,
|
||||
});
|
||||
const client = await this.client;
|
||||
return await client.xtrim(this.keys.events, 'MAXLEN', '~', maxLength);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Delete old priority helper key.
|
||||
*/
|
||||
async removeDeprecatedPriorityKey() {
|
||||
const client = await this.client;
|
||||
return client.del(this.toKey('priority'));
|
||||
}
|
||||
}
|
||||
exports.Queue = Queue;
|
||||
//# sourceMappingURL=queue.js.map
|
||||
1
backend/node_modules/bullmq/dist/cjs/classes/queue.js.map
generated
vendored
Normal file
1
backend/node_modules/bullmq/dist/cjs/classes/queue.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
277
backend/node_modules/bullmq/dist/cjs/classes/redis-connection.js
generated
vendored
Normal file
277
backend/node_modules/bullmq/dist/cjs/classes/redis-connection.js
generated
vendored
Normal file
@@ -0,0 +1,277 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.RedisConnection = void 0;
const tslib_1 = require("tslib");
const events_1 = require("events");
const ioredis_1 = require("ioredis");
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
const utils_1 = require("ioredis/built/utils");
const utils_2 = require("../utils");
const version_1 = require("../version");
const scripts = require("../scripts");
const overrideMessage = [
    'BullMQ: WARNING! Your redis options maxRetriesPerRequest must be null',
    'and will be overridden by BullMQ.',
].join(' ');
const deprecationMessage = 'BullMQ: Your redis options maxRetriesPerRequest must be null.';
class RedisConnection extends events_1.EventEmitter {
    constructor(opts, extraOptions) {
        super();
        this.extraOptions = extraOptions;
        this.capabilities = {
            canDoubleTimeout: false,
            canBlockFor1Ms: true,
        };
        this.status = 'initializing';
        this.packageVersion = version_1.version;
        // Set extra options defaults
        this.extraOptions = Object.assign({ shared: false, blocking: true, skipVersionCheck: false, skipWaitingForReady: false }, extraOptions);
        if (!(0, utils_2.isRedisInstance)(opts)) {
            this.checkBlockingOptions(overrideMessage, opts);
            this.opts = Object.assign({ port: 6379, host: '127.0.0.1', retryStrategy: function (times) {
                    return Math.max(Math.min(Math.exp(times), 20000), 1000);
                } }, opts);
            if (this.extraOptions.blocking) {
                this.opts.maxRetriesPerRequest = null;
            }
        }
        else {
            this._client = opts;
            // Test if the redis instance is using keyPrefix
            // and if so, throw an error.
            if (this._client.options.keyPrefix) {
                throw new Error('BullMQ: ioredis does not support ioredis prefixes, use the prefix option instead.');
            }
            if ((0, utils_2.isRedisCluster)(this._client)) {
                this.opts = this._client.options.redisOptions;
            }
            else {
                this.opts = this._client.options;
            }
            this.checkBlockingOptions(deprecationMessage, this.opts, true);
        }
        this.skipVersionCheck =
            (extraOptions === null || extraOptions === void 0 ? void 0 : extraOptions.skipVersionCheck) ||
                !!(this.opts && this.opts.skipVersionCheck);
        this.handleClientError = (err) => {
            this.emit('error', err);
        };
        this.handleClientClose = () => {
            this.emit('close');
        };
        this.handleClientReady = () => {
            this.emit('ready');
        };
        this.initializing = this.init();
        this.initializing.catch(err => this.emit('error', err));
    }
    checkBlockingOptions(msg, options, throwError = false) {
        if (this.extraOptions.blocking && options && options.maxRetriesPerRequest) {
            if (throwError) {
                throw new Error(msg);
            }
            else {
                console.error(msg);
            }
        }
    }
    /**
     * Waits for a redis client to be ready.
     * @param redis - client
     */
    static async waitUntilReady(client) {
        if (client.status === 'ready') {
            return;
        }
        if (client.status === 'wait') {
            return client.connect();
        }
        if (client.status === 'end') {
            throw new Error(utils_1.CONNECTION_CLOSED_ERROR_MSG);
        }
        let handleReady;
        let handleEnd;
        let handleError;
        try {
            await new Promise((resolve, reject) => {
                let lastError;
                handleError = (err) => {
                    lastError = err;
                };
                handleReady = () => {
                    resolve();
                };
                handleEnd = () => {
                    if (client.status !== 'end') {
                        reject(lastError || new Error(utils_1.CONNECTION_CLOSED_ERROR_MSG));
                    }
                    else {
                        if (lastError) {
                            reject(lastError);
                        }
                        else {
                            // when custom 'end' status is set we already closed
                            resolve();
                        }
                    }
                };
                (0, utils_2.increaseMaxListeners)(client, 3);
                client.once('ready', handleReady);
                client.on('end', handleEnd);
                client.once('error', handleError);
            });
        }
        finally {
            client.removeListener('end', handleEnd);
            client.removeListener('error', handleError);
            client.removeListener('ready', handleReady);
            (0, utils_2.decreaseMaxListeners)(client, 3);
        }
    }
    get client() {
        return this.initializing;
    }
    loadCommands(packageVersion, providedScripts) {
        const finalScripts = providedScripts || scripts;
        for (const property in finalScripts) {
            // Only define the command if not already defined
            const commandName = `${finalScripts[property].name}:${packageVersion}`;
            if (!this._client[commandName]) {
                this._client.defineCommand(commandName, {
                    numberOfKeys: finalScripts[property].keys,
                    lua: finalScripts[property].content,
                });
            }
        }
    }
    async init() {
        if (!this._client) {
            const _a = this.opts, { url } = _a, rest = tslib_1.__rest(_a, ["url"]);
            this._client = url ? new ioredis_1.default(url, rest) : new ioredis_1.default(rest);
        }
        (0, utils_2.increaseMaxListeners)(this._client, 3);
        this._client.on('error', this.handleClientError);
        // ioredis treats connection errors as a different event ('close')
        this._client.on('close', this.handleClientClose);
        this._client.on('ready', this.handleClientReady);
        if (!this.extraOptions.skipWaitingForReady) {
            await RedisConnection.waitUntilReady(this._client);
        }
        this.loadCommands(this.packageVersion);
        if (this._client['status'] !== 'end') {
            this.version = await this.getRedisVersion();
            if (this.skipVersionCheck !== true && !this.closing) {
                if ((0, utils_2.isRedisVersionLowerThan)(this.version, RedisConnection.minimumVersion)) {
                    throw new Error(`Redis version needs to be greater or equal than ${RedisConnection.minimumVersion} ` +
                        `Current: ${this.version}`);
                }
                if ((0, utils_2.isRedisVersionLowerThan)(this.version, RedisConnection.recommendedMinimumVersion)) {
                    console.warn(`It is highly recommended to use a minimum Redis version of ${RedisConnection.recommendedMinimumVersion}
Current: ${this.version}`);
                }
            }
            this.capabilities = {
                canDoubleTimeout: !(0, utils_2.isRedisVersionLowerThan)(this.version, '6.0.0'),
                canBlockFor1Ms: !(0, utils_2.isRedisVersionLowerThan)(this.version, '7.0.8'),
            };
            this.status = 'ready';
        }
        return this._client;
    }
    async disconnect(wait = true) {
        const client = await this.client;
        if (client.status !== 'end') {
            let _resolve, _reject;
            if (!wait) {
                return client.disconnect();
            }
            const disconnecting = new Promise((resolve, reject) => {
                (0, utils_2.increaseMaxListeners)(client, 2);
                client.once('end', resolve);
                client.once('error', reject);
                _resolve = resolve;
                _reject = reject;
            });
            client.disconnect();
            try {
                await disconnecting;
            }
            finally {
                (0, utils_2.decreaseMaxListeners)(client, 2);
                client.removeListener('end', _resolve);
                client.removeListener('error', _reject);
            }
        }
    }
    async reconnect() {
        const client = await this.client;
        return client.connect();
    }
    async close(force = false) {
        if (!this.closing) {
            const status = this.status;
            this.status = 'closing';
            this.closing = true;
            try {
                if (status === 'ready') {
                    // Not sure if we need to wait for this
                    await this.initializing;
                }
                if (!this.extraOptions.shared) {
                    if (status == 'initializing' || force) {
                        // If we have still not connected to Redis, we need to disconnect.
                        this._client.disconnect();
                    }
                    else {
                        await this._client.quit();
                    }
                    // As IORedis does not update this status properly, we do it ourselves.
                    this._client['status'] = 'end';
                }
            }
            catch (error) {
                if ((0, utils_2.isNotConnectionError)(error)) {
                    throw error;
                }
            }
            finally {
                this._client.off('error', this.handleClientError);
                this._client.off('close', this.handleClientClose);
                this._client.off('ready', this.handleClientReady);
                (0, utils_2.decreaseMaxListeners)(this._client, 3);
                this.removeAllListeners();
                this.status = 'closed';
            }
        }
    }
    async getRedisVersion() {
        if (this.skipVersionCheck) {
            return RedisConnection.minimumVersion;
        }
        const doc = await this._client.info();
        const redisPrefix = 'redis_version:';
        const maxMemoryPolicyPrefix = 'maxmemory_policy:';
        const lines = doc.split(/\r?\n/);
        let redisVersion;
        for (let i = 0; i < lines.length; i++) {
            if (lines[i].indexOf(maxMemoryPolicyPrefix) === 0) {
                const maxMemoryPolicy = lines[i].substr(maxMemoryPolicyPrefix.length);
                if (maxMemoryPolicy !== 'noeviction') {
                    console.warn(`IMPORTANT! Eviction policy is ${maxMemoryPolicy}. It should be "noeviction"`);
                }
            }
            if (lines[i].indexOf(redisPrefix) === 0) {
                redisVersion = lines[i].substr(redisPrefix.length);
            }
        }
        return redisVersion;
    }
    get redisVersion() {
        return this.version;
    }
}
exports.RedisConnection = RedisConnection;
RedisConnection.minimumVersion = '5.0.0';
RedisConnection.recommendedMinimumVersion = '6.2.0';
//# sourceMappingURL=redis-connection.js.map
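For orientation, a minimal usage sketch of the class above. This is an internal helper that Queue and Worker normally construct for you; instantiating it directly, and the deep `dist/cjs` require path, are assumptions made purely for illustration, with Redis assumed on localhost:6379. Note that the default retryStrategy backs off exponentially (Math.exp(times)), clamped between 1000 ms and 20000 ms.

```js
const { RedisConnection } = require('bullmq/dist/cjs/classes/redis-connection');

async function main() {
    // Blocking connections force maxRetriesPerRequest to null, as enforced above.
    const connection = new RedisConnection({ host: '127.0.0.1', port: 6379 });
    const client = await connection.client; // resolves once init() has finished
    console.log('Connected, Redis version:', connection.redisVersion);
    await connection.close();
}

main().catch(console.error);
```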
1
backend/node_modules/bullmq/dist/cjs/classes/redis-connection.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
204
backend/node_modules/bullmq/dist/cjs/classes/repeat.js
generated
vendored
Normal file
@@ -0,0 +1,204 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.getNextMillis = exports.Repeat = void 0;
const tslib_1 = require("tslib");
const cron_parser_1 = require("cron-parser");
const crypto_1 = require("crypto");
const queue_base_1 = require("./queue-base");
class Repeat extends queue_base_1.QueueBase {
    constructor(name, opts, Connection) {
        super(name, opts, Connection);
        this.repeatStrategy =
            (opts.settings && opts.settings.repeatStrategy) || exports.getNextMillis;
        this.repeatKeyHashAlgorithm =
            (opts.settings && opts.settings.repeatKeyHashAlgorithm) || 'md5';
    }
    async updateRepeatableJob(name, data, opts, { override }) {
        var _a, _b;
        // Backwards compatibility for repeatable jobs for versions <= 3.0.0
        const repeatOpts = Object.assign({}, opts.repeat);
        (_a = repeatOpts.pattern) !== null && _a !== void 0 ? _a : (repeatOpts.pattern = repeatOpts.cron);
        delete repeatOpts.cron;
        // Check if we reached the limit of the repeatable job's iterations
        const iterationCount = repeatOpts.count ? repeatOpts.count + 1 : 1;
        if (typeof repeatOpts.limit !== 'undefined' &&
            iterationCount > repeatOpts.limit) {
            return;
        }
        // Check if we reached the end date of the repeatable job
        let now = Date.now();
        const { endDate } = repeatOpts;
        if (endDate && now > new Date(endDate).getTime()) {
            return;
        }
        const prevMillis = opts.prevMillis || 0;
        now = prevMillis < now ? now : prevMillis;
        const nextMillis = await this.repeatStrategy(now, repeatOpts, name);
        const { every, pattern } = repeatOpts;
        const hasImmediately = Boolean((every || pattern) && repeatOpts.immediately);
        const offset = hasImmediately && every ? now - nextMillis : undefined;
        if (nextMillis) {
            // We store the undecorated opts.jobId into the repeat options
            if (!prevMillis && opts.jobId) {
                repeatOpts.jobId = opts.jobId;
            }
            const legacyRepeatKey = getRepeatConcatOptions(name, repeatOpts);
            const newRepeatKey = (_b = opts.repeat.key) !== null && _b !== void 0 ? _b : this.hash(legacyRepeatKey);
            let repeatJobKey;
            if (override) {
                repeatJobKey = await this.scripts.addRepeatableJob(newRepeatKey, nextMillis, {
                    name,
                    endDate: endDate ? new Date(endDate).getTime() : undefined,
                    tz: repeatOpts.tz,
                    pattern,
                    every,
                }, legacyRepeatKey);
            }
            else {
                const client = await this.client;
                repeatJobKey = await this.scripts.updateRepeatableJobMillis(client, newRepeatKey, nextMillis, legacyRepeatKey);
            }
            const { immediately } = repeatOpts, filteredRepeatOpts = tslib_1.__rest(repeatOpts, ["immediately"]);
            return this.createNextJob(name, nextMillis, repeatJobKey, Object.assign(Object.assign({}, opts), { repeat: Object.assign({ offset }, filteredRepeatOpts) }), data, iterationCount, hasImmediately);
        }
    }
    async createNextJob(name, nextMillis, repeatJobKey, opts, data, currentCount, hasImmediately) {
        //
        // Generate unique job id for this iteration.
        //
        const jobId = this.getRepeatJobKey(name, nextMillis, repeatJobKey, data);
        const now = Date.now();
        const delay = nextMillis + (opts.repeat.offset ? opts.repeat.offset : 0) - now;
        const mergedOpts = Object.assign(Object.assign({}, opts), { jobId, delay: delay < 0 || hasImmediately ? 0 : delay, timestamp: now, prevMillis: nextMillis, repeatJobKey });
        mergedOpts.repeat = Object.assign(Object.assign({}, opts.repeat), { count: currentCount });
        return this.Job.create(this, name, data, mergedOpts);
    }
    // TODO: remove legacy code in next breaking change
    getRepeatJobKey(name, nextMillis, repeatJobKey, data) {
        if (repeatJobKey.split(':').length > 2) {
            return this.getRepeatJobId({
                name: name,
                nextMillis: nextMillis,
                namespace: this.hash(repeatJobKey),
                jobId: data === null || data === void 0 ? void 0 : data.id,
            });
        }
        return this.getRepeatDelayedJobId({
            customKey: repeatJobKey,
            nextMillis,
        });
    }
    async removeRepeatable(name, repeat, jobId) {
        var _a;
        const repeatConcatOptions = getRepeatConcatOptions(name, Object.assign(Object.assign({}, repeat), { jobId }));
        const repeatJobKey = (_a = repeat.key) !== null && _a !== void 0 ? _a : this.hash(repeatConcatOptions);
        const legacyRepeatJobId = this.getRepeatJobId({
            name,
            nextMillis: '',
            namespace: this.hash(repeatConcatOptions),
            jobId: jobId !== null && jobId !== void 0 ? jobId : repeat.jobId,
            key: repeat.key,
        });
        return this.scripts.removeRepeatable(legacyRepeatJobId, repeatConcatOptions, repeatJobKey);
    }
    async removeRepeatableByKey(repeatJobKey) {
        const data = this.keyToData(repeatJobKey);
        const legacyRepeatJobId = this.getRepeatJobId({
            name: data.name,
            nextMillis: '',
            namespace: this.hash(repeatJobKey),
            jobId: data.id,
        });
        return this.scripts.removeRepeatable(legacyRepeatJobId, '', repeatJobKey);
    }
    async getRepeatableData(client, key, next) {
        const jobData = await client.hgetall(this.toKey('repeat:' + key));
        if (jobData) {
            return {
                key,
                name: jobData.name,
                endDate: parseInt(jobData.endDate) || null,
                tz: jobData.tz || null,
                pattern: jobData.pattern || null,
                every: jobData.every || null,
                next,
            };
        }
        return this.keyToData(key, next);
    }
    keyToData(key, next) {
        const data = key.split(':');
        const pattern = data.slice(4).join(':') || null;
        return {
            key,
            name: data[0],
            id: data[1] || null,
            endDate: parseInt(data[2]) || null,
            tz: data[3] || null,
            pattern,
            next,
        };
    }
    async getRepeatableJobs(start = 0, end = -1, asc = false) {
        const client = await this.client;
        const key = this.keys.repeat;
        const result = asc
            ? await client.zrange(key, start, end, 'WITHSCORES')
            : await client.zrevrange(key, start, end, 'WITHSCORES');
        const jobs = [];
        for (let i = 0; i < result.length; i += 2) {
            jobs.push(this.getRepeatableData(client, result[i], parseInt(result[i + 1])));
        }
        return Promise.all(jobs);
    }
    async getRepeatableCount() {
        const client = await this.client;
        return client.zcard(this.toKey('repeat'));
    }
    hash(str) {
        return (0, crypto_1.createHash)(this.repeatKeyHashAlgorithm).update(str).digest('hex');
    }
    getRepeatDelayedJobId({ nextMillis, customKey, }) {
        return `repeat:${customKey}:${nextMillis}`;
    }
    getRepeatJobId({ name, nextMillis, namespace, jobId, key, }) {
        const checksum = key !== null && key !== void 0 ? key : this.hash(`${name}${jobId || ''}${namespace}`);
        return `repeat:${checksum}:${nextMillis}`;
    }
}
exports.Repeat = Repeat;
function getRepeatConcatOptions(name, repeat) {
    const endDate = repeat.endDate ? new Date(repeat.endDate).getTime() : '';
    const tz = repeat.tz || '';
    const pattern = repeat.pattern;
    const suffix = (pattern ? pattern : String(repeat.every)) || '';
    const jobId = repeat.jobId ? repeat.jobId : '';
    return `${name}:${jobId}:${endDate}:${tz}:${suffix}`;
}
const getNextMillis = (millis, opts) => {
    const pattern = opts.pattern;
    if (pattern && opts.every) {
        throw new Error('Both .pattern and .every options are defined for this repeatable job');
    }
    if (opts.every) {
        return (Math.floor(millis / opts.every) * opts.every +
            (opts.immediately ? 0 : opts.every));
    }
    const currentDate = opts.startDate && new Date(opts.startDate) > new Date(millis)
        ? new Date(opts.startDate)
        : new Date(millis);
    const interval = (0, cron_parser_1.parseExpression)(pattern, Object.assign(Object.assign({}, opts), { currentDate }));
    try {
        if (opts.immediately) {
            return new Date().getTime();
        }
        else {
            return interval.next().getTime();
        }
    }
    catch (e) {
        // Ignore error
    }
};
exports.getNextMillis = getNextMillis;
//# sourceMappingURL=repeat.js.map
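The `every` branch of `getNextMillis` above aligns executions to fixed slots of `every` milliseconds rather than offsetting from the current time. A small sketch of that behavior (the deep require path into `dist/cjs` is an assumption for illustration):

```js
const { getNextMillis } = require('bullmq/dist/cjs/classes/repeat');

const every = 5000; // repeat every 5 seconds
const now = Date.now();
const next = getNextMillis(now, { every });

console.log(next % every === 0); // true: runs land on slot boundaries
console.log(next > now);         // true: the current slot is skipped unless `immediately` is set
```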
1
backend/node_modules/bullmq/dist/cjs/classes/repeat.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
111
backend/node_modules/bullmq/dist/cjs/classes/sandbox.js
generated
vendored
Normal file
@@ -0,0 +1,111 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const enums_1 = require("../enums");
const sandbox = (processFile, childPool) => {
    return async function process(job, token) {
        let child;
        let msgHandler;
        let exitHandler;
        try {
            const done = new Promise((resolve, reject) => {
                const initChild = async () => {
                    try {
                        exitHandler = (exitCode, signal) => {
                            reject(new Error('Unexpected exit code: ' + exitCode + ' signal: ' + signal));
                        };
                        child = await childPool.retain(processFile);
                        child.on('exit', exitHandler);
                        msgHandler = async (msg) => {
                            var _a, _b, _c, _d, _e;
                            try {
                                switch (msg.cmd) {
                                    case enums_1.ParentCommand.Completed:
                                        resolve(msg.value);
                                        break;
                                    case enums_1.ParentCommand.Failed:
                                    case enums_1.ParentCommand.Error: {
                                        const err = new Error();
                                        Object.assign(err, msg.value);
                                        reject(err);
                                        break;
                                    }
                                    case enums_1.ParentCommand.Progress:
                                        await job.updateProgress(msg.value);
                                        break;
                                    case enums_1.ParentCommand.Log:
                                        await job.log(msg.value);
                                        break;
                                    case enums_1.ParentCommand.MoveToDelayed:
                                        await job.moveToDelayed((_a = msg.value) === null || _a === void 0 ? void 0 : _a.timestamp, (_b = msg.value) === null || _b === void 0 ? void 0 : _b.token);
                                        break;
                                    case enums_1.ParentCommand.MoveToWait:
                                        await job.moveToWait((_c = msg.value) === null || _c === void 0 ? void 0 : _c.token);
                                        break;
                                    case enums_1.ParentCommand.MoveToWaitingChildren:
                                        {
                                            const value = await job.moveToWaitingChildren((_d = msg.value) === null || _d === void 0 ? void 0 : _d.token, (_e = msg.value) === null || _e === void 0 ? void 0 : _e.opts);
                                            child.send({
                                                requestId: msg.requestId,
                                                cmd: enums_1.ChildCommand.MoveToWaitingChildrenResponse,
                                                value,
                                            });
                                        }
                                        break;
                                    case enums_1.ParentCommand.Update:
                                        await job.updateData(msg.value);
                                        break;
                                    case enums_1.ParentCommand.GetChildrenValues:
                                        {
                                            const value = await job.getChildrenValues();
                                            child.send({
                                                requestId: msg.requestId,
                                                cmd: enums_1.ChildCommand.GetChildrenValuesResponse,
                                                value,
                                            });
                                        }
                                        break;
                                    case enums_1.ParentCommand.GetIgnoredChildrenFailures:
                                        {
                                            const value = await job.getIgnoredChildrenFailures();
                                            child.send({
                                                requestId: msg.requestId,
                                                cmd: enums_1.ChildCommand.GetIgnoredChildrenFailuresResponse,
                                                value,
                                            });
                                        }
                                        break;
                                }
                            }
                            catch (err) {
                                reject(err);
                            }
                        };
                        child.on('message', msgHandler);
                        child.send({
                            cmd: enums_1.ChildCommand.Start,
                            job: job.asJSONSandbox(),
                            token,
                        });
                    }
                    catch (error) {
                        reject(error);
                    }
                };
                initChild();
            });
            await done;
            return done;
        }
        finally {
            if (child) {
                child.off('message', msgHandler);
                child.off('exit', exitHandler);
                if (child.exitCode === null && child.signalCode === null) {
                    childPool.release(child);
                }
            }
        }
    };
};
exports.default = sandbox;
//# sourceMappingURL=sandbox.js.map
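The sandbox above drives a processor that lives in its own file and runs in a child process (or worker thread), relaying `ParentCommand` messages such as Progress and Completed back to the parent. A minimal sketch of the documented usage pattern (the queue name 'myqueue' and a local Redis are assumptions):

```js
// processor.js - runs inside the child process
module.exports = async job => {
    await job.updateProgress(50); // relayed to the parent as a Progress message
    return { ok: true };          // relayed as a Completed message
};

// index.js - runs in the main process
const path = require('path');
const { Worker } = require('bullmq');

const worker = new Worker('myqueue', path.join(__dirname, 'processor.js'), {
    connection: { host: '127.0.0.1', port: 6379 },
});
```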
1
backend/node_modules/bullmq/dist/cjs/classes/sandbox.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"sandbox.js","sourceRoot":"","sources":["../../../src/classes/sandbox.ts"],"names":[],"mappings":";;AAAA,oCAAuD;AAMvD,MAAM,OAAO,GAAG,CACd,WAAgB,EAChB,SAAoB,EACpB,EAAE;IACF,OAAO,KAAK,UAAU,OAAO,CAAC,GAAiB,EAAE,KAAc;QAC7D,IAAI,KAAY,CAAC;QACjB,IAAI,UAAe,CAAC;QACpB,IAAI,WAAgB,CAAC;QACrB,IAAI,CAAC;YACH,MAAM,IAAI,GAAe,IAAI,OAAO,CAAC,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE;gBACvD,MAAM,SAAS,GAAG,KAAK,IAAI,EAAE;oBAC3B,IAAI,CAAC;wBACH,WAAW,GAAG,CAAC,QAAa,EAAE,MAAW,EAAE,EAAE;4BAC3C,MAAM,CACJ,IAAI,KAAK,CACP,wBAAwB,GAAG,QAAQ,GAAG,WAAW,GAAG,MAAM,CAC3D,CACF,CAAC;wBACJ,CAAC,CAAC;wBAEF,KAAK,GAAG,MAAM,SAAS,CAAC,MAAM,CAAC,WAAW,CAAC,CAAC;wBAC5C,KAAK,CAAC,EAAE,CAAC,MAAM,EAAE,WAAW,CAAC,CAAC;wBAE9B,UAAU,GAAG,KAAK,EAAE,GAAiB,EAAE,EAAE;;4BACvC,IAAI,CAAC;gCACH,QAAQ,GAAG,CAAC,GAAG,EAAE,CAAC;oCAChB,KAAK,qBAAa,CAAC,SAAS;wCAC1B,OAAO,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCACnB,MAAM;oCACR,KAAK,qBAAa,CAAC,MAAM,CAAC;oCAC1B,KAAK,qBAAa,CAAC,KAAK,CAAC,CAAC,CAAC;wCACzB,MAAM,GAAG,GAAG,IAAI,KAAK,EAAE,CAAC;wCACxB,MAAM,CAAC,MAAM,CAAC,GAAG,EAAE,GAAG,CAAC,KAAK,CAAC,CAAC;wCAC9B,MAAM,CAAC,GAAG,CAAC,CAAC;wCACZ,MAAM;oCACR,CAAC;oCACD,KAAK,qBAAa,CAAC,QAAQ;wCACzB,MAAM,GAAG,CAAC,cAAc,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCACpC,MAAM;oCACR,KAAK,qBAAa,CAAC,GAAG;wCACpB,MAAM,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCACzB,MAAM;oCACR,KAAK,qBAAa,CAAC,aAAa;wCAC9B,MAAM,GAAG,CAAC,aAAa,CACrB,MAAA,GAAG,CAAC,KAAK,0CAAE,SAAS,EACpB,MAAA,GAAG,CAAC,KAAK,0CAAE,KAAK,CACjB,CAAC;wCACF,MAAM;oCACR,KAAK,qBAAa,CAAC,UAAU;wCAC3B,MAAM,GAAG,CAAC,UAAU,CAAC,MAAA,GAAG,CAAC,KAAK,0CAAE,KAAK,CAAC,CAAC;wCACvC,MAAM;oCACR,KAAK,qBAAa,CAAC,qBAAqB;wCACtC,CAAC;4CACC,MAAM,KAAK,GAAG,MAAM,GAAG,CAAC,qBAAqB,CAC3C,MAAA,GAAG,CAAC,KAAK,0CAAE,KAAK,EAChB,MAAA,GAAG,CAAC,KAAK,0CAAE,IAAI,CAChB,CAAC;4CACF,KAAK,CAAC,IAAI,CAAC;gDACT,SAAS,EAAE,GAAG,CAAC,SAAS;gDACxB,GAAG,EAAE,oBAAY,CAAC,6BAA6B;gDAC/C,KAAK;6CACN,CAAC,CAAC;wCACL,CAAC;wCACD,MAAM;oCACR,KAAK,qBAAa,CAAC,MAAM;wCACvB,MAAM,GAAG,CAAC,UAAU,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCAChC,MAAM;oCACR,KAAK,qBAAa,CAAC,iBAAiB;wCAClC,CAAC;4CACC,MAAM,KAAK,GAAG,MAAM,GAAG,CAAC,iBAAiB,EAAE,CAAC;4CAC5C,KAAK,CAAC,IAAI,CAAC;gDACT,SAAS,EAAE,GAAG,CAAC,SAAS;gDACxB,GAAG,EAAE,oBAAY,CAAC,yBAAyB;gDAC3C,KAAK;6CACN,CAAC,CAAC;wCACL,CAAC;wCACD,MAAM;oCACR,KAAK,qBAAa,CAAC,0BAA0B;wCAC3C,CAAC;4CACC,MAAM,KAAK,GAAG,MAAM,GAAG,CAAC,0BAA0B,EAAE,CAAC;4CACrD,KAAK,CAAC,IAAI,CAAC;gDACT,SAAS,EAAE,GAAG,CAAC,SAAS;gDACxB,GAAG,EAAE,oBAAY,CAAC,kCAAkC;gDACpD,KAAK;6CACN,CAAC,CAAC;wCACL,CAAC;wCACD,MAAM;gCACV,CAAC;4BACH,CAAC;4BAAC,OAAO,GAAG,EAAE,CAAC;gCACb,MAAM,CAAC,GAAG,CAAC,CAAC;4BACd,CAAC;wBACH,CAAC,CAAC;wBAEF,KAAK,CAAC,EAAE,CAAC,SAAS,EAAE,UAAU,CAAC,CAAC;wBAEhC,KAAK,CAAC,IAAI,CAAC;4BACT,GAAG,EAAE,oBAAY,CAAC,KAAK;4BACvB,GAAG,EAAE,GAAG,CAAC,aAAa,EAAE;4BACxB,KAAK;yBACN,CAAC,CAAC;oBACL,CAAC;oBAAC,OAAO,KAAK,EAAE,CAAC;wBACf,MAAM,CAAC,KAAK,CAAC,CAAC;oBAChB,CAAC;gBACH,CAAC,CAAC;gBACF,SAAS,EAAE,CAAC;YACd,CAAC,CAAC,CAAC;YAEH,MAAM,IAAI,CAAC;YACX,OAAO,IAAI,CAAC;QACd,CAAC;gBAAS,CAAC;YACT,IAAI,KAAK,EAAE,CAAC;gBACV,KAAK,CAAC,GAAG,CAAC,SAAS,EAAE,UAAU,CAAC,CAAC;gBACjC,KAAK,CAAC,GAAG,CAAC,MAAM,EAAE,WAAW,CAAC,CAAC;gBAC/B,IAAI,KAAK,CAAC,QAAQ,KAAK,IAAI,IAAI,KAAK,CAAC,UAAU,KAAK,IAAI,EAAE,CAAC;oBACzD,SAAS,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;gBAC3B,CAAC;YACH,CAAC;QACH,CAAC;IACH,CAAC,CAAC;AACJ,CAAC,CAAC;AAEF,kBAAe,OAAO,CAAC"}
1200
backend/node_modules/bullmq/dist/cjs/classes/scripts.js
generated
vendored
Normal file
File diff suppressed because it is too large
1
backend/node_modules/bullmq/dist/cjs/classes/scripts.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
874
backend/node_modules/bullmq/dist/cjs/classes/worker.js
generated
vendored
Normal file
@@ -0,0 +1,874 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.Worker = void 0;
|
||||
const fs = require("fs");
|
||||
const url_1 = require("url");
|
||||
const path = require("path");
|
||||
const uuid_1 = require("uuid");
|
||||
// Note: this Polyfill is only needed for Node versions < 15.4.0
|
||||
const node_abort_controller_1 = require("node-abort-controller");
|
||||
const utils_1 = require("../utils");
|
||||
const queue_base_1 = require("./queue-base");
|
||||
const repeat_1 = require("./repeat");
|
||||
const child_pool_1 = require("./child-pool");
|
||||
const redis_connection_1 = require("./redis-connection");
|
||||
const sandbox_1 = require("./sandbox");
|
||||
const async_fifo_queue_1 = require("./async-fifo-queue");
|
||||
const errors_1 = require("./errors");
|
||||
const enums_1 = require("../enums");
|
||||
const job_scheduler_1 = require("./job-scheduler");
|
||||
const lock_manager_1 = require("./lock-manager");
|
||||
// 10 seconds is the maximum time a BZPOPMIN can block.
|
||||
const maximumBlockTimeout = 10;
|
||||
/**
|
||||
*
|
||||
* This class represents a worker that is able to process jobs from the queue.
|
||||
* As soon as the class is instantiated and a connection to Redis is established
|
||||
* it will start processing jobs.
|
||||
*
|
||||
*/
|
||||
class Worker extends queue_base_1.QueueBase {
|
||||
static RateLimitError() {
|
||||
return new errors_1.RateLimitError();
|
||||
}
|
||||
constructor(name, processor, opts, Connection) {
|
||||
super(name, Object.assign(Object.assign({ drainDelay: 5, concurrency: 1, lockDuration: 30000, maximumRateLimitDelay: 30000, maxStalledCount: 1, stalledInterval: 30000, autorun: true, runRetryDelay: 15000 }, opts), { blockingConnection: true }), Connection);
|
||||
this.abortDelayController = null;
|
||||
this.blockUntil = 0;
|
||||
this.drained = false;
|
||||
this.limitUntil = 0;
|
||||
this.processorAcceptsSignal = false;
|
||||
this.waiting = null;
|
||||
this.running = false;
|
||||
this.mainLoopRunning = null;
|
||||
if (!opts || !opts.connection) {
|
||||
throw new Error('Worker requires a connection');
|
||||
}
|
||||
if (typeof this.opts.maxStalledCount !== 'number' ||
|
||||
this.opts.maxStalledCount < 0) {
|
||||
throw new Error('maxStalledCount must be greater or equal than 0');
|
||||
}
|
||||
if (typeof this.opts.maxStartedAttempts === 'number' &&
|
||||
this.opts.maxStartedAttempts < 0) {
|
||||
throw new Error('maxStartedAttempts must be greater or equal than 0');
|
||||
}
|
||||
if (typeof this.opts.stalledInterval !== 'number' ||
|
||||
this.opts.stalledInterval <= 0) {
|
||||
throw new Error('stalledInterval must be greater than 0');
|
||||
}
|
||||
if (typeof this.opts.drainDelay !== 'number' || this.opts.drainDelay <= 0) {
|
||||
throw new Error('drainDelay must be greater than 0');
|
||||
}
|
||||
this.concurrency = this.opts.concurrency;
|
||||
this.opts.lockRenewTime =
|
||||
this.opts.lockRenewTime || this.opts.lockDuration / 2;
|
||||
this.id = (0, uuid_1.v4)();
|
||||
this.createLockManager();
|
||||
if (processor) {
|
||||
if (typeof processor === 'function') {
|
||||
this.processFn = processor;
|
||||
// Check if processor accepts signal parameter (3rd parameter)
|
||||
this.processorAcceptsSignal = processor.length >= 3;
|
||||
}
|
||||
else {
|
||||
// SANDBOXED
|
||||
if (processor instanceof url_1.URL) {
|
||||
if (!fs.existsSync(processor)) {
|
||||
throw new Error(`URL ${processor} does not exist in the local file system`);
|
||||
}
|
||||
processor = processor.href;
|
||||
}
|
||||
else {
|
||||
const supportedFileTypes = ['.js', '.ts', '.flow', '.cjs', '.mjs'];
|
||||
const processorFile = processor +
|
||||
(supportedFileTypes.includes(path.extname(processor)) ? '' : '.js');
|
||||
if (!fs.existsSync(processorFile)) {
|
||||
throw new Error(`File ${processorFile} does not exist`);
|
||||
}
|
||||
}
|
||||
// Separate paths so that bundling tools can resolve dependencies easier
|
||||
const dirname = path.dirname(module.filename || __filename);
|
||||
const workerThreadsMainFile = path.join(dirname, 'main-worker.js');
|
||||
const spawnProcessMainFile = path.join(dirname, 'main.js');
|
||||
let mainFilePath = this.opts.useWorkerThreads
|
||||
? workerThreadsMainFile
|
||||
: spawnProcessMainFile;
|
||||
try {
|
||||
fs.statSync(mainFilePath); // would throw if file not exists
|
||||
}
|
||||
catch (_) {
|
||||
const mainFile = this.opts.useWorkerThreads
|
||||
? 'main-worker.js'
|
||||
: 'main.js';
|
||||
mainFilePath = path.join(process.cwd(), `dist/cjs/classes/${mainFile}`);
|
||||
fs.statSync(mainFilePath);
|
||||
}
|
||||
this.childPool = new child_pool_1.ChildPool({
|
||||
mainFile: mainFilePath,
|
||||
useWorkerThreads: this.opts.useWorkerThreads,
|
||||
workerForkOptions: this.opts.workerForkOptions,
|
||||
workerThreadsOptions: this.opts.workerThreadsOptions,
|
||||
});
|
||||
this.createSandbox(processor);
|
||||
}
|
||||
if (this.opts.autorun) {
|
||||
this.run().catch(error => this.emit('error', error));
|
||||
}
|
||||
}
|
||||
const connectionName = this.clientName() + (this.opts.name ? `:w:${this.opts.name}` : '');
|
||||
this.blockingConnection = new redis_connection_1.RedisConnection((0, utils_1.isRedisInstance)(opts.connection)
|
||||
? opts.connection.duplicate({ connectionName })
|
||||
: Object.assign(Object.assign({}, opts.connection), { connectionName }), {
|
||||
shared: false,
|
||||
blocking: true,
|
||||
skipVersionCheck: opts.skipVersionCheck,
|
||||
});
|
||||
this.blockingConnection.on('error', error => this.emit('error', error));
|
||||
this.blockingConnection.on('ready', () => setTimeout(() => this.emit('ready'), 0));
|
||||
}
|
||||
/**
|
||||
* Creates and configures the lock manager for processing jobs.
|
||||
* This method can be overridden in subclasses to customize lock manager behavior.
|
||||
*/
|
||||
createLockManager() {
|
||||
this.lockManager = new lock_manager_1.LockManager(this, {
|
||||
lockRenewTime: this.opts.lockRenewTime,
|
||||
lockDuration: this.opts.lockDuration,
|
||||
workerId: this.id,
|
||||
workerName: this.opts.name,
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Creates and configures the sandbox for processing jobs.
|
||||
* This method can be overridden in subclasses to customize sandbox behavior.
|
||||
*
|
||||
* @param processor - The processor file path, URL, or function to be sandboxed
|
||||
*/
|
||||
createSandbox(processor) {
|
||||
this.processFn = (0, sandbox_1.default)(processor, this.childPool).bind(this);
|
||||
}
|
||||
/**
|
||||
* Public accessor method for LockManager to extend locks.
|
||||
* This delegates to the protected scripts object.
|
||||
*/
|
||||
async extendJobLocks(jobIds, tokens, duration) {
|
||||
return this.scripts.extendLocks(jobIds, tokens, duration);
|
||||
}
|
||||
emit(event, ...args) {
|
||||
return super.emit(event, ...args);
|
||||
}
|
||||
off(eventName, listener) {
|
||||
super.off(eventName, listener);
|
||||
return this;
|
||||
}
|
||||
on(event, listener) {
|
||||
super.on(event, listener);
|
||||
return this;
|
||||
}
|
||||
once(event, listener) {
|
||||
super.once(event, listener);
|
||||
return this;
|
||||
}
|
||||
callProcessJob(job, token, signal) {
|
||||
return this.processFn(job, token, signal);
|
||||
}
|
||||
createJob(data, jobId) {
|
||||
return this.Job.fromJSON(this, data, jobId);
|
||||
}
|
||||
/**
|
||||
*
|
||||
* Waits until the worker is ready to start processing jobs.
|
||||
* In general only useful when writing tests.
|
||||
*
|
||||
*/
|
||||
async waitUntilReady() {
|
||||
await super.waitUntilReady();
|
||||
return this.blockingConnection.client;
|
||||
}
|
||||
/**
|
||||
* Cancels a specific job currently being processed by this worker.
|
||||
* The job's processor function will receive an abort signal.
|
||||
*
|
||||
* @param jobId - The ID of the job to cancel
|
||||
* @param reason - Optional reason for the cancellation
|
||||
* @returns true if the job was found and cancelled, false otherwise
|
||||
*/
|
||||
cancelJob(jobId, reason) {
|
||||
return this.lockManager.cancelJob(jobId, reason);
|
||||
}
|
||||
/**
|
||||
* Cancels all jobs currently being processed by this worker.
|
||||
* All active job processor functions will receive abort signals.
|
||||
*
|
||||
* @param reason - Optional reason for the cancellation
|
||||
*/
|
||||
cancelAllJobs(reason) {
|
||||
this.lockManager.cancelAllJobs(reason);
|
||||
}
|
||||
set concurrency(concurrency) {
|
||||
if (typeof concurrency !== 'number' ||
|
||||
concurrency < 1 ||
|
||||
!isFinite(concurrency)) {
|
||||
throw new Error('concurrency must be a finite number greater than 0');
|
||||
}
|
||||
this._concurrency = concurrency;
|
||||
}
|
||||
get concurrency() {
|
||||
return this._concurrency;
|
||||
}
|
||||
get repeat() {
|
||||
return new Promise(async (resolve) => {
|
||||
if (!this._repeat) {
|
||||
const connection = await this.client;
|
||||
this._repeat = new repeat_1.Repeat(this.name, Object.assign(Object.assign({}, this.opts), { connection }));
|
||||
this._repeat.on('error', e => this.emit.bind(this, e));
|
||||
}
|
||||
resolve(this._repeat);
|
||||
});
|
||||
}
|
||||
get jobScheduler() {
|
||||
return new Promise(async (resolve) => {
|
||||
if (!this._jobScheduler) {
|
||||
const connection = await this.client;
|
||||
this._jobScheduler = new job_scheduler_1.JobScheduler(this.name, Object.assign(Object.assign({}, this.opts), { connection }));
|
||||
this._jobScheduler.on('error', e => this.emit.bind(this, e));
|
||||
}
|
||||
resolve(this._jobScheduler);
|
||||
});
|
||||
}
|
||||
async run() {
|
||||
if (!this.processFn) {
|
||||
throw new Error('No process function is defined.');
|
||||
}
|
||||
if (this.running) {
|
||||
throw new Error('Worker is already running.');
|
||||
}
|
||||
try {
|
||||
this.running = true;
|
||||
if (this.closing || this.paused) {
|
||||
return;
|
||||
}
|
||||
await this.startStalledCheckTimer();
|
||||
if (!this.opts.skipLockRenewal) {
|
||||
this.lockManager.start();
|
||||
}
|
||||
const client = await this.client;
|
||||
const bclient = await this.blockingConnection.client;
|
||||
this.mainLoopRunning = this.mainLoop(client, bclient);
|
||||
// We must await here or finally will be called too early.
|
||||
await this.mainLoopRunning;
|
||||
}
|
||||
finally {
|
||||
this.running = false;
|
||||
}
|
||||
}
|
||||
async waitForRateLimit() {
|
||||
var _a;
|
||||
const limitUntil = this.limitUntil;
|
||||
if (limitUntil > Date.now()) {
|
||||
(_a = this.abortDelayController) === null || _a === void 0 ? void 0 : _a.abort();
|
||||
this.abortDelayController = new node_abort_controller_1.AbortController();
|
||||
const delay = this.getRateLimitDelay(limitUntil - Date.now());
|
||||
await this.delay(delay, this.abortDelayController);
|
||||
this.drained = false;
|
||||
this.limitUntil = 0;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* This is the main loop in BullMQ. Its goals are to fetch jobs from the queue
|
||||
* as efficiently as possible, providing concurrency and minimal unnecessary calls
|
||||
* to Redis.
|
||||
*/
|
||||
async mainLoop(client, bclient) {
|
||||
const asyncFifoQueue = new async_fifo_queue_1.AsyncFifoQueue();
|
||||
let tokenPostfix = 0;
|
||||
while ((!this.closing && !this.paused) || asyncFifoQueue.numTotal() > 0) {
|
||||
/**
|
||||
* This inner loop tries to fetch jobs concurrently, but if we are waiting for a job
|
||||
* to arrive at the queue we should not try to fetch more jobs (as it would be pointless)
|
||||
*/
|
||||
while (!this.closing &&
|
||||
!this.paused &&
|
||||
!this.waiting &&
|
||||
asyncFifoQueue.numTotal() < this._concurrency &&
|
||||
!this.isRateLimited()) {
|
||||
const token = `${this.id}:${tokenPostfix++}`;
|
||||
const fetchedJob = this.retryIfFailed(() => this._getNextJob(client, bclient, token, { block: true }), {
|
||||
delayInMs: this.opts.runRetryDelay,
|
||||
onlyEmitError: true,
|
||||
});
|
||||
asyncFifoQueue.add(fetchedJob);
|
||||
if (this.waiting && asyncFifoQueue.numTotal() > 1) {
|
||||
// We are waiting for jobs but we have others that we could start processing already
|
||||
break;
|
||||
}
|
||||
// We await here so that we fetch jobs in sequence, this is important to avoid unnecessary calls
|
||||
// to Redis in high concurrency scenarios.
|
||||
const job = await fetchedJob;
|
||||
// No more jobs waiting but we have others that could start processing already
|
||||
if (!job && asyncFifoQueue.numTotal() > 1) {
|
||||
break;
|
||||
}
|
||||
// If there are potential jobs to be processed and blockUntil is set, we should exit to avoid waiting
|
||||
// for processing this job.
|
||||
if (this.blockUntil) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Since there can be undefined jobs in the queue (when a job fails or queue is empty)
|
||||
// we iterate until we find a job.
|
||||
let job;
|
||||
do {
|
||||
job = await asyncFifoQueue.fetch();
|
||||
} while (!job && asyncFifoQueue.numQueued() > 0);
|
||||
if (job) {
|
||||
const token = job.token;
|
||||
asyncFifoQueue.add(this.processJob(job, token, () => asyncFifoQueue.numTotal() <= this._concurrency));
|
||||
}
|
||||
else if (asyncFifoQueue.numQueued() === 0) {
|
||||
await this.waitForRateLimit();
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Returns a promise that resolves to the next job in queue.
|
||||
* @param token - worker token to be assigned to retrieved job
|
||||
* @returns a Job or undefined if no job was available in the queue.
|
||||
*/
|
||||
async getNextJob(token, { block = true } = {}) {
|
||||
var _a, _b;
|
||||
const nextJob = await this._getNextJob(await this.client, await this.blockingConnection.client, token, { block });
|
||||
return this.trace(enums_1.SpanKind.INTERNAL, 'getNextJob', this.name, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.WorkerId]: this.id,
|
||||
[enums_1.TelemetryAttributes.QueueName]: this.name,
|
||||
[enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
|
||||
[enums_1.TelemetryAttributes.WorkerOptions]: JSON.stringify({ block }),
|
||||
[enums_1.TelemetryAttributes.JobId]: nextJob === null || nextJob === void 0 ? void 0 : nextJob.id,
|
||||
});
|
||||
return nextJob;
|
||||
}, (_b = (_a = nextJob === null || nextJob === void 0 ? void 0 : nextJob.opts) === null || _a === void 0 ? void 0 : _a.telemetry) === null || _b === void 0 ? void 0 : _b.metadata);
|
||||
}
|
||||
async _getNextJob(client, bclient, token, { block = true } = {}) {
|
||||
if (this.paused) {
|
||||
return;
|
||||
}
|
||||
if (this.closing) {
|
||||
return;
|
||||
}
|
||||
if (this.drained && block && !this.limitUntil && !this.waiting) {
|
||||
this.waiting = this.waitForJob(bclient, this.blockUntil);
|
||||
try {
|
||||
this.blockUntil = await this.waiting;
|
||||
if (this.blockUntil <= 0 || this.blockUntil - Date.now() < 1) {
|
||||
return await this.moveToActive(client, token, this.opts.name);
|
||||
}
|
||||
}
|
||||
finally {
|
||||
this.waiting = null;
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (!this.isRateLimited()) {
|
||||
return this.moveToActive(client, token, this.opts.name);
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Overrides the rate limit to be active for the next jobs.
|
||||
* @deprecated This method is deprecated and will be removed in v6. Use queue.rateLimit method instead.
|
||||
* @param expireTimeMs - expire time in ms of this rate limit.
|
||||
*/
|
||||
async rateLimit(expireTimeMs) {
|
||||
await this.trace(enums_1.SpanKind.INTERNAL, 'rateLimit', this.name, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.WorkerId]: this.id,
|
||||
[enums_1.TelemetryAttributes.WorkerRateLimit]: expireTimeMs,
|
||||
});
|
||||
await this.client.then(client => client.set(this.keys.limiter, Number.MAX_SAFE_INTEGER, 'PX', expireTimeMs));
|
||||
});
|
||||
}
|
||||
get minimumBlockTimeout() {
|
||||
return this.blockingConnection.capabilities.canBlockFor1Ms
|
||||
? /* 1 millisecond is chosen because the granularity of our timestamps are milliseconds.
|
||||
Obviously we can still process much faster than 1 job per millisecond but delays and rate limits
|
||||
will never work with more accuracy than 1ms. */
|
||||
0.001
|
||||
: 0.002;
|
||||
}
|
||||
isRateLimited() {
|
||||
return this.limitUntil > Date.now();
|
||||
}
|
||||
async moveToActive(client, token, name) {
|
||||
const [jobData, id, rateLimitDelay, delayUntil] = await this.scripts.moveToActive(client, token, name);
|
||||
this.updateDelays(rateLimitDelay, delayUntil);
|
||||
return this.nextJobFromJobData(jobData, id, token);
|
||||
}
|
||||
async waitForJob(bclient, blockUntil) {
|
||||
if (this.paused) {
|
||||
return Infinity;
|
||||
}
|
||||
let timeout;
|
||||
try {
|
||||
if (!this.closing && !this.isRateLimited()) {
|
||||
let blockTimeout = this.getBlockTimeout(blockUntil);
|
||||
if (blockTimeout > 0) {
|
||||
blockTimeout = this.blockingConnection.capabilities.canDoubleTimeout
|
||||
? blockTimeout
|
||||
: Math.ceil(blockTimeout);
|
||||
// We cannot trust that the blocking connection stays blocking forever
|
||||
// due to issues in Redis and IORedis, so we will reconnect if we
|
||||
// don't get a response in the expected time.
|
||||
timeout = setTimeout(async () => {
|
||||
bclient.disconnect(!this.closing);
|
||||
}, blockTimeout * 1000 + 1000);
|
||||
this.updateDelays(); // reset delays to avoid reusing same values in next iteration
|
||||
// Markers should only be used for un-blocking, so we will handle them in this
|
||||
// function only.
|
||||
const result = await bclient.bzpopmin(this.keys.marker, blockTimeout);
|
||||
if (result) {
|
||||
const [_key, member, score] = result;
|
||||
if (member) {
|
||||
const newBlockUntil = parseInt(score);
|
||||
// Use by pro version as rate limited groups could generate lower blockUntil values
|
||||
// markers only return delays for delayed jobs
|
||||
if (blockUntil && newBlockUntil > blockUntil) {
|
||||
return blockUntil;
|
||||
}
|
||||
return newBlockUntil;
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
catch (error) {
|
||||
if ((0, utils_1.isNotConnectionError)(error)) {
|
||||
this.emit('error', error);
|
||||
}
|
||||
if (!this.closing) {
|
||||
await this.delay();
|
||||
}
|
||||
}
|
||||
finally {
|
||||
clearTimeout(timeout);
|
||||
}
|
||||
return Infinity;
|
||||
}
|
||||
getBlockTimeout(blockUntil) {
|
||||
const opts = this.opts;
|
||||
// when there are delayed jobs
|
||||
if (blockUntil) {
|
||||
const blockDelay = blockUntil - Date.now();
|
||||
// when we reach the time to get new jobs
|
||||
if (blockDelay <= 0) {
|
||||
return blockDelay;
|
||||
}
|
||||
else if (blockDelay < this.minimumBlockTimeout * 1000) {
|
||||
return this.minimumBlockTimeout;
|
||||
}
|
||||
else {
|
||||
// We restrict the maximum block timeout to 10 second to avoid
|
||||
// blocking the connection for too long in the case of reconnections
|
||||
// reference: https://github.com/taskforcesh/bullmq/issues/1658
|
||||
return Math.min(blockDelay / 1000, maximumBlockTimeout);
|
||||
}
|
||||
}
|
||||
else {
|
||||
return Math.max(opts.drainDelay, this.minimumBlockTimeout);
|
||||
}
|
||||
}
|
||||
getRateLimitDelay(delay) {
|
||||
// We restrict the maximum limit delay to the configured maximumRateLimitDelay
|
||||
// to be able to promote delayed jobs while the queue is rate limited
|
||||
return Math.min(delay, this.opts.maximumRateLimitDelay);
|
||||
}
|
||||
/**
|
||||
*
|
||||
* This function is exposed only for testing purposes.
|
||||
*/
|
||||
async delay(milliseconds, abortController) {
|
||||
await (0, utils_1.delay)(milliseconds || utils_1.DELAY_TIME_1, abortController);
|
||||
}
|
||||
updateDelays(limitDelay = 0, delayUntil = 0) {
|
||||
const clampedLimit = Math.max(limitDelay, 0);
|
||||
if (clampedLimit > 0) {
|
||||
this.limitUntil = Date.now() + clampedLimit;
|
||||
}
|
||||
else {
|
||||
this.limitUntil = 0;
|
||||
}
|
||||
this.blockUntil = Math.max(delayUntil, 0) || 0;
|
||||
}
|
||||
async nextJobFromJobData(jobData, jobId, token) {
|
||||
if (!jobData) {
|
||||
if (!this.drained) {
|
||||
this.emit('drained');
|
||||
this.drained = true;
|
||||
}
|
||||
}
|
||||
else {
|
||||
this.drained = false;
|
||||
const job = this.createJob(jobData, jobId);
|
||||
job.token = token;
|
||||
try {
|
||||
await this.retryIfFailed(async () => {
|
||||
if (job.repeatJobKey && job.repeatJobKey.split(':').length < 5) {
|
||||
const jobScheduler = await this.jobScheduler;
|
||||
await jobScheduler.upsertJobScheduler(
|
||||
// Most of these arguments are not really needed
|
||||
// anymore as we read them from the job scheduler itself
|
||||
job.repeatJobKey, job.opts.repeat, job.name, job.data, job.opts, { override: false, producerId: job.id });
|
||||
}
|
||||
else if (job.opts.repeat) {
|
||||
const repeat = await this.repeat;
|
||||
await repeat.updateRepeatableJob(job.name, job.data, job.opts, {
|
||||
override: false,
|
||||
});
|
||||
}
|
||||
}, { delayInMs: this.opts.runRetryDelay });
|
||||
}
|
||||
catch (err) {
|
||||
// Emit error but don't throw to avoid breaking current job completion
|
||||
// Note: This means the next repeatable job will not be scheduled
|
||||
const errorMessage = err instanceof Error ? err.message : String(err);
|
||||
const schedulingError = new Error(`Failed to add repeatable job for next iteration: ${errorMessage}`);
|
||||
this.emit('error', schedulingError);
|
||||
// Return undefined to indicate no next job is available
|
||||
return undefined;
|
||||
}
|
||||
return job;
|
||||
}
|
||||
}
|
||||
async processJob(job, token, fetchNextCallback = () => true) {
|
||||
var _a, _b;
|
||||
const srcPropagationMedatada = (_b = (_a = job.opts) === null || _a === void 0 ? void 0 : _a.telemetry) === null || _b === void 0 ? void 0 : _b.metadata;
|
||||
return this.trace(enums_1.SpanKind.CONSUMER, 'process', this.name, async (span) => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.WorkerId]: this.id,
|
||||
[enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
|
||||
[enums_1.TelemetryAttributes.JobId]: job.id,
|
||||
[enums_1.TelemetryAttributes.JobName]: job.name,
|
||||
});
|
||||
this.emit('active', job, 'waiting');
|
||||
const processedOn = Date.now();
|
||||
const abortController = this.lockManager.trackJob(job.id, token, processedOn, this.processorAcceptsSignal);
|
||||
try {
|
||||
const unrecoverableErrorMessage = this.getUnrecoverableErrorMessage(job);
|
||||
if (unrecoverableErrorMessage) {
|
||||
const failed = await this.retryIfFailed(() => {
|
||||
this.lockManager.untrackJob(job.id);
|
||||
return this.handleFailed(new errors_1.UnrecoverableError(unrecoverableErrorMessage), job, token, fetchNextCallback, span);
|
||||
}, { delayInMs: this.opts.runRetryDelay, span });
|
||||
return failed;
|
||||
}
|
||||
const result = await this.callProcessJob(job, token, abortController
|
||||
? abortController.signal
|
||||
: undefined);
|
||||
return await this.retryIfFailed(() => {
|
||||
this.lockManager.untrackJob(job.id);
|
||||
return this.handleCompleted(result, job, token, fetchNextCallback, span);
|
||||
}, { delayInMs: this.opts.runRetryDelay, span });
|
||||
}
|
||||
catch (err) {
|
||||
const failed = await this.retryIfFailed(() => {
|
||||
this.lockManager.untrackJob(job.id);
|
||||
return this.handleFailed(err, job, token, fetchNextCallback, span);
|
||||
}, { delayInMs: this.opts.runRetryDelay, span, onlyEmitError: true });
|
||||
return failed;
|
||||
}
|
||||
finally {
|
||||
this.lockManager.untrackJob(job.id);
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.JobFinishedTimestamp]: Date.now(),
|
||||
[enums_1.TelemetryAttributes.JobProcessedTimestamp]: processedOn,
|
||||
});
|
||||
}
|
||||
}, srcPropagationMedatada);
|
||||
}
|
||||
getUnrecoverableErrorMessage(job) {
|
||||
if (job.deferredFailure) {
|
||||
return job.deferredFailure;
|
||||
}
|
||||
if (this.opts.maxStartedAttempts &&
|
||||
this.opts.maxStartedAttempts < job.attemptsStarted) {
|
||||
return 'job started more than allowable limit';
|
||||
}
|
||||
}
|
||||
async handleCompleted(result, job, token, fetchNextCallback = () => true, span) {
|
||||
if (!this.connection.closing) {
|
||||
const completed = await job.moveToCompleted(result, token, fetchNextCallback() && !(this.closing || this.paused));
|
||||
this.emit('completed', job, result, 'active');
|
||||
span === null || span === void 0 ? void 0 : span.addEvent('job completed', {
|
||||
[enums_1.TelemetryAttributes.JobResult]: JSON.stringify(result),
|
||||
});
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.JobAttemptsMade]: job.attemptsMade,
|
||||
});
|
||||
if (Array.isArray(completed)) {
|
||||
const [jobData, jobId, rateLimitDelay, delayUntil] = completed;
|
||||
this.updateDelays(rateLimitDelay, delayUntil);
|
||||
return this.nextJobFromJobData(jobData, jobId, token);
|
||||
}
|
||||
}
|
||||
}
|
||||
async handleFailed(err, job, token, fetchNextCallback = () => true, span) {
|
||||
if (!this.connection.closing) {
|
||||
// Check if the job was manually rate-limited
|
||||
if (err.message === errors_1.RATE_LIMIT_ERROR) {
|
||||
const rateLimitTtl = await this.moveLimitedBackToWait(job, token);
|
||||
this.limitUntil = rateLimitTtl > 0 ? Date.now() + rateLimitTtl : 0;
|
||||
return;
|
||||
}
|
||||
if (err instanceof errors_1.DelayedError ||
|
||||
err.name == 'DelayedError' ||
|
||||
err instanceof errors_1.WaitingError ||
|
||||
err.name == 'WaitingError' ||
|
||||
err instanceof errors_1.WaitingChildrenError ||
|
||||
err.name == 'WaitingChildrenError') {
|
||||
const client = await this.client;
|
||||
return this.moveToActive(client, token, this.opts.name);
|
||||
}
|
||||
const result = await job.moveToFailed(err, token, fetchNextCallback() && !(this.closing || this.paused));
|
||||
this.emit('failed', job, err, 'active');
|
||||
span === null || span === void 0 ? void 0 : span.addEvent('job failed', {
|
||||
[enums_1.TelemetryAttributes.JobFailedReason]: err.message,
|
||||
});
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.JobAttemptsMade]: job.attemptsMade,
|
||||
});
|
||||
// Note: result can be undefined if moveToFailed fails (e.g., lock was lost)
|
||||
if (Array.isArray(result)) {
|
||||
const [jobData, jobId, rateLimitDelay, delayUntil] = result;
|
||||
this.updateDelays(rateLimitDelay, delayUntil);
|
||||
return this.nextJobFromJobData(jobData, jobId, token);
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
*
|
||||
* Pauses the processing of this queue only for this worker.
|
||||
*/
|
||||
async pause(doNotWaitActive) {
|
||||
await this.trace(enums_1.SpanKind.INTERNAL, 'pause', this.name, async (span) => {
|
||||
var _a;
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.WorkerId]: this.id,
|
||||
[enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
|
||||
[enums_1.TelemetryAttributes.WorkerDoNotWaitActive]: doNotWaitActive,
|
||||
});
|
||||
if (!this.paused) {
|
||||
this.paused = true;
|
||||
if (!doNotWaitActive) {
|
||||
await this.whenCurrentJobsFinished();
|
||||
}
|
||||
(_a = this.stalledCheckStopper) === null || _a === void 0 ? void 0 : _a.call(this);
|
||||
this.emit('paused');
|
||||
}
|
||||
});
|
||||
}
|
||||
/**
|
||||
*
|
||||
* Resumes processing of this worker (if paused).
|
||||
*/
|
||||
resume() {
|
||||
if (!this.running) {
|
||||
this.trace(enums_1.SpanKind.INTERNAL, 'resume', this.name, span => {
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.WorkerId]: this.id,
|
||||
[enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
|
||||
});
|
||||
this.paused = false;
|
||||
if (this.processFn) {
|
||||
this.run();
|
||||
}
|
||||
this.emit('resumed');
|
||||
});
|
||||
}
|
||||
}
|
||||
/**
|
||||
*
|
||||
* Checks if worker is paused.
|
||||
*
|
||||
* @returns true if worker is paused, false otherwise.
|
||||
*/
|
||||
isPaused() {
|
||||
return !!this.paused;
|
||||
}
|
||||
/**
|
||||
*
|
||||
* Checks if worker is currently running.
|
||||
*
|
||||
* @returns true if worker is running, false otherwise.
|
||||
*/
|
||||
isRunning() {
|
||||
return this.running;
|
||||
}
|
||||
/**
|
||||
*
|
||||
* Closes the worker and related redis connections.
|
||||
*
|
||||
* This method waits for current jobs to finalize before returning.
|
||||
*
|
||||
* @param force - Use force boolean parameter if you do not want to wait for
|
||||
* current jobs to be processed. When using telemetry, be mindful that it can
|
||||
* interfere with the proper closure of spans, potentially preventing them from being exported.
|
||||
*
|
||||
* @returns Promise that resolves when the worker has been closed.
|
||||
*/
|
||||
async close(force = false) {
|
||||
if (this.closing) {
|
||||
return this.closing;
|
||||
}
|
||||
this.closing = (async () => {
|
||||
await this.trace(enums_1.SpanKind.INTERNAL, 'close', this.name, async (span) => {
|
||||
var _a, _b;
|
||||
span === null || span === void 0 ? void 0 : span.setAttributes({
|
||||
[enums_1.TelemetryAttributes.WorkerId]: this.id,
|
||||
[enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
|
||||
[enums_1.TelemetryAttributes.WorkerForceClose]: force,
                });
                this.emit('closing', 'closing queue');
                (_a = this.abortDelayController) === null || _a === void 0 ? void 0 : _a.abort();
                // Define the async cleanup functions
                const asyncCleanups = [
                    () => {
                        return force || this.whenCurrentJobsFinished(false);
                    },
                    () => this.lockManager.close(),
                    () => { var _a; return (_a = this.childPool) === null || _a === void 0 ? void 0 : _a.clean(); },
                    () => this.blockingConnection.close(force),
                    () => this.connection.close(force),
                ];
                // Run cleanup functions sequentially and make sure all are run despite any errors
                for (const cleanup of asyncCleanups) {
                    try {
                        await cleanup();
                    }
                    catch (err) {
                        this.emit('error', err);
                    }
                }
                (_b = this.stalledCheckStopper) === null || _b === void 0 ? void 0 : _b.call(this);
                this.closed = true;
                this.emit('closed');
            });
        })();
        return await this.closing;
    }
    /**
     *
     * Manually starts the stalled checker.
     * The check will run once as soon as this method is called, and
     * then every opts.stalledInterval milliseconds until the worker is closed.
     * Note: Normally you do not need to call this method, since the stalled checker
     * is automatically started when the worker starts processing jobs after
     * calling run. However, if you want to process the jobs manually you need
     * to call this method to start the stalled checker.
     *
     * @see {@link https://docs.bullmq.io/patterns/manually-fetching-jobs}
     */
    async startStalledCheckTimer() {
        if (!this.opts.skipStalledCheck) {
            if (!this.closing) {
                await this.trace(enums_1.SpanKind.INTERNAL, 'startStalledCheckTimer', this.name, async (span) => {
                    span === null || span === void 0 ? void 0 : span.setAttributes({
                        [enums_1.TelemetryAttributes.WorkerId]: this.id,
                        [enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
                    });
                    this.stalledChecker().catch(err => {
                        this.emit('error', err);
                    });
                });
            }
        }
    }
    async stalledChecker() {
        while (!(this.closing || this.paused)) {
            await this.checkConnectionError(() => this.moveStalledJobsToWait());
            await new Promise(resolve => {
                const timeout = setTimeout(resolve, this.opts.stalledInterval);
                this.stalledCheckStopper = () => {
                    clearTimeout(timeout);
                    resolve();
                };
            });
        }
    }
    /**
     * Returns a promise that resolves when active jobs are cleared
     *
     * @returns
     */
    async whenCurrentJobsFinished(reconnect = true) {
        //
        // Force reconnection of blocking connection to abort blocking redis call immediately.
        //
        if (this.waiting) {
            // If we are not going to reconnect, we will not wait for the disconnection.
            await this.blockingConnection.disconnect(reconnect);
        }
        else {
            reconnect = false;
        }
        if (this.mainLoopRunning) {
            await this.mainLoopRunning;
        }
        reconnect && (await this.blockingConnection.reconnect());
    }
    async retryIfFailed(fn, opts) {
        var _a;
        let retry = 0;
        const maxRetries = opts.maxRetries || Infinity;
        do {
            try {
                return await fn();
            }
            catch (err) {
                (_a = opts.span) === null || _a === void 0 ? void 0 : _a.recordException(err.message);
                if ((0, utils_1.isNotConnectionError)(err)) {
                    // Emit error when not paused or closing; optionally swallow (no throw) when opts.onlyEmitError is set.
                    if (!this.paused && !this.closing) {
                        this.emit('error', err);
                    }
                    if (opts.onlyEmitError) {
                        return;
                    }
                    else {
                        throw err;
                    }
                }
                else {
                    if (opts.delayInMs && !this.closing && !this.closed) {
                        await this.delay(opts.delayInMs, this.abortDelayController);
                    }
                    if (retry + 1 >= maxRetries) {
                        // If we've reached max retries, throw the last error
                        throw err;
                    }
                }
            }
        } while (++retry < maxRetries);
    }
    async moveStalledJobsToWait() {
        await this.trace(enums_1.SpanKind.INTERNAL, 'moveStalledJobsToWait', this.name, async (span) => {
            const stalled = await this.scripts.moveStalledJobsToWait();
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.WorkerId]: this.id,
                [enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
                [enums_1.TelemetryAttributes.WorkerStalledJobs]: stalled,
            });
            stalled.forEach((jobId) => {
                span === null || span === void 0 ? void 0 : span.addEvent('job stalled', {
                    [enums_1.TelemetryAttributes.JobId]: jobId,
                });
                this.emit('stalled', jobId, 'active');
            });
        });
    }
    moveLimitedBackToWait(job, token) {
        return job.moveToWait(token);
    }
}
exports.Worker = Worker;
//# sourceMappingURL=worker.js.map
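For orientation, and not part of the vendored file: a minimal sketch of how this close logic is typically driven from application code, assuming the standard BullMQ Worker API (queue name, processor body and connection details are illustrative).

import { Worker } from 'bullmq';

// A worker processing jobs from a hypothetical 'emails' queue.
const worker = new Worker('emails', async job => {
  // ... process job.data ...
}, { connection: { host: 'localhost', port: 6379 } });

process.on('SIGTERM', async () => {
  // close() resolves once the current jobs have finished and the cleanup
  // functions above have run; pass true to force-close immediately.
  await worker.close();
  process.exit(0);
});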
1
backend/node_modules/bullmq/dist/cjs/classes/worker.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
107
backend/node_modules/bullmq/dist/cjs/commands/addDelayedJob-6.lua
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
--[[
|
||||
Adds a delayed job to the queue by doing the following:
|
||||
- Increases the job counter if needed.
|
||||
- Creates a new job key with the job data.
|
||||
|
||||
- computes timestamp.
|
||||
- adds to delayed zset.
|
||||
- Emits a global event 'delayed' if the job is delayed.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'marker',
|
||||
KEYS[2] 'meta'
|
||||
KEYS[3] 'id'
|
||||
KEYS[4] 'delayed'
|
||||
KEYS[5] 'completed'
|
||||
KEYS[6] events stream key
|
||||
|
||||
ARGV[1] msgpacked arguments array
|
||||
[1] key prefix,
|
||||
[2] custom id (use custom instead of one generated automatically)
|
||||
[3] name
|
||||
[4] timestamp
|
||||
[5] parentKey?
|
||||
[6] parent dependencies key.
|
||||
[7] parent? {id, queueKey}
|
||||
[8] repeat job key
|
||||
[9] deduplication key
|
||||
|
||||
ARGV[2] Json stringified job data
|
||||
ARGV[3] msgpacked options
|
||||
|
||||
Output:
|
||||
jobId - OK
|
||||
-5 - Missing parent key
|
||||
]]
|
||||
local metaKey = KEYS[2]
|
||||
local idKey = KEYS[3]
|
||||
local delayedKey = KEYS[4]
|
||||
|
||||
local completedKey = KEYS[5]
|
||||
local eventsKey = KEYS[6]
|
||||
|
||||
local jobId
|
||||
local jobIdKey
|
||||
local rcall = redis.call
|
||||
|
||||
local args = cmsgpack.unpack(ARGV[1])
|
||||
|
||||
local data = ARGV[2]
|
||||
|
||||
local parentKey = args[5]
|
||||
local parent = args[7]
|
||||
local repeatJobKey = args[8]
|
||||
local deduplicationKey = args[9]
|
||||
local parentData
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addDelayedJob"
|
||||
--- @include "includes/deduplicateJob"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/handleDuplicatedJob"
|
||||
--- @include "includes/storeJob"
|
||||
|
||||
if parentKey ~= nil then
|
||||
if rcall("EXISTS", parentKey) ~= 1 then return -5 end
|
||||
|
||||
parentData = cjson.encode(parent)
|
||||
end
|
||||
|
||||
local jobCounter = rcall("INCR", idKey)
|
||||
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
local opts = cmsgpack.unpack(ARGV[3])
|
||||
|
||||
local parentDependenciesKey = args[6]
|
||||
local timestamp = args[4]
|
||||
|
||||
if args[2] == "" then
|
||||
jobId = jobCounter
|
||||
jobIdKey = args[1] .. jobId
|
||||
else
|
||||
jobId = args[2]
|
||||
jobIdKey = args[1] .. jobId
|
||||
if rcall("EXISTS", jobIdKey) == 1 then
|
||||
return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
|
||||
parentData, parentDependenciesKey, completedKey, eventsKey,
|
||||
maxEvents, timestamp)
|
||||
end
|
||||
end
|
||||
|
||||
local deduplicationJobId = deduplicateJob(opts['de'], jobId, delayedKey, deduplicationKey,
|
||||
eventsKey, maxEvents, args[1])
|
||||
if deduplicationJobId then
|
||||
return deduplicationJobId
|
||||
end
|
||||
|
||||
local delay, priority = storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2],
|
||||
opts, timestamp, parentKey, parentData, repeatJobKey)
|
||||
|
||||
addDelayedJob(jobId, delayedKey, eventsKey, timestamp, maxEvents, KEYS[1], delay)
|
||||
|
||||
-- Check if this job is a child of another job, if so add it to the parent's dependencies
|
||||
if parentDependenciesKey ~= nil then
|
||||
rcall("SADD", parentDependenciesKey, jobIdKey)
|
||||
end
|
||||
|
||||
return jobId .. "" -- convert to string
|
||||
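A minimal sketch (not part of the vendored file) of the JS call that ends in this script, assuming the standard Queue API; queue name, job name and data are illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('emails', { connection: { host: 'localhost', port: 6379 } });

// 'delay' is in milliseconds: the job is stored in the delayed zset and
// only becomes available to workers once the delay has elapsed.
await queue.add('reminder', { to: 'user@example.com' }, { delay: 60_000 });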
198
backend/node_modules/bullmq/dist/cjs/commands/addJobScheduler-11.lua
generated
vendored
Normal file
@@ -0,0 +1,198 @@
|
||||
--[[
|
||||
Adds a job scheduler, i.e. a job factory that creates jobs based on a given schedule (repeat options).
|
||||
|
||||
Input:
|
||||
KEYS[1] 'repeat' key
|
||||
KEYS[2] 'delayed' key
|
||||
KEYS[3] 'wait' key
|
||||
KEYS[4] 'paused' key
|
||||
KEYS[5] 'meta' key
|
||||
KEYS[6] 'prioritized' key
|
||||
KEYS[7] 'marker' key
|
||||
KEYS[8] 'id' key
|
||||
KEYS[9] 'events' key
|
||||
KEYS[10] 'pc' priority counter
|
||||
KEYS[11] 'active' key
|
||||
|
||||
ARGV[1] next milliseconds
|
||||
ARGV[2] msgpacked options
|
||||
[1] name
|
||||
[2] tz?
|
||||
[3] pattern?
|
||||
[4] endDate?
|
||||
[5] every?
|
||||
ARGV[3] jobs scheduler id
|
||||
ARGV[4] Json stringified template data
|
||||
ARGV[5] msgpacked template opts
|
||||
ARGV[6] msgpacked delayed opts
|
||||
ARGV[7] timestamp
|
||||
ARGV[8] prefix key
|
||||
ARGV[9] producer key
|
||||
|
||||
Output:
|
||||
repeatableKey - OK
|
||||
]]
local rcall = redis.call
|
||||
local repeatKey = KEYS[1]
|
||||
local delayedKey = KEYS[2]
|
||||
local waitKey = KEYS[3]
|
||||
local pausedKey = KEYS[4]
|
||||
local metaKey = KEYS[5]
|
||||
local prioritizedKey = KEYS[6]
|
||||
local eventsKey = KEYS[9]
|
||||
|
||||
local nextMillis = ARGV[1]
|
||||
local jobSchedulerId = ARGV[3]
|
||||
local templateOpts = cmsgpack.unpack(ARGV[5])
|
||||
local now = tonumber(ARGV[7])
|
||||
local prefixKey = ARGV[8]
|
||||
local jobOpts = cmsgpack.unpack(ARGV[6])
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addJobFromScheduler"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/isQueuePaused"
|
||||
--- @include "includes/removeJob"
|
||||
--- @include "includes/storeJobScheduler"
|
||||
--- @include "includes/getJobSchedulerEveryNextMillis"
|
||||
|
||||
-- If we are overriding a repeatable job we must delete the delayed job for
|
||||
-- the next iteration.
|
||||
local schedulerKey = repeatKey .. ":" .. jobSchedulerId
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
|
||||
local templateData = ARGV[4]
|
||||
|
||||
local prevMillis = rcall("ZSCORE", repeatKey, jobSchedulerId)
|
||||
if prevMillis then
|
||||
prevMillis = tonumber(prevMillis)
|
||||
end
|
||||
local schedulerOpts = cmsgpack.unpack(ARGV[2])
|
||||
|
||||
local every = schedulerOpts['every']
|
||||
|
||||
-- For backwards compatibility we also check the offset from the job itself.
|
||||
-- could be removed in future major versions.
|
||||
local jobOffset = jobOpts['repeat'] and jobOpts['repeat']['offset'] or 0
|
||||
local offset = schedulerOpts['offset'] or jobOffset or 0
|
||||
local newOffset = offset
|
||||
|
||||
local updatedEvery = false
|
||||
if every then
|
||||
-- if we changed the 'every' value we need to reset millis to nil
|
||||
local millis = prevMillis
|
||||
if prevMillis then
|
||||
local prevEvery = tonumber(rcall("HGET", schedulerKey, "every"))
|
||||
if prevEvery ~= every then
|
||||
millis = nil
|
||||
updatedEvery = true
|
||||
end
|
||||
end
|
||||
|
||||
local startDate = schedulerOpts['startDate']
|
||||
nextMillis, newOffset = getJobSchedulerEveryNextMillis(millis, every, now, offset, startDate)
|
||||
end
|
||||
|
||||
local function removeJobFromScheduler(prefixKey, delayedKey, prioritizedKey, waitKey, pausedKey, jobId, metaKey,
|
||||
eventsKey)
|
||||
if rcall("ZSCORE", delayedKey, jobId) then
|
||||
removeJob(jobId, true, prefixKey, true --[[remove debounce key]] )
|
||||
rcall("ZREM", delayedKey, jobId)
|
||||
return true
|
||||
elseif rcall("ZSCORE", prioritizedKey, jobId) then
|
||||
removeJob(jobId, true, prefixKey, true --[[remove debounce key]] )
|
||||
rcall("ZREM", prioritizedKey, jobId)
|
||||
return true
|
||||
else
|
||||
local pausedOrWaitKey = waitKey
|
||||
if isQueuePaused(metaKey) then
|
||||
pausedOrWaitKey = pausedKey
|
||||
end
|
||||
|
||||
if rcall("LREM", pausedOrWaitKey, 1, jobId) > 0 then
|
||||
removeJob(jobId, true, prefixKey, true --[[remove debounce key]] )
|
||||
return true
|
||||
end
|
||||
end
|
||||
|
||||
return false
|
||||
end
|
||||
|
||||
local removedPrevJob = false
|
||||
if prevMillis then
|
||||
local currentJobId = "repeat:" .. jobSchedulerId .. ":" .. prevMillis
|
||||
local currentJobKey = schedulerKey .. ":" .. prevMillis
|
||||
|
||||
-- In theory the currentJobKey should always exist if there is a prevMillis, unless something has
|
||||
-- gone really wrong.
|
||||
if rcall("EXISTS", currentJobKey) == 1 then
|
||||
removedPrevJob = removeJobFromScheduler(prefixKey, delayedKey, prioritizedKey, waitKey, pausedKey, currentJobId,
|
||||
metaKey, eventsKey)
|
||||
end
|
||||
end
|
||||
|
||||
if removedPrevJob then
|
||||
-- The job has been removed and we want to replace it, so let's use the same millis.
|
||||
if every and not updatedEvery then
|
||||
nextMillis = prevMillis
|
||||
end
|
||||
else
|
||||
-- Special case where no job was removed, and we need to add the next iteration.
|
||||
schedulerOpts['offset'] = newOffset
|
||||
end
|
||||
|
||||
-- Check for job ID collision with existing jobs (in any state)
|
||||
local jobId = "repeat:" .. jobSchedulerId .. ":" .. nextMillis
|
||||
local jobKey = prefixKey .. jobId
|
||||
|
||||
-- If there's already a job with this ID, in a state
|
||||
-- that is not updatable (active, completed, failed) we must
|
||||
-- handle the collision
|
||||
local hasCollision = false
|
||||
if rcall("EXISTS", jobKey) == 1 then
|
||||
if every then
|
||||
-- For 'every' case: try next time slot to avoid collision
|
||||
local nextSlotMillis = nextMillis + every
|
||||
local nextSlotJobId = "repeat:" .. jobSchedulerId .. ":" .. nextSlotMillis
|
||||
local nextSlotJobKey = prefixKey .. nextSlotJobId
|
||||
|
||||
if rcall("EXISTS", nextSlotJobKey) == 0 then
|
||||
-- Next slot is free, use it
|
||||
nextMillis = nextSlotMillis
|
||||
jobId = nextSlotJobId
|
||||
else
|
||||
-- Next slot also has a job, return error code
|
||||
return -11 -- SchedulerJobSlotsBusy
|
||||
end
|
||||
else
|
||||
hasCollision = true
|
||||
end
|
||||
end
|
||||
|
||||
local delay = nextMillis - now
|
||||
|
||||
-- Clamp delay to a minimum of 0
|
||||
if delay < 0 then
|
||||
delay = 0
|
||||
end
|
||||
|
||||
local nextJobKey = schedulerKey .. ":" .. nextMillis
|
||||
|
||||
if not hasCollision or removedPrevJob then
|
||||
-- jobId already calculated above during collision check
|
||||
|
||||
storeJobScheduler(jobSchedulerId, schedulerKey, repeatKey, nextMillis, schedulerOpts, templateData, templateOpts)
|
||||
|
||||
rcall("INCR", KEYS[8])
|
||||
|
||||
addJobFromScheduler(nextJobKey, jobId, jobOpts, waitKey, pausedKey, KEYS[11], metaKey, prioritizedKey, KEYS[10],
|
||||
delayedKey, KEYS[7], eventsKey, schedulerOpts['name'], maxEvents, now, templateData, jobSchedulerId, delay)
|
||||
elseif hasCollision then
|
||||
-- For 'pattern' case: return error code
|
||||
return -10 -- SchedulerJobIdCollision
|
||||
end
|
||||
|
||||
if ARGV[9] ~= "" then
|
||||
rcall("HSET", ARGV[9], "nrjid", jobId)
|
||||
end
|
||||
|
||||
return {jobId .. "", delay}
|
||||
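A minimal sketch (not part of the vendored file) of the public entry point for this script, assuming a BullMQ version that ships job schedulers; scheduler id, cron pattern and template are illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('reports', { connection: { host: 'localhost', port: 6379 } });

// Upserting a scheduler replaces the delayed job of the next iteration,
// which is what the removal logic in this script implements.
await queue.upsertJobScheduler(
  'nightly-report',              // jobs scheduler id (ARGV[3])
  { pattern: '0 3 * * *' },      // alternatively { every: <ms> }
  { name: 'report', data: { kind: 'nightly' } },
);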
30
backend/node_modules/bullmq/dist/cjs/commands/addLog-2.lua
generated
vendored
Normal file
@@ -0,0 +1,30 @@
--[[
  Add job log

  Input:
    KEYS[1] job id key
    KEYS[2] job logs key

    ARGV[1] id
    ARGV[2] log
    ARGV[3] keepLogs

  Output:
    -1 - Missing job.
]]
local rcall = redis.call

if rcall("EXISTS", KEYS[1]) == 1 then -- Make sure the job exists
    local logCount = rcall("RPUSH", KEYS[2], ARGV[2])

    if ARGV[3] ~= '' then
        local keepLogs = tonumber(ARGV[3])
        rcall("LTRIM", KEYS[2], -keepLogs, -1)

        return math.min(keepLogs, logCount)
    end

    return logCount
else
    return -1
end
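A minimal sketch (not part of the vendored file): job.log is the public API backed by this script; names are illustrative.

import { Worker } from 'bullmq';

const worker = new Worker('emails', async job => {
  // Appends a line to the job's log list; with keepLogs set on the job's
  // options, older entries are trimmed to the most recent N lines.
  await job.log('starting to send');
  // ...
}, { connection: { host: 'localhost', port: 6379 } });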
98
backend/node_modules/bullmq/dist/cjs/commands/addParentJob-6.lua
generated
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
--[[
|
||||
Adds a parent job to the queue by doing the following:
|
||||
- Increases the job counter if needed.
|
||||
- Creates a new job key with the job data.
|
||||
- adds the job to the waiting-children zset
|
||||
|
||||
Input:
|
||||
KEYS[1] 'meta'
|
||||
KEYS[2] 'id'
|
||||
KEYS[3] 'delayed'
|
||||
KEYS[4] 'waiting-children'
|
||||
KEYS[5] 'completed'
|
||||
KEYS[6] events stream key
|
||||
|
||||
ARGV[1] msgpacked arguments array
|
||||
[1] key prefix,
|
||||
[2] custom id (will not generate one automatically)
|
||||
[3] name
|
||||
[4] timestamp
|
||||
[5] parentKey?
|
||||
[6] parent dependencies key.
|
||||
[7] parent? {id, queueKey}
|
||||
[8] repeat job key
|
||||
[9] deduplication key
|
||||
|
||||
ARGV[2] Json stringified job data
|
||||
ARGV[3] msgpacked options
|
||||
|
||||
Output:
|
||||
jobId - OK
|
||||
-5 - Missing parent key
|
||||
]]
|
||||
local metaKey = KEYS[1]
|
||||
local idKey = KEYS[2]
|
||||
|
||||
local completedKey = KEYS[5]
|
||||
local eventsKey = KEYS[6]
|
||||
|
||||
local jobId
|
||||
local jobIdKey
|
||||
local rcall = redis.call
|
||||
|
||||
local args = cmsgpack.unpack(ARGV[1])
|
||||
|
||||
local data = ARGV[2]
|
||||
local opts = cmsgpack.unpack(ARGV[3])
|
||||
|
||||
local parentKey = args[5]
|
||||
local parent = args[7]
|
||||
local repeatJobKey = args[8]
|
||||
local deduplicationKey = args[9]
|
||||
local parentData
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/handleDuplicatedJob"
|
||||
--- @include "includes/storeJob"
|
||||
|
||||
if parentKey ~= nil then
|
||||
if rcall("EXISTS", parentKey) ~= 1 then return -5 end
|
||||
|
||||
parentData = cjson.encode(parent)
|
||||
end
|
||||
|
||||
local jobCounter = rcall("INCR", idKey)
|
||||
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
|
||||
local parentDependenciesKey = args[6]
|
||||
local timestamp = args[4]
|
||||
if args[2] == "" then
|
||||
jobId = jobCounter
|
||||
jobIdKey = args[1] .. jobId
|
||||
else
|
||||
jobId = args[2]
|
||||
jobIdKey = args[1] .. jobId
|
||||
if rcall("EXISTS", jobIdKey) == 1 then
|
||||
return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
|
||||
parentData, parentDependenciesKey, completedKey, eventsKey,
|
||||
maxEvents, timestamp)
|
||||
end
|
||||
end
|
||||
|
||||
-- Store the job.
|
||||
storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2], opts, timestamp,
|
||||
parentKey, parentData, repeatJobKey)
|
||||
|
||||
local waitChildrenKey = KEYS[4]
|
||||
rcall("ZADD", waitChildrenKey, timestamp, jobId)
|
||||
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event",
|
||||
"waiting-children", "jobId", jobId)
|
||||
|
||||
-- Check if this job is a child of another job, if so add it to the parent's dependencies
|
||||
if parentDependenciesKey ~= nil then
|
||||
rcall("SADD", parentDependenciesKey, jobIdKey)
|
||||
end
|
||||
|
||||
return jobId .. "" -- convert to string
|
||||
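A minimal sketch (not part of the vendored file): parent jobs are normally created through the FlowProducer API, which lands in this script; queue and job names are illustrative.

import { FlowProducer } from 'bullmq';

const flow = new FlowProducer({ connection: { host: 'localhost', port: 6379 } });

// The parent sits in the 'waiting-children' zset (this script) until all
// of its children have been processed.
await flow.add({
  name: 'assemble',
  queueName: 'builds',
  children: [
    { name: 'compile', queueName: 'builds' },
    { name: 'test', queueName: 'builds' },
  ],
});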
117
backend/node_modules/bullmq/dist/cjs/commands/addPrioritizedJob-9.lua
generated
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
--[[
|
||||
Adds a prioritized job to the queue by doing the following:
|
||||
- Increases the job counter if needed.
|
||||
- Creates a new job key with the job data.
|
||||
- Adds the job to the "added" list so that workers get notified.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'marker',
|
||||
KEYS[2] 'meta'
|
||||
KEYS[3] 'id'
|
||||
KEYS[4] 'prioritized'
|
||||
KEYS[5] 'delayed'
|
||||
KEYS[6] 'completed'
|
||||
KEYS[7] 'active'
|
||||
KEYS[8] events stream key
|
||||
KEYS[9] 'pc' priority counter
|
||||
|
||||
ARGV[1] msgpacked arguments array
|
||||
[1] key prefix,
|
||||
[2] custom id (will not generate one automatically)
|
||||
[3] name
|
||||
[4] timestamp
|
||||
[5] parentKey?
|
||||
[6] parent dependencies key.
|
||||
[7] parent? {id, queueKey}
|
||||
[8] repeat job key
|
||||
[9] deduplication key
|
||||
|
||||
ARGV[2] Json stringified job data
|
||||
ARGV[3] msgpacked options
|
||||
|
||||
Output:
|
||||
jobId - OK
|
||||
-5 - Missing parent key
|
||||
]]
|
||||
local metaKey = KEYS[2]
|
||||
local idKey = KEYS[3]
|
||||
local priorityKey = KEYS[4]
|
||||
|
||||
local completedKey = KEYS[6]
|
||||
local activeKey = KEYS[7]
|
||||
local eventsKey = KEYS[8]
|
||||
local priorityCounterKey = KEYS[9]
|
||||
|
||||
local jobId
|
||||
local jobIdKey
|
||||
local rcall = redis.call
|
||||
|
||||
local args = cmsgpack.unpack(ARGV[1])
|
||||
|
||||
local data = ARGV[2]
|
||||
local opts = cmsgpack.unpack(ARGV[3])
|
||||
|
||||
local parentKey = args[5]
|
||||
local parent = args[7]
|
||||
local repeatJobKey = args[8]
|
||||
local deduplicationKey = args[9]
|
||||
local parentData
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addJobWithPriority"
|
||||
--- @include "includes/deduplicateJob"
|
||||
--- @include "includes/storeJob"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/handleDuplicatedJob"
|
||||
--- @include "includes/isQueuePausedOrMaxed"
|
||||
|
||||
if parentKey ~= nil then
|
||||
if rcall("EXISTS", parentKey) ~= 1 then return -5 end
|
||||
|
||||
parentData = cjson.encode(parent)
|
||||
end
|
||||
|
||||
local jobCounter = rcall("INCR", idKey)
|
||||
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
|
||||
local parentDependenciesKey = args[6]
|
||||
local timestamp = args[4]
|
||||
if args[2] == "" then
|
||||
jobId = jobCounter
|
||||
jobIdKey = args[1] .. jobId
|
||||
else
|
||||
jobId = args[2]
|
||||
jobIdKey = args[1] .. jobId
|
||||
if rcall("EXISTS", jobIdKey) == 1 then
|
||||
return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
|
||||
parentData, parentDependenciesKey, completedKey, eventsKey,
|
||||
maxEvents, timestamp)
|
||||
end
|
||||
end
|
||||
|
||||
local deduplicationJobId = deduplicateJob(opts['de'], jobId, KEYS[5],
|
||||
deduplicationKey, eventsKey, maxEvents, args[1])
|
||||
if deduplicationJobId then
|
||||
return deduplicationJobId
|
||||
end
|
||||
|
||||
-- Store the job.
|
||||
local delay, priority = storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2],
|
||||
opts, timestamp, parentKey, parentData,
|
||||
repeatJobKey)
|
||||
|
||||
-- Add the job to the prioritized set
|
||||
local isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, activeKey)
|
||||
addJobWithPriority( KEYS[1], priorityKey, priority, jobId, priorityCounterKey, isPausedOrMaxed)
|
||||
|
||||
-- Emit waiting event
|
||||
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "waiting",
|
||||
"jobId", jobId)
|
||||
|
||||
-- Check if this job is a child of another job, if so add it to the parent's dependencies
|
||||
if parentDependenciesKey ~= nil then
|
||||
rcall("SADD", parentDependenciesKey, jobIdKey)
|
||||
end
|
||||
|
||||
return jobId .. "" -- convert to string
|
||||
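A minimal sketch (not part of the vendored file) of how a prioritized job is added from the Queue API; names and data are illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('tasks', { connection: { host: 'localhost', port: 6379 } });

// Lower numbers run first; the script stores the job in the 'prioritized'
// zset with a score derived from the priority and the 'pc' counter, which
// keeps FIFO order within the same priority.
await queue.add('task', { step: 1 }, { priority: 1 });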
84
backend/node_modules/bullmq/dist/cjs/commands/addRepeatableJob-2.lua
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
--[[
|
||||
Adds a repeatable job
|
||||
|
||||
Input:
|
||||
KEYS[1] 'repeat' key
|
||||
KEYS[2] 'delayed' key
|
||||
|
||||
ARGV[1] next milliseconds
|
||||
ARGV[2] msgpacked options
|
||||
[1] name
|
||||
[2] tz?
|
||||
[3] pattern?
|
||||
[4] endDate?
|
||||
[5] every?
|
||||
ARGV[3] legacy custom key TODO: remove this logic in next breaking change
|
||||
ARGV[4] custom key
|
||||
ARGV[5] prefix key
|
||||
|
||||
Output:
|
||||
repeatableKey - OK
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local repeatKey = KEYS[1]
|
||||
local delayedKey = KEYS[2]
|
||||
|
||||
local nextMillis = ARGV[1]
|
||||
local legacyCustomKey = ARGV[3]
|
||||
local customKey = ARGV[4]
|
||||
local prefixKey = ARGV[5]
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/removeJob"
|
||||
|
||||
local function storeRepeatableJob(repeatKey, customKey, nextMillis, rawOpts)
|
||||
rcall("ZADD", repeatKey, nextMillis, customKey)
|
||||
local opts = cmsgpack.unpack(rawOpts)
|
||||
|
||||
local optionalValues = {}
|
||||
if opts['tz'] then
|
||||
table.insert(optionalValues, "tz")
|
||||
table.insert(optionalValues, opts['tz'])
|
||||
end
|
||||
|
||||
if opts['pattern'] then
|
||||
table.insert(optionalValues, "pattern")
|
||||
table.insert(optionalValues, opts['pattern'])
|
||||
end
|
||||
|
||||
if opts['endDate'] then
|
||||
table.insert(optionalValues, "endDate")
|
||||
table.insert(optionalValues, opts['endDate'])
|
||||
end
|
||||
|
||||
if opts['every'] then
|
||||
table.insert(optionalValues, "every")
|
||||
table.insert(optionalValues, opts['every'])
|
||||
end
|
||||
|
||||
rcall("HMSET", repeatKey .. ":" .. customKey, "name", opts['name'],
|
||||
unpack(optionalValues))
|
||||
|
||||
return customKey
|
||||
end
|
||||
|
||||
-- If we are overriding a repeatable job we must delete the delayed job for
|
||||
-- the next iteration.
|
||||
local prevMillis = rcall("ZSCORE", repeatKey, customKey)
|
||||
if prevMillis then
|
||||
local delayedJobId = "repeat:" .. customKey .. ":" .. prevMillis
|
||||
local nextDelayedJobId = repeatKey .. ":" .. customKey .. ":" .. nextMillis
|
||||
|
||||
if rcall("ZSCORE", delayedKey, delayedJobId)
|
||||
and rcall("EXISTS", nextDelayedJobId) ~= 1 then
|
||||
removeJob(delayedJobId, true, prefixKey, true --[[remove debounce key]])
|
||||
rcall("ZREM", delayedKey, delayedJobId)
|
||||
end
|
||||
end
|
||||
|
||||
-- Keep backwards compatibility with old repeatable jobs (<= 3.0.0)
|
||||
if rcall("ZSCORE", repeatKey, legacyCustomKey) ~= false then
|
||||
return storeRepeatableJob(repeatKey, legacyCustomKey, nextMillis, ARGV[2])
|
||||
end
|
||||
|
||||
return storeRepeatableJob(repeatKey, customKey, nextMillis, ARGV[2])
|
||||
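A minimal sketch (not part of the vendored file): this script backs the legacy 'repeat' job option, where each iteration materializes as a delayed job; names are illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('maintenance', { connection: { host: 'localhost', port: 6379 } });

// Re-adding with different repeat options overrides the repeatable job,
// which is when the script removes the delayed job of the next iteration.
await queue.add('cleanup', {}, { repeat: { every: 15 * 60 * 1000 } });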
122
backend/node_modules/bullmq/dist/cjs/commands/addStandardJob-9.lua
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
--[[
|
||||
Adds a job to the queue by doing the following:
|
||||
- Increases the job counter if needed.
|
||||
- Creates a new job key with the job data.
|
||||
|
||||
- if delayed:
|
||||
- computes timestamp.
|
||||
- adds to delayed zset.
|
||||
- Emits a global event 'delayed' if the job is delayed.
|
||||
- if not delayed
|
||||
- Adds the jobId to the wait/paused list in one of three ways:
|
||||
- LIFO
|
||||
- FIFO
|
||||
- prioritized.
|
||||
- Adds the job to the "added" list so that workers get notified.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'wait',
|
||||
KEYS[2] 'paused'
|
||||
KEYS[3] 'meta'
|
||||
KEYS[4] 'id'
|
||||
KEYS[5] 'completed'
|
||||
KEYS[6] 'delayed'
|
||||
KEYS[7] 'active'
|
||||
KEYS[8] events stream key
|
||||
KEYS[9] marker key
|
||||
|
||||
ARGV[1] msgpacked arguments array
|
||||
[1] key prefix,
|
||||
[2] custom id (will not generate one automatically)
|
||||
[3] name
|
||||
[4] timestamp
|
||||
[5] parentKey?
|
||||
[6] parent dependencies key.
|
||||
[7] parent? {id, queueKey}
|
||||
[8] repeat job key
|
||||
[9] deduplication key
|
||||
|
||||
ARGV[2] Json stringified job data
|
||||
ARGV[3] msgpacked options
|
||||
|
||||
Output:
|
||||
jobId - OK
|
||||
-5 - Missing parent key
|
||||
]]
|
||||
local eventsKey = KEYS[8]
|
||||
|
||||
local jobId
|
||||
local jobIdKey
|
||||
local rcall = redis.call
|
||||
|
||||
local args = cmsgpack.unpack(ARGV[1])
|
||||
|
||||
local data = ARGV[2]
|
||||
local opts = cmsgpack.unpack(ARGV[3])
|
||||
|
||||
local parentKey = args[5]
|
||||
local parent = args[7]
|
||||
local repeatJobKey = args[8]
|
||||
local deduplicationKey = args[9]
|
||||
local parentData
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addJobInTargetList"
|
||||
--- @include "includes/deduplicateJob"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/getTargetQueueList"
|
||||
--- @include "includes/handleDuplicatedJob"
|
||||
--- @include "includes/storeJob"
|
||||
|
||||
if parentKey ~= nil then
|
||||
if rcall("EXISTS", parentKey) ~= 1 then return -5 end
|
||||
|
||||
parentData = cjson.encode(parent)
|
||||
end
|
||||
|
||||
local jobCounter = rcall("INCR", KEYS[4])
|
||||
|
||||
local metaKey = KEYS[3]
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
|
||||
local parentDependenciesKey = args[6]
|
||||
local timestamp = args[4]
|
||||
if args[2] == "" then
|
||||
jobId = jobCounter
|
||||
jobIdKey = args[1] .. jobId
|
||||
else
|
||||
jobId = args[2]
|
||||
jobIdKey = args[1] .. jobId
|
||||
if rcall("EXISTS", jobIdKey) == 1 then
|
||||
return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
|
||||
parentData, parentDependenciesKey, KEYS[5], eventsKey,
|
||||
maxEvents, timestamp)
|
||||
end
|
||||
end
|
||||
|
||||
local deduplicationJobId = deduplicateJob(opts['de'], jobId, KEYS[6],
|
||||
deduplicationKey, eventsKey, maxEvents, args[1])
|
||||
if deduplicationJobId then
|
||||
return deduplicationJobId
|
||||
end
|
||||
|
||||
-- Store the job.
|
||||
storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2], opts, timestamp,
|
||||
parentKey, parentData, repeatJobKey)
|
||||
|
||||
local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[7], KEYS[1], KEYS[2])
|
||||
|
||||
-- LIFO or FIFO
|
||||
local pushCmd = opts['lifo'] and 'RPUSH' or 'LPUSH'
|
||||
addJobInTargetList(target, KEYS[9], pushCmd, isPausedOrMaxed, jobId)
|
||||
|
||||
-- Emit waiting event
|
||||
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "waiting",
|
||||
"jobId", jobId)
|
||||
|
||||
-- Check if this job is a child of another job, if so add it to the parent's dependencies
|
||||
if parentDependenciesKey ~= nil then
|
||||
rcall("SADD", parentDependenciesKey, jobIdKey)
|
||||
end
|
||||
|
||||
return jobId .. "" -- convert to string
|
||||
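A minimal sketch (not part of the vendored file) of the standard add path and the LIFO/FIFO choice made at the end of this script; names and data are illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('tasks', { connection: { host: 'localhost', port: 6379 } });

await queue.add('task', { n: 1 });                  // FIFO (LPUSH)
await queue.add('task', { n: 2 }, { lifo: true });  // LIFO (RPUSH)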
55
backend/node_modules/bullmq/dist/cjs/commands/changeDelay-4.lua
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
--[[
|
||||
Change job delay when it is in delayed set.
|
||||
Input:
|
||||
KEYS[1] delayed key
|
||||
KEYS[2] meta key
|
||||
KEYS[3] marker key
|
||||
KEYS[4] events stream
|
||||
|
||||
ARGV[1] delay
|
||||
ARGV[2] timestamp
|
||||
ARGV[3] the id of the job
|
||||
ARGV[4] job key
|
||||
|
||||
Output:
|
||||
0 - OK
|
||||
-1 - Missing job.
|
||||
-3 - Job not in delayed set.
|
||||
|
||||
Events:
|
||||
- delayed key.
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addDelayMarkerIfNeeded"
|
||||
--- @include "includes/getDelayedScore"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
|
||||
if rcall("EXISTS", ARGV[4]) == 1 then
|
||||
local jobId = ARGV[3]
|
||||
|
||||
local delay = tonumber(ARGV[1])
|
||||
local score, delayedTimestamp = getDelayedScore(KEYS[1], ARGV[2], delay)
|
||||
|
||||
local numRemovedElements = rcall("ZREM", KEYS[1], jobId)
|
||||
|
||||
if numRemovedElements < 1 then
|
||||
return -3
|
||||
end
|
||||
|
||||
rcall("HSET", ARGV[4], "delay", delay)
|
||||
rcall("ZADD", KEYS[1], score, jobId)
|
||||
|
||||
local maxEvents = getOrSetMaxEvents(KEYS[2])
|
||||
|
||||
rcall("XADD", KEYS[4], "MAXLEN", "~", maxEvents, "*", "event", "delayed",
|
||||
"jobId", jobId, "delay", delayedTimestamp)
|
||||
|
||||
-- mark that a delayed job is available
|
||||
addDelayMarkerIfNeeded(KEYS[3], KEYS[1])
|
||||
|
||||
return 0
|
||||
else
|
||||
return -1
|
||||
end
|
||||
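A minimal sketch (not part of the vendored file): Job.changeDelay is the public wrapper for this script; names and values are illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('emails', { connection: { host: 'localhost', port: 6379 } });

const job = await queue.add('reminder', {}, { delay: 60_000 });
// Only works while the job is still in the delayed set (-3 otherwise).
await job.changeDelay(5 * 60_000);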
68
backend/node_modules/bullmq/dist/cjs/commands/changePriority-7.lua
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
--[[
|
||||
Change job priority
|
||||
Input:
|
||||
KEYS[1] 'wait',
|
||||
KEYS[2] 'paused'
|
||||
KEYS[3] 'meta'
|
||||
KEYS[4] 'prioritized'
|
||||
KEYS[5] 'active'
|
||||
KEYS[6] 'pc' priority counter
|
||||
KEYS[7] 'marker'
|
||||
|
||||
ARGV[1] priority value
|
||||
ARGV[2] prefix key
|
||||
ARGV[3] job id
|
||||
ARGV[4] lifo
|
||||
|
||||
Output:
|
||||
0 - OK
|
||||
-1 - Missing job
|
||||
]]
|
||||
local jobId = ARGV[3]
|
||||
local jobKey = ARGV[2] .. jobId
|
||||
local priority = tonumber(ARGV[1])
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addJobInTargetList"
|
||||
--- @include "includes/addJobWithPriority"
|
||||
--- @include "includes/getTargetQueueList"
|
||||
--- @include "includes/pushBackJobWithPriority"
|
||||
|
||||
local function reAddJobWithNewPriority( prioritizedKey, markerKey, targetKey,
|
||||
priorityCounter, lifo, priority, jobId, isPausedOrMaxed)
|
||||
if priority == 0 then
|
||||
local pushCmd = lifo and 'RPUSH' or 'LPUSH'
|
||||
addJobInTargetList(targetKey, markerKey, pushCmd, isPausedOrMaxed, jobId)
|
||||
else
|
||||
if lifo then
|
||||
pushBackJobWithPriority(prioritizedKey, priority, jobId)
|
||||
else
|
||||
addJobWithPriority(markerKey, prioritizedKey, priority, jobId,
|
||||
priorityCounter, isPausedOrMaxed)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if rcall("EXISTS", jobKey) == 1 then
|
||||
local metaKey = KEYS[3]
|
||||
local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[5], KEYS[1], KEYS[2])
|
||||
local prioritizedKey = KEYS[4]
|
||||
local priorityCounterKey = KEYS[6]
|
||||
local markerKey = KEYS[7]
|
||||
|
||||
-- Re-add with the new priority
|
||||
if rcall("ZREM", prioritizedKey, jobId) > 0 then
|
||||
reAddJobWithNewPriority( prioritizedKey, markerKey, target,
|
||||
priorityCounterKey, ARGV[4] == '1', priority, jobId, isPausedOrMaxed)
|
||||
elseif rcall("LREM", target, -1, jobId) > 0 then
|
||||
reAddJobWithNewPriority( prioritizedKey, markerKey, target,
|
||||
priorityCounterKey, ARGV[4] == '1', priority, jobId, isPausedOrMaxed)
|
||||
end
|
||||
|
||||
rcall("HSET", jobKey, "priority", priority)
|
||||
|
||||
return 0
|
||||
else
|
||||
return -1
|
||||
end
|
||||
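A minimal sketch (not part of the vendored file): Job.changePriority is the public wrapper for this script; names and values are illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('tasks', { connection: { host: 'localhost', port: 6379 } });

const job = await queue.add('task', {}, { priority: 10 });
// Priority 0 moves the job back to the plain wait list; any other value
// re-inserts it into the prioritized zset, matching the branches above.
await job.changePriority({ priority: 1 });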
59
backend/node_modules/bullmq/dist/cjs/commands/cleanJobsInSet-3.lua
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
--[[
|
||||
Remove jobs from the specific set.
|
||||
|
||||
Input:
|
||||
KEYS[1] set key,
|
||||
KEYS[2] events stream key
|
||||
KEYS[3] repeat key
|
||||
|
||||
ARGV[1] jobKey prefix
|
||||
ARGV[2] timestamp
|
||||
ARGV[3] limit the number of jobs to be removed. 0 is unlimited
|
||||
ARGV[4] set name, can be any of 'wait', 'active', 'paused', 'delayed', 'completed', or 'failed'
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local repeatKey = KEYS[3]
|
||||
local rangeStart = 0
|
||||
local rangeEnd = -1
|
||||
|
||||
local limit = tonumber(ARGV[3])
|
||||
|
||||
-- If we're only deleting _n_ items, avoid retrieving all items
|
||||
-- for faster performance
|
||||
--
|
||||
-- Start from the tail of the list, since that's where oldest elements
|
||||
-- are generally added for FIFO lists
|
||||
if limit > 0 then
|
||||
rangeStart = -1 - limit + 1
|
||||
rangeEnd = -1
|
||||
end
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/cleanList"
|
||||
--- @include "includes/cleanSet"
|
||||
|
||||
local result
|
||||
if ARGV[4] == "active" then
|
||||
result = cleanList(KEYS[1], ARGV[1], rangeStart, rangeEnd, ARGV[2], false --[[ hasFinished ]],
|
||||
repeatKey)
|
||||
elseif ARGV[4] == "delayed" then
|
||||
rangeEnd = "+inf"
|
||||
result = cleanSet(KEYS[1], ARGV[1], rangeEnd, ARGV[2], limit,
|
||||
{"processedOn", "timestamp"}, false --[[ hasFinished ]], repeatKey)
|
||||
elseif ARGV[4] == "prioritized" then
|
||||
rangeEnd = "+inf"
|
||||
result = cleanSet(KEYS[1], ARGV[1], rangeEnd, ARGV[2], limit,
|
||||
{"timestamp"}, false --[[ hasFinished ]], repeatKey)
|
||||
elseif ARGV[4] == "wait" or ARGV[4] == "paused" then
|
||||
result = cleanList(KEYS[1], ARGV[1], rangeStart, rangeEnd, ARGV[2], true --[[ hasFinished ]],
|
||||
repeatKey)
|
||||
else
|
||||
rangeEnd = ARGV[2]
|
||||
-- No need to pass the repeat key, as at this point the job won't be related to a job scheduler
|
||||
result = cleanSet(KEYS[1], ARGV[1], rangeEnd, ARGV[2], limit,
|
||||
{"finishedOn"}, true --[[ hasFinished ]])
|
||||
end
|
||||
|
||||
rcall("XADD", KEYS[2], "*", "event", "cleaned", "count", result[2])
|
||||
|
||||
return result[1]
|
||||
41
backend/node_modules/bullmq/dist/cjs/commands/drain-5.lua
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
--[[
|
||||
Drains the queue, removing all jobs that are waiting
|
||||
or delayed, but not active, completed or failed.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'wait',
|
||||
KEYS[2] 'paused'
|
||||
KEYS[3] 'delayed'
|
||||
KEYS[4] 'prioritized'
|
||||
KEYS[5] 'jobschedulers' (repeat)
|
||||
|
||||
ARGV[1] queue key prefix
|
||||
ARGV[2] should clean delayed jobs
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local queueBaseKey = ARGV[1]
|
||||
|
||||
--- @include "includes/removeListJobs"
|
||||
--- @include "includes/removeZSetJobs"
|
||||
|
||||
-- We must not remove delayed jobs if they are associated with a job scheduler.
|
||||
local scheduledJobs = {}
|
||||
local jobSchedulers = rcall("ZRANGE", KEYS[5], 0, -1, "WITHSCORES")
|
||||
|
||||
-- For every job scheduler, get the current delayed job id.
|
||||
for i = 1, #jobSchedulers, 2 do
|
||||
local jobSchedulerId = jobSchedulers[i]
|
||||
local jobSchedulerMillis = jobSchedulers[i + 1]
|
||||
|
||||
local delayedJobId = "repeat:" .. jobSchedulerId .. ":" .. jobSchedulerMillis
|
||||
scheduledJobs[delayedJobId] = true
|
||||
end
|
||||
|
||||
removeListJobs(KEYS[1], true, queueBaseKey, 0, scheduledJobs) -- wait
|
||||
removeListJobs(KEYS[2], true, queueBaseKey, 0, scheduledJobs) -- paused
|
||||
|
||||
if ARGV[2] == "1" then
|
||||
removeZSetJobs(KEYS[3], true, queueBaseKey, 0, scheduledJobs) -- delayed
|
||||
end
|
||||
|
||||
removeZSetJobs(KEYS[4], true, queueBaseKey, 0, scheduledJobs) -- prioritized
|
||||
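A minimal sketch (not part of the vendored file): Queue.drain runs this script; the queue name is illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('tasks', { connection: { host: 'localhost', port: 6379 } });

await queue.drain();     // removes waiting + paused jobs
await queue.drain(true); // additionally removes delayed jobs
// (delayed jobs owned by a job scheduler are kept either way, per the
// scheduledJobs check above)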
23
backend/node_modules/bullmq/dist/cjs/commands/extendLock-2.lua
generated
vendored
Normal file
@@ -0,0 +1,23 @@
--[[
  Extends the lock and removes the job from the stalled set.

  Input:
    KEYS[1] 'lock',
    KEYS[2] 'stalled'

    ARGV[1] token
    ARGV[2] lock duration in milliseconds
    ARGV[3] job id

  Output:
    "1" if the lock was extended successfully.
]]
local rcall = redis.call
if rcall("GET", KEYS[1]) == ARGV[1] then
    -- if rcall("SET", KEYS[1], ARGV[1], "PX", ARGV[2], "XX") then
    if rcall("SET", KEYS[1], ARGV[1], "PX", ARGV[2]) then
        rcall("SREM", KEYS[2], ARGV[3])
        return 1
    end
end
return 0
48
backend/node_modules/bullmq/dist/cjs/commands/extendLocks-1.lua
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
--[[
|
||||
Extend locks for multiple jobs and remove them from the stalled set if successful.
|
||||
Return the list of job IDs for which the operation failed.
|
||||
|
||||
KEYS[1] = stalled key
|
||||
|
||||
ARGV[1] = baseKey
|
||||
ARGV[2] = tokens
|
||||
ARGV[3] = jobIds
|
||||
ARGV[4] = lockDuration (ms)
|
||||
|
||||
Output:
|
||||
An array of failed job IDs. If empty, all succeeded.
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
local stalledKey = KEYS[1]
|
||||
local baseKey = ARGV[1]
|
||||
local tokens = cmsgpack.unpack(ARGV[2])
|
||||
local jobIds = cmsgpack.unpack(ARGV[3])
|
||||
local lockDuration = ARGV[4]
|
||||
|
||||
local jobCount = #jobIds
|
||||
local failedJobs = {}
|
||||
|
||||
for i = 1, jobCount, 1 do
|
||||
local lockKey = baseKey .. jobIds[i] .. ':lock'
|
||||
local jobId = jobIds[i]
|
||||
local token = tokens[i]
|
||||
|
||||
local currentToken = rcall("GET", lockKey)
|
||||
if currentToken then
|
||||
if currentToken == token then
|
||||
local setResult = rcall("SET", lockKey, token, "PX", lockDuration)
|
||||
if setResult then
|
||||
rcall("SREM", stalledKey, jobId)
|
||||
else
|
||||
table.insert(failedJobs, jobId)
|
||||
end
|
||||
else
|
||||
table.insert(failedJobs, jobId)
|
||||
end
|
||||
else
|
||||
table.insert(failedJobs, jobId)
|
||||
end
|
||||
end
|
||||
|
||||
return failedJobs
|
||||
36
backend/node_modules/bullmq/dist/cjs/commands/getCounts-1.lua
generated
vendored
Normal file
36
backend/node_modules/bullmq/dist/cjs/commands/getCounts-1.lua
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
--[[
|
||||
Get counts per provided states
|
||||
|
||||
Input:
|
||||
KEYS[1] 'prefix'
|
||||
|
||||
ARGV[1...] types
|
||||
]]
|
||||
local rcall = redis.call;
|
||||
local prefix = KEYS[1]
|
||||
local results = {}
|
||||
|
||||
for i = 1, #ARGV do
|
||||
local stateKey = prefix .. ARGV[i]
|
||||
if ARGV[i] == "wait" or ARGV[i] == "paused" then
|
||||
-- Markers in waitlist DEPRECATED in v5: Remove in v6.
|
||||
local marker = rcall("LINDEX", stateKey, -1)
|
||||
if marker and string.sub(marker, 1, 2) == "0:" then
|
||||
local count = rcall("LLEN", stateKey)
|
||||
if count > 1 then
|
||||
rcall("RPOP", stateKey)
|
||||
results[#results+1] = count-1
|
||||
else
|
||||
results[#results+1] = 0
|
||||
end
|
||||
else
|
||||
results[#results+1] = rcall("LLEN", stateKey)
|
||||
end
|
||||
elseif ARGV[i] == "active" then
|
||||
results[#results+1] = rcall("LLEN", stateKey)
|
||||
else
|
||||
results[#results+1] = rcall("ZCARD", stateKey)
|
||||
end
|
||||
end
|
||||
|
||||
return results
|
||||
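A minimal sketch (not part of the vendored file): Queue.getJobCounts is the usual entry point for this script; the queue name is illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('tasks', { connection: { host: 'localhost', port: 6379 } });

const counts = await queue.getJobCounts('wait', 'active', 'delayed', 'failed');
// e.g. { wait: 3, active: 1, delayed: 0, failed: 2 }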
35
backend/node_modules/bullmq/dist/cjs/commands/getCountsPerPriority-4.lua
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
--[[
|
||||
Get counts per provided states
|
||||
|
||||
Input:
|
||||
KEYS[1] wait key
|
||||
KEYS[2] paused key
|
||||
KEYS[3] meta key
|
||||
KEYS[4] prioritized key
|
||||
|
||||
ARGV[1...] priorities
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local results = {}
|
||||
local waitKey = KEYS[1]
|
||||
local pausedKey = KEYS[2]
|
||||
local prioritizedKey = KEYS[4]
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/isQueuePaused"
|
||||
|
||||
for i = 1, #ARGV do
|
||||
local priority = tonumber(ARGV[i])
|
||||
if priority == 0 then
|
||||
if isQueuePaused(KEYS[3]) then
|
||||
results[#results+1] = rcall("LLEN", pausedKey)
|
||||
else
|
||||
results[#results+1] = rcall("LLEN", waitKey)
|
||||
end
|
||||
else
|
||||
results[#results+1] = rcall("ZCOUNT", prioritizedKey,
|
||||
priority * 0x100000000, (priority + 1) * 0x100000000 - 1)
|
||||
end
|
||||
end
|
||||
|
||||
return results
|
||||
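A minimal sketch (not part of the vendored file) of the public wrapper for this script; the queue name is illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('tasks', { connection: { host: 'localhost', port: 6379 } });

// Priority 0 maps to the plain wait/paused list; other priorities are
// counted via a score range inside the 'prioritized' zset.
const counts = await queue.getCountsPerPriority([0, 1, 2]);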
31
backend/node_modules/bullmq/dist/cjs/commands/getDependencyCounts-4.lua
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
--[[
|
||||
Get counts per child states
|
||||
|
||||
Input:
|
||||
KEYS[1] processed key
|
||||
KEYS[2] unprocessed key
|
||||
KEYS[3] ignored key
|
||||
KEYS[4] failed key
|
||||
|
||||
ARGV[1...] types
|
||||
]]
|
||||
local rcall = redis.call;
|
||||
local processedKey = KEYS[1]
|
||||
local unprocessedKey = KEYS[2]
|
||||
local ignoredKey = KEYS[3]
|
||||
local failedKey = KEYS[4]
|
||||
local results = {}
|
||||
|
||||
for i = 1, #ARGV do
|
||||
if ARGV[i] == "processed" then
|
||||
results[#results+1] = rcall("HLEN", processedKey)
|
||||
elseif ARGV[i] == "unprocessed" then
|
||||
results[#results+1] = rcall("SCARD", unprocessedKey)
|
||||
elseif ARGV[i] == "ignored" then
|
||||
results[#results+1] = rcall("HLEN", ignoredKey)
|
||||
else
|
||||
results[#results+1] = rcall("ZCARD", failedKey)
|
||||
end
|
||||
end
|
||||
|
||||
return results
|
||||
19
backend/node_modules/bullmq/dist/cjs/commands/getJobScheduler-1.lua
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
--[[
|
||||
Get job scheduler record.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'repeat' key
|
||||
|
||||
ARGV[1] id
|
||||
]]
|
||||
|
||||
local rcall = redis.call
|
||||
local jobSchedulerKey = KEYS[1] .. ":" .. ARGV[1]
|
||||
|
||||
local score = rcall("ZSCORE", KEYS[1], ARGV[1])
|
||||
|
||||
if score then
|
||||
return {rcall("HGETALL", jobSchedulerKey), score} -- get job data
|
||||
end
|
||||
|
||||
return {nil, nil}
|
||||
19
backend/node_modules/bullmq/dist/cjs/commands/getMetrics-2.lua
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
--[[
|
||||
Get metrics
|
||||
|
||||
Input:
|
||||
KEYS[1] 'metrics' key
|
||||
KEYS[2] 'metrics data' key
|
||||
|
||||
ARGV[1] start index
|
||||
ARGV[2] end index
|
||||
]]
|
||||
local rcall = redis.call;
|
||||
local metricsKey = KEYS[1]
|
||||
local dataKey = KEYS[2]
|
||||
|
||||
local metrics = rcall("HMGET", metricsKey, "count", "prevTS", "prevCount")
|
||||
local data = rcall("LRANGE", dataKey, tonumber(ARGV[1]), tonumber(ARGV[2]))
|
||||
local numPoints = rcall("LLEN", dataKey)
|
||||
|
||||
return {metrics, data, numPoints}
|
||||
70
backend/node_modules/bullmq/dist/cjs/commands/getRanges-1.lua
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
--[[
|
||||
Get job ids per provided states
|
||||
|
||||
Input:
|
||||
KEYS[1] 'prefix'
|
||||
|
||||
ARGV[1] start
|
||||
ARGV[2] end
|
||||
ARGV[3] asc
|
||||
ARGV[4...] types
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local prefix = KEYS[1]
|
||||
local rangeStart = tonumber(ARGV[1])
|
||||
local rangeEnd = tonumber(ARGV[2])
|
||||
local asc = ARGV[3]
|
||||
local results = {}
|
||||
|
||||
local function getRangeInList(listKey, asc, rangeStart, rangeEnd, results)
|
||||
if asc == "1" then
|
||||
local modifiedRangeStart
|
||||
local modifiedRangeEnd
|
||||
if rangeStart == -1 then
|
||||
modifiedRangeStart = 0
|
||||
else
|
||||
modifiedRangeStart = -(rangeStart + 1)
|
||||
end
|
||||
|
||||
if rangeEnd == -1 then
|
||||
modifiedRangeEnd = 0
|
||||
else
|
||||
modifiedRangeEnd = -(rangeEnd + 1)
|
||||
end
|
||||
|
||||
results[#results+1] = rcall("LRANGE", listKey,
|
||||
modifiedRangeEnd,
|
||||
modifiedRangeStart)
|
||||
else
|
||||
results[#results+1] = rcall("LRANGE", listKey, rangeStart, rangeEnd)
|
||||
end
|
||||
end
|
||||
|
||||
for i = 4, #ARGV do
|
||||
local stateKey = prefix .. ARGV[i]
|
||||
if ARGV[i] == "wait" or ARGV[i] == "paused" then
|
||||
-- Markers in waitlist DEPRECATED in v5: Remove in v6.
|
||||
local marker = rcall("LINDEX", stateKey, -1)
|
||||
if marker and string.sub(marker, 1, 2) == "0:" then
|
||||
local count = rcall("LLEN", stateKey)
|
||||
if count > 1 then
|
||||
rcall("RPOP", stateKey)
|
||||
getRangeInList(stateKey, asc, rangeStart, rangeEnd, results)
|
||||
else
|
||||
results[#results+1] = {}
|
||||
end
|
||||
else
|
||||
getRangeInList(stateKey, asc, rangeStart, rangeEnd, results)
|
||||
end
|
||||
elseif ARGV[i] == "active" then
|
||||
getRangeInList(stateKey, asc, rangeStart, rangeEnd, results)
|
||||
else
|
||||
if asc == "1" then
|
||||
results[#results+1] = rcall("ZRANGE", stateKey, rangeStart, rangeEnd)
|
||||
else
|
||||
results[#results+1] = rcall("ZREVRANGE", stateKey, rangeStart, rangeEnd)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return results
|
||||
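A minimal sketch (not part of the vendored file): Queue.getJobs uses this script to page through job ids per state; the queue name is illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('tasks', { connection: { host: 'localhost', port: 6379 } });

// Fetch the ten oldest waiting jobs (ascending order).
const jobs = await queue.getJobs(['waiting'], 0, 9, true);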
26
backend/node_modules/bullmq/dist/cjs/commands/getRateLimitTtl-2.lua
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
--[[
|
||||
Get rate limit ttl
|
||||
|
||||
Input:
|
||||
KEYS[1] 'limiter'
|
||||
KEYS[2] 'meta'
|
||||
|
||||
ARGV[1] maxJobs
|
||||
]]
|
||||
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/getRateLimitTTL"
|
||||
|
||||
local rateLimiterKey = KEYS[1]
|
||||
if ARGV[1] ~= "0" then
|
||||
return getRateLimitTTL(tonumber(ARGV[1]), rateLimiterKey)
|
||||
else
|
||||
local rateLimitMax = rcall("HGET", KEYS[2], "max")
|
||||
if rateLimitMax then
|
||||
return getRateLimitTTL(tonumber(rateLimitMax), rateLimiterKey)
|
||||
end
|
||||
|
||||
return rcall("PTTL", rateLimiterKey)
|
||||
end
|
||||
65
backend/node_modules/bullmq/dist/cjs/commands/getState-8.lua
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
--[[
|
||||
Get a job state
|
||||
|
||||
Input:
|
||||
KEYS[1] 'completed' key,
|
||||
KEYS[2] 'failed' key
|
||||
KEYS[3] 'delayed' key
|
||||
KEYS[4] 'active' key
|
||||
KEYS[5] 'wait' key
|
||||
KEYS[6] 'paused' key
|
||||
KEYS[7] 'waiting-children' key
|
||||
KEYS[8] 'prioritized' key
|
||||
|
||||
ARGV[1] job id
|
||||
Output:
|
||||
'completed'
|
||||
'failed'
|
||||
'delayed'
|
||||
'active'
|
||||
'prioritized'
|
||||
'waiting'
|
||||
'waiting-children'
|
||||
'unknown'
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
if rcall("ZSCORE", KEYS[1], ARGV[1]) then
|
||||
return "completed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[2], ARGV[1]) then
|
||||
return "failed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[3], ARGV[1]) then
|
||||
return "delayed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[8], ARGV[1]) then
|
||||
return "prioritized"
|
||||
end
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/checkItemInList"
|
||||
|
||||
local active_items = rcall("LRANGE", KEYS[4] , 0, -1)
|
||||
if checkItemInList(active_items, ARGV[1]) ~= nil then
|
||||
return "active"
|
||||
end
|
||||
|
||||
local wait_items = rcall("LRANGE", KEYS[5] , 0, -1)
|
||||
if checkItemInList(wait_items, ARGV[1]) ~= nil then
|
||||
return "waiting"
|
||||
end
|
||||
|
||||
local paused_items = rcall("LRANGE", KEYS[6] , 0, -1)
|
||||
if checkItemInList(paused_items, ARGV[1]) ~= nil then
|
||||
return "waiting"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[7], ARGV[1]) then
|
||||
return "waiting-children"
|
||||
end
|
||||
|
||||
return "unknown"
|
||||
58
backend/node_modules/bullmq/dist/cjs/commands/getStateV2-8.lua
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
--[[
|
||||
Get a job state
|
||||
|
||||
Input:
|
||||
KEYS[1] 'completed' key,
|
||||
KEYS[2] 'failed' key
|
||||
KEYS[3] 'delayed' key
|
||||
KEYS[4] 'active' key
|
||||
KEYS[5] 'wait' key
|
||||
KEYS[6] 'paused' key
|
||||
KEYS[7] 'waiting-children' key
|
||||
KEYS[8] 'prioritized' key
|
||||
|
||||
ARGV[1] job id
|
||||
Output:
|
||||
'completed'
|
||||
'failed'
|
||||
'delayed'
|
||||
'active'
'prioritized'
|
||||
'waiting'
|
||||
'waiting-children'
|
||||
'unknown'
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
if rcall("ZSCORE", KEYS[1], ARGV[1]) then
|
||||
return "completed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[2], ARGV[1]) then
|
||||
return "failed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[3], ARGV[1]) then
|
||||
return "delayed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[8], ARGV[1]) then
|
||||
return "prioritized"
|
||||
end
|
||||
|
||||
if rcall("LPOS", KEYS[4] , ARGV[1]) then
|
||||
return "active"
|
||||
end
|
||||
|
||||
if rcall("LPOS", KEYS[5] , ARGV[1]) then
|
||||
return "waiting"
|
||||
end
|
||||
|
||||
if rcall("LPOS", KEYS[6] , ARGV[1]) then
|
||||
return "waiting"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[7] , ARGV[1]) then
|
||||
return "waiting-children"
|
||||
end
|
||||
|
||||
return "unknown"
|
||||
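A minimal sketch (not part of the vendored file): Job.getState resolves to one of the strings these two scripts return; the queue name and job id are illustrative.

import { Queue } from 'bullmq';

const queue = new Queue('tasks', { connection: { host: 'localhost', port: 6379 } });

const job = await queue.getJob('some-job-id');
if (job) {
  const state = await job.getState(); // 'waiting' | 'delayed' | 'active' | ...
}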
9
backend/node_modules/bullmq/dist/cjs/commands/includes/addBaseMarkerIfNeeded.lua
generated
vendored
Normal file
@@ -0,0 +1,9 @@
--[[
  Add marker if needed when a job is available.
]]

local function addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed)
    if not isPausedOrMaxed then
        rcall("ZADD", markerKey, 0, "0")
    end
end
15
backend/node_modules/bullmq/dist/cjs/commands/includes/addDelayMarkerIfNeeded.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
--[[
|
||||
Add delay marker if needed.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "getNextDelayedTimestamp"
|
||||
|
||||
local function addDelayMarkerIfNeeded(markerKey, delayedKey)
|
||||
local nextTimestamp = getNextDelayedTimestamp(delayedKey)
|
||||
if nextTimestamp ~= nil then
|
||||
-- Replace the score of the marker with the newest known
|
||||
-- next timestamp.
|
||||
rcall("ZADD", markerKey, nextTimestamp, "1")
|
||||
end
|
||||
end
|
||||
23
backend/node_modules/bullmq/dist/cjs/commands/includes/addDelayedJob.lua
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
--[[
|
||||
Adds a delayed job to the queue by doing the following:
|
||||
- Creates a new job key with the job data.
|
||||
- adds to delayed zset.
|
||||
- Emits a global event 'delayed' if the job is delayed.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "addDelayMarkerIfNeeded"
|
||||
--- @include "getDelayedScore"
|
||||
|
||||
local function addDelayedJob(jobId, delayedKey, eventsKey, timestamp,
|
||||
maxEvents, markerKey, delay)
|
||||
|
||||
local score, delayedTimestamp = getDelayedScore(delayedKey, timestamp, tonumber(delay))
|
||||
|
||||
rcall("ZADD", delayedKey, score, jobId)
|
||||
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "delayed",
|
||||
"jobId", jobId, "delay", delayedTimestamp)
|
||||
|
||||
-- mark that a delayed job is available
|
||||
addDelayMarkerIfNeeded(markerKey, delayedKey)
|
||||
end
|
||||
39
backend/node_modules/bullmq/dist/cjs/commands/includes/addJobFromScheduler.lua
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
--[[
|
||||
Adds a job produced by a job scheduler: stores the job and places it in the delayed set, or directly in the target list (with priority if set).
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "addDelayedJob"
|
||||
--- @include "addJobWithPriority"
|
||||
--- @include "isQueuePaused"
|
||||
--- @include "storeJob"
|
||||
--- @include "getTargetQueueList"
|
||||
--- @include "addJobInTargetList"
|
||||
|
||||
local function addJobFromScheduler(jobKey, jobId, opts, waitKey, pausedKey, activeKey, metaKey,
|
||||
prioritizedKey, priorityCounter, delayedKey, markerKey, eventsKey, name, maxEvents, timestamp,
|
||||
data, jobSchedulerId, repeatDelay)
|
||||
|
||||
opts['delay'] = repeatDelay
|
||||
opts['jobId'] = jobId
|
||||
|
||||
local delay, priority = storeJob(eventsKey, jobKey, jobId, name, data,
|
||||
opts, timestamp, nil, nil, jobSchedulerId)
|
||||
|
||||
if delay ~= 0 then
|
||||
addDelayedJob(jobId, delayedKey, eventsKey, timestamp, maxEvents, markerKey, delay)
|
||||
else
|
||||
local target, isPausedOrMaxed = getTargetQueueList(metaKey, activeKey, waitKey, pausedKey)
|
||||
|
||||
-- Standard or priority add
|
||||
if priority == 0 then
|
||||
local pushCmd = opts['lifo'] and 'RPUSH' or 'LPUSH'
|
||||
addJobInTargetList(target, markerKey, pushCmd, isPausedOrMaxed, jobId)
|
||||
else
|
||||
-- Priority add
|
||||
addJobWithPriority(markerKey, prioritizedKey, priority, jobId, priorityCounter, isPausedOrMaxed)
|
||||
end
|
||||
-- Emit waiting event
|
||||
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "waiting", "jobId", jobId)
|
||||
end
|
||||
end
|
||||
11
backend/node_modules/bullmq/dist/cjs/commands/includes/addJobInTargetList.lua
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
--[[
|
||||
Function to add a job to the target list and add a marker if needed.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "addBaseMarkerIfNeeded"
|
||||
|
||||
local function addJobInTargetList(targetKey, markerKey, pushCmd, isPausedOrMaxed, jobId)
|
||||
rcall(pushCmd, targetKey, jobId)
|
||||
addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed)
|
||||
end
|
||||
14
backend/node_modules/bullmq/dist/cjs/commands/includes/addJobWithPriority.lua
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
--[[
|
||||
Function to add a job taking its priority into account.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "addBaseMarkerIfNeeded"
|
||||
--- @include "getPriorityScore"
|
||||
|
||||
local function addJobWithPriority(markerKey, prioritizedKey, priority, jobId, priorityCounterKey,
|
||||
isPausedOrMaxed)
|
||||
local score = getPriorityScore(priority, priorityCounterKey)
|
||||
rcall("ZADD", prioritizedKey, score, jobId)
|
||||
addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed)
|
||||
end
|
||||
18
backend/node_modules/bullmq/dist/cjs/commands/includes/batches.lua
generated
vendored
Normal file
@@ -0,0 +1,18 @@
--[[
  Function to loop in batches.
  Just a bit of warning: some commands, such as ZREM,
  can receive a maximum of 7000 parameters per call.
]]

local function batches(n, batchSize)
    local i = 0

    return function()
        local from = i * batchSize + 1
        i = i + 1
        if (from <= n) then
            local to = math.min(from + batchSize - 1, n)
            return from, to
        end
    end
end
12
backend/node_modules/bullmq/dist/cjs/commands/includes/checkItemInList.lua
generated
vendored
Normal file
@@ -0,0 +1,12 @@
--[[
  Function to check if an item belongs to a list.
]]

local function checkItemInList(list, item)
    for _, v in pairs(list) do
        if v == item then
            return 1
        end
    end
    return nil
end
49
backend/node_modules/bullmq/dist/cjs/commands/includes/cleanList.lua
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
--[[
|
||||
Function to clean job list.
|
||||
Returns the deleted jobIds and the number of deleted jobs.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "getTimestamp"
|
||||
--- @include "isJobSchedulerJob"
|
||||
--- @include "removeJob"
|
||||
|
||||
local function cleanList(listKey, jobKeyPrefix, rangeStart, rangeEnd,
|
||||
timestamp, isWaiting, jobSchedulersKey)
|
||||
local jobs = rcall("LRANGE", listKey, rangeStart, rangeEnd)
|
||||
local deleted = {}
|
||||
local deletedCount = 0
|
||||
local jobTS
|
||||
local deletionMarker = ''
|
||||
local jobIdsLen = #jobs
|
||||
for i, job in ipairs(jobs) do
|
||||
if limit > 0 and deletedCount >= limit then
|
||||
break
|
||||
end
|
||||
|
||||
local jobKey = jobKeyPrefix .. job
|
||||
if (isWaiting or rcall("EXISTS", jobKey .. ":lock") == 0) and
|
||||
not isJobSchedulerJob(job, jobKey, jobSchedulersKey) then
|
||||
-- Find the right timestamp of the job to compare to maxTimestamp:
|
||||
-- * finishedOn says when the job was completed, but it isn't set unless the job has actually completed
|
||||
-- * processedOn represents when the job was last attempted, but it doesn't get populated until
|
||||
-- the job is first tried
|
||||
-- * timestamp is the original job submission time
|
||||
-- Fetch all three of these (in that order) and use the first one that is set so that we'll leave jobs
|
||||
-- that have been active within the grace period:
|
||||
jobTS = getTimestamp(jobKey, {"finishedOn", "processedOn", "timestamp"})
|
||||
if (not jobTS or jobTS <= timestamp) then
|
||||
-- replace the entry with a deletion marker; the actual deletion will
|
||||
-- occur at the end of the script
|
||||
rcall("LSET", listKey, rangeEnd - jobIdsLen + i, deletionMarker)
|
||||
removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]])
|
||||
deletedCount = deletedCount + 1
|
||||
table.insert(deleted, job)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
rcall("LREM", listKey, 0, deletionMarker)
|
||||
|
||||
return {deleted, deletedCount}
|
||||
end
|
||||
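A hedged sketch of a parent script that inlines this include. The ARGV slots and key names are assumptions, not BullMQ's actual layout; the point is the declaration order, which must put `limit` in scope before the inlined function so the function closes over it.

-- Assumed parent-script scaffolding (illustrative only).
local limit = tonumber(ARGV[3]) or 0            -- assumed ARGV slot; 0 = unlimited
--- @include "cleanList"
local result = cleanList("bull:myqueue:wait",   -- assumed list key
                         "bull:myqueue:",       -- job key prefix
                         0, -1,                 -- scan the whole list
                         tonumber(ARGV[1]),     -- grace-period cutoff timestamp
                         true,                  -- treat entries as waiting jobs
                         "bull:myqueue:repeat") -- assumed job-schedulers key
local jobIds, removed = result[1], result[2]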
58
backend/node_modules/bullmq/dist/cjs/commands/includes/cleanSet.lua
generated
vendored
Normal file
@@ -0,0 +1,58 @@
--[[
  Function to clean a job set.
  Returns the deleted jobIds and the number of jobs deleted.
]]

-- Includes
--- @include "batches"
--- @include "getJobsInZset"
--- @include "getTimestamp"
--- @include "isJobSchedulerJob"
--- @include "removeJob"

local function cleanSet(
    setKey,
    jobKeyPrefix,
    rangeEnd,
    timestamp,
    limit,
    attributes,
    isFinished,
    jobSchedulersKey)
    local jobs = getJobsInZset(setKey, rangeEnd, limit)
    local deleted = {}
    local deletedCount = 0
    local jobTS
    for i, job in ipairs(jobs) do
        if limit > 0 and deletedCount >= limit then
            break
        end

        local jobKey = jobKeyPrefix .. job
        -- Extract a Job Scheduler Id from jobId ("repeat:job-scheduler-id:millis")
        -- and check if it is in the scheduled jobs
        if not (jobSchedulersKey and isJobSchedulerJob(job, jobKey, jobSchedulersKey)) then
            if isFinished then
                removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]] )
                deletedCount = deletedCount + 1
                table.insert(deleted, job)
            else
                -- * finishedOn says when the job was completed, but it isn't set unless the job has actually completed
                jobTS = getTimestamp(jobKey, attributes)
                if (not jobTS or jobTS <= timestamp) then
                    removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]] )
                    deletedCount = deletedCount + 1
                    table.insert(deleted, job)
                end
            end
        end
    end

    if (#deleted > 0) then
        for from, to in batches(#deleted, 7000) do
            rcall("ZREM", setKey, unpack(deleted, from, to))
        end
    end

    return {deleted, deletedCount}
end
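A hedged invocation sketch for, say, a completed-jobs set. The key names, the attribute list, and the 1000-job cap are all invented for illustration:

local cutoff = tonumber(ARGV[1])                  -- assumed grace-period cutoff
local result = cleanSet("bull:myqueue:completed", -- assumed zset key
                        "bull:myqueue:",          -- job key prefix
                        -1,                       -- rangeEnd: consider the whole set
                        cutoff,
                        1000,                     -- remove at most 1000 jobs
                        {"finishedOn"},           -- timestamp attributes to inspect
                        true,                     -- completed jobs count as finished
                        nil)                      -- no scheduler filtering
local jobIds, removed = result[1], result[2]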
46
backend/node_modules/bullmq/dist/cjs/commands/includes/collectMetrics.lua
generated
vendored
Normal file
@@ -0,0 +1,46 @@
--[[
  Function to collect metrics based on a current and previous count of jobs.
  Granularity is fixed at 1 minute.
]]
--- @include "batches"
local function collectMetrics(metaKey, dataPointsList, maxDataPoints,
                              timestamp)
    -- Increment current count
    local count = rcall("HINCRBY", metaKey, "count", 1) - 1

    -- Compute how many data points we need to add to the list, N.
    local prevTS = rcall("HGET", metaKey, "prevTS")

    if not prevTS then
        -- If prevTS is nil, set it to the current timestamp
        rcall("HSET", metaKey, "prevTS", timestamp, "prevCount", 0)
        return
    end

    local N = math.min(math.floor(timestamp / 60000) - math.floor(prevTS / 60000), tonumber(maxDataPoints))

    if N > 0 then
        local delta = count - rcall("HGET", metaKey, "prevCount")
        -- If N > 1, add N-1 zeros to the list
        if N > 1 then
            local points = {}
            points[1] = delta
            for i = 2, N do
                points[i] = 0
            end

            for from, to in batches(#points, 7000) do
                rcall("LPUSH", dataPointsList, unpack(points, from, to))
            end
        else
            -- LPUSH delta to the list
            rcall("LPUSH", dataPointsList, delta)
        end

        -- LTRIM to keep list to its max size
        rcall("LTRIM", dataPointsList, 0, maxDataPoints - 1)

        -- update prev count with current count
        rcall("HSET", metaKey, "prevCount", count, "prevTS", timestamp)
    end
end
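The bucketing arithmetic is easiest to see with concrete numbers. Assuming prevTS = 600000 (minute 10) and timestamp = 840000 (minute 14), N = 14 - 10 = 4: the accumulated delta goes into one bucket and the remaining N-1 minutes are recorded as zeros. The key names below are assumptions:

collectMetrics("bull:myqueue:metrics:completed",       -- assumed meta hash
               "bull:myqueue:metrics:completed:data",  -- assumed data-points list
               60 * 24 * 7,                            -- keep one week of minutes
               840000)                                 -- "now" in milliseconds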
102
backend/node_modules/bullmq/dist/cjs/commands/includes/deduplicateJob.lua
generated
vendored
Normal file
@@ -0,0 +1,102 @@
--[[
  Function to deduplicate a job.
]]
-- Includes
--- @include "removeJobKeys"

local function removeDelayedJob(delayedKey, deduplicationKey, eventsKey, maxEvents, currentDeduplicatedJobId,
                                jobId, deduplicationId, prefix)
    if rcall("ZREM", delayedKey, currentDeduplicatedJobId) > 0 then
        removeJobKeys(prefix .. currentDeduplicatedJobId)
        rcall("XADD", eventsKey, "*", "event", "removed", "jobId", currentDeduplicatedJobId,
              "prev", "delayed")

        -- TODO remove debounced event in next breaking change
        rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "debounced", "jobId",
              jobId, "debounceId", deduplicationId)
        rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "deduplicated", "jobId",
              jobId, "deduplicationId", deduplicationId, "deduplicatedJobId", currentDeduplicatedJobId)

        return true
    end

    return false
end

local function deduplicateJob(deduplicationOpts, jobId, delayedKey, deduplicationKey, eventsKey, maxEvents,
                              prefix)
    local deduplicationId = deduplicationOpts and deduplicationOpts['id']
    if deduplicationId then
        local ttl = deduplicationOpts['ttl']
        if deduplicationOpts['replace'] then
            if ttl and ttl > 0 then
                local currentDebounceJobId = rcall('GET', deduplicationKey)
                if currentDebounceJobId then
                    local isRemoved = removeDelayedJob(delayedKey, deduplicationKey, eventsKey, maxEvents,
                                                       currentDebounceJobId, jobId, deduplicationId, prefix)
                    if isRemoved then
                        if deduplicationOpts['extend'] then
                            rcall('SET', deduplicationKey, jobId, 'PX', ttl)
                        else
                            rcall('SET', deduplicationKey, jobId, 'KEEPTTL')
                        end
                        return
                    else
                        return currentDebounceJobId
                    end
                else
                    rcall('SET', deduplicationKey, jobId, 'PX', ttl)
                    return
                end
            else
                local currentDebounceJobId = rcall('GET', deduplicationKey)
                if currentDebounceJobId then
                    local isRemoved = removeDelayedJob(delayedKey, deduplicationKey, eventsKey, maxEvents,
                                                       currentDebounceJobId, jobId, deduplicationId, prefix)

                    if isRemoved then
                        rcall('SET', deduplicationKey, jobId)
                        return
                    else
                        return currentDebounceJobId
                    end
                else
                    rcall('SET', deduplicationKey, jobId)
                    return
                end
            end
        else
            local deduplicationKeyExists
            if ttl and ttl > 0 then
                if deduplicationOpts['extend'] then
                    local currentDebounceJobId = rcall('GET', deduplicationKey)
                    if currentDebounceJobId then
                        rcall('SET', deduplicationKey, currentDebounceJobId, 'PX', ttl)
                        rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "debounced",
                              "jobId", currentDebounceJobId, "debounceId", deduplicationId)
                        rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "deduplicated", "jobId",
                              currentDebounceJobId, "deduplicationId", deduplicationId, "deduplicatedJobId", jobId)
                        return currentDebounceJobId
                    else
                        rcall('SET', deduplicationKey, jobId, 'PX', ttl)
                        return
                    end
                else
                    deduplicationKeyExists = not rcall('SET', deduplicationKey, jobId, 'PX', ttl, 'NX')
                end
            else
                deduplicationKeyExists = not rcall('SET', deduplicationKey, jobId, 'NX')
            end

            if deduplicationKeyExists then
                local currentDebounceJobId = rcall('GET', deduplicationKey)
                -- TODO remove debounced event in next breaking change
                rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "debounced", "jobId",
                      currentDebounceJobId, "debounceId", deduplicationId)
                rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "deduplicated", "jobId",
                      currentDebounceJobId, "deduplicationId", deduplicationId, "deduplicatedJobId", jobId)
                return currentDebounceJobId
            end
        end
    end
end
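A hedged caller sketch: a nil return means the new job proceeds, while a non-nil return is the id of the job that already holds the deduplication key, signalling the caller to short-circuit the add. The option table shape mirrors the fields read above; the key names are invented.

local dedupOpts = { id = "invoice-123", ttl = 5000 }  -- no replace/extend
local existingId = deduplicateJob(dedupOpts, "job-99",
                                  "bull:myqueue:delayed",         -- assumed delayed zset
                                  "bull:myqueue:de:invoice-123",  -- assumed dedup key
                                  "bull:myqueue:events",          -- assumed event stream
                                  10000,                          -- maxEvents
                                  "bull:myqueue:")                -- job key prefix
if existingId then
    return existingId  -- duplicate: report the winning job instead of adding
end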
12
backend/node_modules/bullmq/dist/cjs/commands/includes/destructureJobKey.lua
generated
vendored
Normal file
@@ -0,0 +1,12 @@
--[[
  Functions to destructure a job key.
  A word of warning: these functions are relatively slow and may affect performance significantly.
]]

local getJobIdFromKey = function (jobKey)
    return string.match(jobKey, ".*:(.*)")
end

local getJobKeyPrefix = function (jobKey, jobId)
    return string.sub(jobKey, 0, #jobKey - #jobId)
end
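Both helpers are plain string manipulation, so they can be verified outside Redis:

local key = "bull:myqueue:42"
local id = getJobIdFromKey(key)          -- captures the text after the last ":"
local prefix = getJobKeyPrefix(key, id)  -- everything up to and including it
assert(id == "42" and prefix == "bull:myqueue:")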
14
backend/node_modules/bullmq/dist/cjs/commands/includes/filterOutJobsToIgnore.lua
generated
vendored
Normal file
@@ -0,0 +1,14 @@
--[[
  Function to filter out jobs to ignore from a table.
]]

local function filterOutJobsToIgnore(jobs, jobsToIgnore)
    local filteredJobs = {}
    for i = 1, #jobs do
        if not jobsToIgnore[jobs[i]] then
            table.insert(filteredJobs, jobs[i])
        end
    end
    return filteredJobs
end
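Note that jobsToIgnore is a set-like table keyed by job id, not an array. A quick plain-Lua check:

local kept = filterOutJobsToIgnore({"1", "2", "3"}, {["2"] = true})
assert(#kept == 2 and kept[1] == "1" and kept[2] == "3")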
Some files were not shown because too many files have changed in this diff.