Projektstart

This commit is contained in:
2026-01-22 15:49:12 +01:00
parent 7212eb6f7a
commit 57e5f652f8
10637 changed files with 2598792 additions and 64 deletions

View File

@@ -0,0 +1,134 @@
"use strict";
/**
* (c) 2017-2025 BullForce Labs AB, MIT Licensed.
* @see LICENSE.md
*
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.AsyncFifoQueue = void 0;
/**
 * Internal node of the singly linked list: stores one value and a link to
 * the next node (or null at the tail).
 */
class Node {
    constructor(value) {
        this.value = value;
        this.next = null;
    }
}
/**
 * Minimal singly linked list used as a FIFO: `push` appends at the tail,
 * `shift` removes from the head; both run in O(1).
 */
class LinkedList {
    constructor() {
        this.length = 0;
        this.head = null;
        this.tail = null;
    }
    /**
     * Append `value` at the tail.
     * @returns the node that now wraps the value.
     */
    push(value) {
        const node = new Node(value);
        if (this.length === 0) {
            this.head = node;
        }
        else {
            this.tail.next = node;
        }
        this.tail = node;
        this.length++;
        return node;
    }
    /**
     * Detach and return the head node, or null when the list is empty.
     */
    shift() {
        if (this.length === 0) {
            return null;
        }
        const first = this.head;
        this.head = first.next;
        this.length--;
        return first;
    }
}
/**
* AsyncFifoQueue
*
* A minimal FIFO queue for asynchronous operations. Allows adding asynchronous operations
* and consume them in the order they are resolved.
*/
class AsyncFifoQueue {
constructor(ignoreErrors = false) {
this.ignoreErrors = ignoreErrors;
/**
* A queue of completed promises. As the pending
* promises are resolved, they are added to this queue.
*/
this.queue = new LinkedList();
/**
* A set of pending promises.
*/
this.pending = new Set();
this.newPromise();
}
add(promise) {
this.pending.add(promise);
promise
.then(data => {
this.pending.delete(promise);
if (this.queue.length === 0) {
this.resolvePromise(data);
}
this.queue.push(data);
})
.catch(err => {
// Ignore errors
if (this.ignoreErrors) {
this.queue.push(undefined);
}
this.pending.delete(promise);
this.rejectPromise(err);
});
}
async waitAll() {
await Promise.all(this.pending);
}
numTotal() {
return this.pending.size + this.queue.length;
}
numPending() {
return this.pending.size;
}
numQueued() {
return this.queue.length;
}
resolvePromise(data) {
this.resolve(data);
this.newPromise();
}
rejectPromise(err) {
this.reject(err);
this.newPromise();
}
newPromise() {
this.nextPromise = new Promise((resolve, reject) => {
this.resolve = resolve;
this.reject = reject;
});
}
async wait() {
return this.nextPromise;
}
async fetch() {
var _a;
if (this.pending.size === 0 && this.queue.length === 0) {
return;
}
while (this.queue.length === 0) {
try {
await this.wait();
}
catch (err) {
// Ignore errors
if (!this.ignoreErrors) {
console.error('Unexpected Error in AsyncFifoQueue', err);
}
}
}
return (_a = this.queue.shift()) === null || _a === void 0 ? void 0 : _a.value;
}
}
exports.AsyncFifoQueue = AsyncFifoQueue;
//# sourceMappingURL=async-fifo-queue.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"async-fifo-queue.js","sourceRoot":"","sources":["../../../src/classes/async-fifo-queue.ts"],"names":[],"mappings":";AAAA;;;;GAIG;;;AAEH,MAAM,IAAI;IAIR,YAAY,KAAQ;QAHpB,UAAK,GAAkB,SAAS,CAAC;QACjC,SAAI,GAAmB,IAAI,CAAC;QAG1B,IAAI,CAAC,KAAK,GAAG,KAAK,CAAC;IACrB,CAAC;CACF;AAED,MAAM,UAAU;IAKd;QAJA,WAAM,GAAG,CAAC,CAAC;QAKT,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC;QACjB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC;IACnB,CAAC;IAED,IAAI,CAAC,KAAQ;QACX,MAAM,OAAO,GAAG,IAAI,IAAI,CAAC,KAAK,CAAC,CAAC;QAChC,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,IAAI,CAAC,IAAI,GAAG,OAAO,CAAC;QACtB,CAAC;aAAM,CAAC;YACN,IAAI,CAAC,IAAI,CAAC,IAAI,GAAG,OAAO,CAAC;QAC3B,CAAC;QAED,IAAI,CAAC,IAAI,GAAG,OAAO,CAAC;QACpB,IAAI,CAAC,MAAM,IAAI,CAAC,CAAC;QACjB,OAAO,OAAO,CAAC;IACjB,CAAC;IAED,KAAK;QACH,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,OAAO,IAAI,CAAC;QACd,CAAC;aAAM,CAAC;YACN,MAAM,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC;YACvB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC;YAC3B,IAAI,CAAC,MAAM,IAAI,CAAC,CAAC;YAEjB,OAAO,IAAI,CAAC;QACd,CAAC;IACH,CAAC;CACF;AAED;;;;;GAKG;AACH,MAAa,cAAc;IAqBzB,YAAoB,eAAe,KAAK;QAApB,iBAAY,GAAZ,YAAY,CAAQ;QApBxC;;;WAGG;QACK,UAAK,GAAkB,IAAI,UAAU,EAAE,CAAC;QAEhD;;WAEG;QACK,YAAO,GAAG,IAAI,GAAG,EAAc,CAAC;QAYtC,IAAI,CAAC,UAAU,EAAE,CAAC;IACpB,CAAC;IAEM,GAAG,CAAC,OAAmB;QAC5B,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC;QAE1B,OAAO;aACJ,IAAI,CAAC,IAAI,CAAC,EAAE;YACX,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;YAE7B,IAAI,IAAI,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;gBAC5B,IAAI,CAAC,cAAc,CAAC,IAAI,CAAC,CAAC;YAC5B,CAAC;YACD,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;QACxB,CAAC,CAAC;aACD,KAAK,CAAC,GAAG,CAAC,EAAE;YACX,gBAAgB;YAChB,IAAI,IAAI,CAAC,YAAY,EAAE,CAAC;gBACtB,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;YAC7B,CAAC;YACD,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;YAC7B,IAAI,CAAC,aAAa,CAAC,GAAG,CAAC,CAAC;QAC1B,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,KAAK,CAAC,OAAO;QAClB,MAAM,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;IAClC,CAAC;IAEM,QAAQ;QACb,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC;IAC
/C,CAAC;IAEM,UAAU;QACf,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC;IAC3B,CAAC;IAEM,SAAS;QACd,OAAO,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC;IAC3B,CAAC;IAEO,cAAc,CAAC,IAAO;QAC5B,IAAI,CAAC,OAAQ,CAAC,IAAI,CAAC,CAAC;QACpB,IAAI,CAAC,UAAU,EAAE,CAAC;IACpB,CAAC;IAEO,aAAa,CAAC,GAAQ;QAC5B,IAAI,CAAC,MAAO,CAAC,GAAG,CAAC,CAAC;QAClB,IAAI,CAAC,UAAU,EAAE,CAAC;IACpB,CAAC;IAEO,UAAU;QAChB,IAAI,CAAC,WAAW,GAAG,IAAI,OAAO,CAAgB,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE;YAChE,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;QACvB,CAAC,CAAC,CAAC;IACL,CAAC;IAEO,KAAK,CAAC,IAAI;QAChB,OAAO,IAAI,CAAC,WAAW,CAAC;IAC1B,CAAC;IAEM,KAAK,CAAC,KAAK;;QAChB,IAAI,IAAI,CAAC,OAAO,CAAC,IAAI,KAAK,CAAC,IAAI,IAAI,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YACvD,OAAO;QACT,CAAC;QACD,OAAO,IAAI,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YAC/B,IAAI,CAAC;gBACH,MAAM,IAAI,CAAC,IAAI,EAAE,CAAC;YACpB,CAAC;YAAC,OAAO,GAAG,EAAE,CAAC;gBACb,gBAAgB;gBAChB,IAAI,CAAC,IAAI,CAAC,YAAY,EAAE,CAAC;oBACvB,OAAO,CAAC,KAAK,CAAC,oCAAoC,EAAE,GAAG,CAAC,CAAC;gBAC3D,CAAC;YACH,CAAC;QACH,CAAC;QACD,OAAO,MAAA,IAAI,CAAC,KAAK,CAAC,KAAK,EAAE,0CAAE,KAAK,CAAC;IACnC,CAAC;CACF;AApGD,wCAoGC"}

View File

@@ -0,0 +1,61 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Backoffs = void 0;
/**
 * Normalization and computation of job retry backoff delays.
 */
class Backoffs {
    /**
     * Accept either a plain number (treated as a fixed delay) or a backoff
     * options object, and return the object form. Falsy input yields
     * undefined.
     */
    static normalize(backoff) {
        if (Number.isFinite(backoff)) {
            return {
                type: 'fixed',
                delay: backoff,
            };
        }
        if (backoff) {
            return backoff;
        }
    }
    /**
     * Compute the delay before the next attempt by delegating to a built-in
     * or custom strategy. Returns undefined when no backoff is configured.
     */
    static calculate(backoff, attemptsMade, err, job, customStrategy) {
        if (!backoff) {
            return;
        }
        const strategy = lookupStrategy(backoff, customStrategy);
        return strategy(attemptsMade, backoff.type, err, job);
    }
}
exports.Backoffs = Backoffs;
// Built-in delay strategies, attached as a static property (compiled from a
// TS static field). Each factory closes over `delay`/`jitter` and returns
// the function invoked per retry attempt.
Backoffs.builtinStrategies = {
    fixed: function (delay, jitter = 0) {
        return function () {
            if (jitter <= 0) {
                return delay;
            }
            // Spread uniformly over [delay * (1 - jitter), delay].
            const floorDelay = delay * (1 - jitter);
            return Math.floor(Math.random() * delay * jitter + floorDelay);
        };
    },
    exponential: function (delay, jitter = 0) {
        return function (attemptsMade) {
            // Doubles per attempt: delay * 2^(attemptsMade - 1).
            const maxDelay = Math.round(Math.pow(2, attemptsMade - 1) * delay);
            if (jitter <= 0) {
                return maxDelay;
            }
            // Spread uniformly over [maxDelay * (1 - jitter), maxDelay].
            const floorDelay = maxDelay * (1 - jitter);
            return Math.floor(Math.random() * maxDelay * jitter + floorDelay);
        };
    },
};
/**
 * Resolve the strategy function for the given backoff options: a built-in
 * strategy when the type is known, otherwise the supplied custom strategy.
 * @throws when the type is unknown and no custom strategy was provided.
 */
function lookupStrategy(backoff, customStrategy) {
    if (!(backoff.type in Backoffs.builtinStrategies)) {
        if (customStrategy) {
            return customStrategy;
        }
        throw new Error(`Unknown backoff strategy ${backoff.type}.
If a custom backoff strategy is used, specify it when the queue is created.`);
    }
    return Backoffs.builtinStrategies[backoff.type](backoff.delay, backoff.jitter);
}
//# sourceMappingURL=backoffs.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"backoffs.js","sourceRoot":"","sources":["../../../src/classes/backoffs.ts"],"names":[],"mappings":";;;AAQA,MAAa,QAAQ;IA4BnB,MAAM,CAAC,SAAS,CACd,OAAgC;QAEhC,IAAI,MAAM,CAAC,QAAQ,CAAS,OAAO,CAAC,EAAE,CAAC;YACrC,OAAO;gBACL,IAAI,EAAE,OAAO;gBACb,KAAK,EAAU,OAAO;aACvB,CAAC;QACJ,CAAC;aAAM,IAAI,OAAO,EAAE,CAAC;YACnB,OAAuB,OAAO,CAAC;QACjC,CAAC;IACH,CAAC;IAED,MAAM,CAAC,SAAS,CACd,OAAuB,EACvB,YAAoB,EACpB,GAAU,EACV,GAAe,EACf,cAAgC;QAEhC,IAAI,OAAO,EAAE,CAAC;YACZ,MAAM,QAAQ,GAAG,cAAc,CAAC,OAAO,EAAE,cAAc,CAAC,CAAC;YAEzD,OAAO,QAAQ,CAAC,YAAY,EAAE,OAAO,CAAC,IAAI,EAAE,GAAG,EAAE,GAAG,CAAC,CAAC;QACxD,CAAC;IACH,CAAC;;AArDH,4BAsDC;AArDQ,0BAAiB,GAAsB;IAC5C,KAAK,EAAE,UAAU,KAAa,EAAE,MAAM,GAAG,CAAC;QACxC,OAAO;YACL,IAAI,MAAM,GAAG,CAAC,EAAE,CAAC;gBACf,MAAM,QAAQ,GAAG,KAAK,GAAG,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC;gBAEtC,OAAO,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,KAAK,GAAG,MAAM,GAAG,QAAQ,CAAC,CAAC;YAC/D,CAAC;iBAAM,CAAC;gBACN,OAAO,KAAK,CAAC;YACf,CAAC;QACH,CAAC,CAAC;IACJ,CAAC;IAED,WAAW,EAAE,UAAU,KAAa,EAAE,MAAM,GAAG,CAAC;QAC9C,OAAO,UAAU,YAAoB;YACnC,IAAI,MAAM,GAAG,CAAC,EAAE,CAAC;gBACf,MAAM,QAAQ,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,YAAY,GAAG,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC;gBACnE,MAAM,QAAQ,GAAG,QAAQ,GAAG,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC;gBAEzC,OAAO,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,QAAQ,GAAG,MAAM,GAAG,QAAQ,CAAC,CAAC;YAClE,CAAC;iBAAM,CAAC;gBACN,OAAO,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,YAAY,GAAG,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC;YAC3D,CAAC;QACH,CAAC,CAAC;IACJ,CAAC;CACF,CAAC;AA8BJ,SAAS,cAAc,CACrB,OAAuB,EACvB,cAAgC;IAEhC,IAAI,OAAO,CAAC,IAAI,IAAI,QAAQ,CAAC,iBAAiB,EAAE,CAAC;QAC/C,OAAO,QAAQ,CAAC,iBAAiB,CAAC,OAAO,CAAC,IAAI,CAAC,CAC7C,OAAO,CAAC,KAAM,EACd,OAAO,CAAC,MAAM,CACf,CAAC;IACJ,CAAC;SAAM,IAAI,cAAc,EAAE,CAAC;QAC1B,OAAO,cAAc,CAAC;IACxB,CAAC;SAAM,CAAC;QACN,MAAM,IAAI,KAAK,CACb,4BAA4B,OAAO,CAAC,IAAI;kFACoC,CAC7E,CAAC;IACJ,CAAC;AACH,CAAC"}

View File

@@ -0,0 +1,83 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ChildPool = void 0;
const path = require("path");
const child_1 = require("./child");
// Grace period in ms handed to `child.kill` when the pool terminates a
// child; after it elapses the child is force-killed with SIGKILL (see
// Child#kill in this package).
const CHILD_KILL_TIMEOUT = 30000;
/**
 * Detect whether a CommonJS module environment is present; used to decide
 * whether the cjs or esm build of `main.js` should be the sandbox entry.
 */
const supportCJS = () => {
    const hasRequire = typeof require === 'function';
    const hasModule = typeof module === 'object';
    return hasRequire && hasModule && typeof module.exports === 'object';
};
/**
 * Pool of sandboxed children (processes or worker threads), keyed by the
 * processor file they run. Busy children are indexed by pid in `retained`;
 * idle ones are stacked per processor file in `free` for reuse.
 */
class ChildPool {
    constructor({ mainFile = supportCJS()
        ? path.join(process.cwd(), 'dist/cjs/classes/main.js')
        : path.join(process.cwd(), 'dist/esm/classes/main.js'), useWorkerThreads, workerForkOptions, workerThreadsOptions, }) {
        this.retained = {};
        this.free = {};
        this.opts = {
            mainFile,
            useWorkerThreads,
            workerForkOptions,
            workerThreadsOptions,
        };
    }
    /**
     * Obtain a child for `processFile`: reuse an idle one when available,
     * otherwise spawn and initialize a fresh one.
     * @throws when initialization fails or the child exits prematurely.
     */
    async retain(processFile) {
        const recycled = this.getFree(processFile).pop();
        if (recycled) {
            this.retained[recycled.pid] = recycled;
            return recycled;
        }
        const spawned = new child_1.Child(this.opts.mainFile, processFile, {
            useWorkerThreads: this.opts.useWorkerThreads,
            workerForkOptions: this.opts.workerForkOptions,
            workerThreadsOptions: this.opts.workerThreadsOptions,
        });
        spawned.on('exit', this.remove.bind(this, spawned));
        try {
            await spawned.init();
            // Check status here as well, in case the child exited before we
            // could retain it.
            if (spawned.exitCode !== null || spawned.signalCode !== null) {
                throw new Error('Child exited before it could be retained');
            }
            this.retained[spawned.pid] = spawned;
            return spawned;
        }
        catch (err) {
            console.error(err);
            this.release(spawned);
            throw err;
        }
    }
    /** Move a child from the retained set back into its free stack. */
    release(child) {
        delete this.retained[child.pid];
        this.getFree(child.processFile).push(child);
    }
    /** Drop a child from both the retained set and its free stack. */
    remove(child) {
        delete this.retained[child.pid];
        const pool = this.getFree(child.processFile);
        const position = pool.indexOf(child);
        if (position !== -1) {
            pool.splice(position, 1);
        }
    }
    /** Remove the child from the pool and kill it (SIGKILL by default). */
    async kill(child, signal = 'SIGKILL') {
        this.remove(child);
        return child.kill(signal, CHILD_KILL_TIMEOUT);
    }
    /** Terminate every pooled child with SIGTERM and reset all state. */
    async clean() {
        const everyChild = Object.values(this.retained).concat(this.getAllFree());
        this.retained = {};
        this.free = {};
        await Promise.all(everyChild.map(child => this.kill(child, 'SIGTERM')));
    }
    /** Lazily create and return the free stack for the given id. */
    getFree(id) {
        return (this.free[id] = this.free[id] || []);
    }
    /** Every idle child across all processor files, flattened. */
    getAllFree() {
        return Object.values(this.free).reduce((acc, stack) => acc.concat(stack), []);
    }
}
exports.ChildPool = ChildPool;
//# sourceMappingURL=child-pool.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"child-pool.js","sourceRoot":"","sources":["../../../src/classes/child-pool.ts"],"names":[],"mappings":";;;AAAA,6BAA6B;AAC7B,mCAAgC;AAGhC,MAAM,kBAAkB,GAAG,KAAM,CAAC;AAMlC,MAAM,UAAU,GAAG,GAAG,EAAE;IACtB,OAAO,CACL,OAAO,OAAO,KAAK,UAAU;QAC7B,OAAO,MAAM,KAAK,QAAQ;QAC1B,OAAO,MAAM,CAAC,OAAO,KAAK,QAAQ,CACnC,CAAC;AACJ,CAAC,CAAC;AAEF,MAAa,SAAS;IAKpB,YAAY,EACV,QAAQ,GAAG,UAAU,EAAE;QACrB,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,0BAA0B,CAAC;QACtD,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,0BAA0B,CAAC,EACxD,gBAAgB,EAChB,iBAAiB,EACjB,oBAAoB,GACN;QAXhB,aAAQ,GAA6B,EAAE,CAAC;QACxC,SAAI,GAA+B,EAAE,CAAC;QAWpC,IAAI,CAAC,IAAI,GAAG;YACV,QAAQ;YACR,gBAAgB;YAChB,iBAAiB;YACjB,oBAAoB;SACrB,CAAC;IACJ,CAAC;IAED,KAAK,CAAC,MAAM,CAAC,WAAmB;QAC9B,IAAI,KAAK,GAAG,IAAI,CAAC,OAAO,CAAC,WAAW,CAAC,CAAC,GAAG,EAAE,CAAC;QAE5C,IAAI,KAAK,EAAE,CAAC;YACV,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC;YACjC,OAAO,KAAK,CAAC;QACf,CAAC;QAED,KAAK,GAAG,IAAI,aAAK,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,WAAW,EAAE;YACjD,gBAAgB,EAAE,IAAI,CAAC,IAAI,CAAC,gBAAgB;YAC5C,iBAAiB,EAAE,IAAI,CAAC,IAAI,CAAC,iBAAiB;YAC9C,oBAAoB,EAAE,IAAI,CAAC,IAAI,CAAC,oBAAoB;SACrD,CAAC,CAAC;QAEH,KAAK,CAAC,EAAE,CAAC,MAAM,EAAE,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,EAAE,KAAK,CAAC,CAAC,CAAC;QAEhD,IAAI,CAAC;YACH,MAAM,KAAK,CAAC,IAAI,EAAE,CAAC;YAEnB,sEAAsE;YACtE,aAAa;YACb,IAAI,KAAK,CAAC,QAAQ,KAAK,IAAI,IAAI,KAAK,CAAC,UAAU,KAAK,IAAI,EAAE,CAAC;gBACzD,MAAM,IAAI,KAAK,CAAC,0CAA0C,CAAC,CAAC;YAC9D,CAAC;YAED,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC;YAEjC,OAAO,KAAK,CAAC;QACf,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACb,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;YACnB,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;YACpB,MAAM,GAAG,CAAC;QACZ,CAAC;IACH,CAAC;IAED,OAAO,CAAC,KAAY;QAClB,OAAO,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;QAChC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;IAC9C,CAAC;IAED,MAAM,CAAC,KAAY;QACjB,OAAO,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;QAEhC,MAAM,IAAI,GAAG,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,WA
AW,CAAC,CAAC;QAE7C,MAAM,UAAU,GAAG,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;QACvC,IAAI,UAAU,GAAG,CAAC,CAAC,EAAE,CAAC;YACpB,IAAI,CAAC,MAAM,CAAC,UAAU,EAAE,CAAC,CAAC,CAAC;QAC7B,CAAC;IACH,CAAC;IAED,KAAK,CAAC,IAAI,CACR,KAAY,EACZ,SAAgC,SAAS;QAEzC,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;QACnB,OAAO,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,kBAAkB,CAAC,CAAC;IAChD,CAAC;IAED,KAAK,CAAC,KAAK;QACT,MAAM,QAAQ,GAAG,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,UAAU,EAAE,CAAC,CAAC;QACxE,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAC;QACnB,IAAI,CAAC,IAAI,GAAG,EAAE,CAAC;QAEf,MAAM,OAAO,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,CAAC,CAAC;IAChE,CAAC;IAED,OAAO,CAAC,EAAU;QAChB,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,EAAE,CAAC,CAAC;IAC/C,CAAC;IAED,UAAU;QACR,OAAO,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,MAAM,CACpC,CAAC,KAAK,EAAE,MAAM,EAAE,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,EACvC,EAAE,CACH,CAAC;IACJ,CAAC;CACF;AAlGD,8BAkGC"}

View File

@@ -0,0 +1,220 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ChildProcessor = void 0;
const enums_1 = require("../enums");
const utils_1 = require("../utils");
/**
 * Lifecycle states of the sandboxed child processor. Reverse-mapped like a
 * compiled TS enum: name -> code and code -> name.
 */
var ChildStatus;
(function (ChildStatus) {
    ['Idle', 'Started', 'Terminating', 'Errored'].forEach((label, code) => {
        ChildStatus[(ChildStatus[label] = code)] = label;
    });
})(ChildStatus || (ChildStatus = {}));
// How long (ms) to wait for the parent to answer a proxied request; kept
// short when running under test.
const RESPONSE_TIMEOUT = process.env.NODE_ENV === 'test' ? 500 : 5000;
/**
* ChildProcessor
*
* This class acts as the interface between a child process and it parent process
* so that jobs can be processed in different processes.
*
*/
class ChildProcessor {
    /**
     * @param send - callback used to post messages back to the parent process.
     * @param receiver - message source used to await responses from the parent.
     */
    constructor(send, receiver) {
        this.send = send;
        this.receiver = receiver;
    }
    /**
     * Dynamically import the processor file and keep its exported function.
     * Reports InitCompleted or InitFailed to the parent and sets the child
     * status to Idle or Errored accordingly.
     */
    async init(processorFile) {
        let processor;
        try {
            const { default: processorFn } = await import(processorFile);
            processor = processorFn;
            if (processor.default) {
                // support es2015 module.
                processor = processor.default;
            }
            if (typeof processor !== 'function') {
                throw new Error('No function is exported in processor file');
            }
        }
        catch (err) {
            this.status = ChildStatus.Errored;
            return this.send({
                cmd: enums_1.ParentCommand.InitFailed,
                err: (0, utils_1.errorToJSON)(err),
            });
        }
        const origProcessor = processor;
        // Normalize synchronous processors (and synchronous throws) into a
        // promise-returning interface.
        processor = function (job, token) {
            try {
                return Promise.resolve(origProcessor(job, token));
            }
            catch (err) {
                return Promise.reject(err);
            }
        };
        this.processor = processor;
        this.status = ChildStatus.Idle;
        await this.send({
            cmd: enums_1.ParentCommand.InitCompleted,
        });
    }
    /**
     * Run the processor on the given serialized job, reporting Completed or
     * Failed to the parent when it settles. Only one job may run at a time;
     * starting while not Idle reports an Error message instead.
     */
    async start(jobJson, token) {
        if (this.status !== ChildStatus.Idle) {
            return this.send({
                cmd: enums_1.ParentCommand.Error,
                err: (0, utils_1.errorToJSON)(new Error('cannot start a not idling child process')),
            });
        }
        this.status = ChildStatus.Started;
        this.currentJobPromise = (async () => {
            try {
                const job = this.wrapJob(jobJson, this.send);
                const result = await this.processor(job, token);
                await this.send({
                    cmd: enums_1.ParentCommand.Completed,
                    // `undefined` does not survive serialization; send null instead.
                    value: typeof result === 'undefined' ? null : result,
                });
            }
            catch (err) {
                await this.send({
                    cmd: enums_1.ParentCommand.Failed,
                    // Wrap non-Error throwables so they carry a message.
                    value: (0, utils_1.errorToJSON)(!err.message ? new Error(err) : err),
                });
            }
            finally {
                this.status = ChildStatus.Idle;
                this.currentJobPromise = undefined;
            }
        })();
    }
    async stop() { }
    // Let the in-flight job (if any) finish, then exit the process.
    async waitForCurrentJobAndExit() {
        this.status = ChildStatus.Terminating;
        try {
            await this.currentJobPromise;
        }
        finally {
            process.exit(process.exitCode || 0);
        }
    }
    /**
     * Enhance the given job argument with some functions
     * that can be called from the sandboxed job processor.
     *
     * Note, the `job` argument is a JSON deserialized message
     * from the main node process to this forked child process,
     * the functions on the original job object are not in tact.
     * The wrapped job adds back some of those original functions.
     */
    wrapJob(job, send) {
        const wrappedJob = Object.assign(Object.assign({}, job), {
            queueQualifiedName: job.queueQualifiedName,
            // `data` and `returnvalue` arrive as JSON strings; rehydrate them.
            data: JSON.parse(job.data || '{}'),
            opts: job.opts,
            returnValue: JSON.parse(job.returnvalue || '{}'),
            /*
             * Proxy `updateProgress` function, should work like the `progress` function.
             */
            async updateProgress(progress) {
                // Locally store reference to new progress value
                // so that we can return it from this process synchronously.
                this.progress = progress;
                // Send message to update job progress.
                await send({
                    cmd: enums_1.ParentCommand.Progress,
                    value: progress,
                });
            },
            /*
             * Proxy job `log` function.
             */
            log: async (row) => {
                await send({
                    cmd: enums_1.ParentCommand.Log,
                    value: row,
                });
            },
            /*
             * Proxy `moveToDelayed` function.
             */
            moveToDelayed: async (timestamp, token) => {
                await send({
                    cmd: enums_1.ParentCommand.MoveToDelayed,
                    value: { timestamp, token },
                });
            },
            /*
             * Proxy `moveToWait` function.
             */
            moveToWait: async (token) => {
                await send({
                    cmd: enums_1.ParentCommand.MoveToWait,
                    value: { token },
                });
            },
            /*
             * Proxy `moveToWaitingChildren` function. Correlates the reply via a
             * random requestId and waits for the parent's response.
             */
            moveToWaitingChildren: async (token, opts) => {
                const requestId = Math.random().toString(36).substring(2, 15);
                await send({
                    requestId,
                    cmd: enums_1.ParentCommand.MoveToWaitingChildren,
                    value: { token, opts },
                });
                return waitResponse(requestId, this.receiver, RESPONSE_TIMEOUT, 'moveToWaitingChildren');
            },
            /*
             * Proxy `updateData` function. Also updates the local copy so the
             * processor sees the new data immediately.
             */
            updateData: async (data) => {
                await send({
                    cmd: enums_1.ParentCommand.Update,
                    value: data,
                });
                wrappedJob.data = data;
            },
            /**
             * Proxy `getChildrenValues` function. Correlates the reply via a
             * random requestId and waits for the parent's response.
             */
            getChildrenValues: async () => {
                const requestId = Math.random().toString(36).substring(2, 15);
                await send({
                    requestId,
                    cmd: enums_1.ParentCommand.GetChildrenValues,
                });
                return waitResponse(requestId, this.receiver, RESPONSE_TIMEOUT, 'getChildrenValues');
            },
            /**
             * Proxy `getIgnoredChildrenFailures` function.
             *
             * This method sends a request to retrieve the failures of ignored children
             * and waits for a response from the parent process.
             *
             * @returns - A promise that resolves with the ignored children failures.
             * The exact structure of the returned data depends on the parent process implementation.
             */
            getIgnoredChildrenFailures: async () => {
                const requestId = Math.random().toString(36).substring(2, 15);
                await send({
                    requestId,
                    cmd: enums_1.ParentCommand.GetIgnoredChildrenFailures,
                });
                return waitResponse(requestId, this.receiver, RESPONSE_TIMEOUT, 'getIgnoredChildrenFailures');
            } });
        return wrappedJob;
    }
}
exports.ChildProcessor = ChildProcessor;
/**
 * Wait for the parent process to answer the request identified by
 * `requestId` on the given receiver.
 *
 * @param requestId - Correlation id attached to the outgoing request.
 * @param receiver - Message source (process / worker message port).
 * @param timeout - Milliseconds to wait before rejecting.
 * @param cmd - Command name, used in the timeout error message.
 * @returns The `value` field of the matching response message.
 * @throws Error when no matching response arrives within `timeout` ms.
 */
const waitResponse = async (requestId, receiver, timeout, cmd) => {
    return new Promise((resolve, reject) => {
        // Declared before the listener so a synchronously delivered message
        // cannot hit a temporal-dead-zone reference.
        let timer;
        const listener = (msg) => {
            if (msg.requestId === requestId) {
                // Fix: clear the timer so it cannot keep the event loop alive
                // and later reject an already-resolved promise.
                clearTimeout(timer);
                receiver.off('message', listener);
                resolve(msg.value);
            }
        };
        receiver.on('message', listener);
        timer = setTimeout(() => {
            receiver.off('message', listener);
            reject(new Error(`TimeoutError: ${cmd} timed out in (${timeout}ms)`));
        }, timeout);
    });
};
//# sourceMappingURL=child-processor.js.map

File diff suppressed because one or more lines are too long

212
backend/node_modules/bullmq/dist/cjs/classes/child.js generated vendored Normal file
View File

@@ -0,0 +1,212 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Child = void 0;
const child_process_1 = require("child_process");
const net_1 = require("net");
const worker_threads_1 = require("worker_threads");
const enums_1 = require("../enums");
const events_1 = require("events");
/**
* @see https://nodejs.org/api/process.html#process_exit_codes
*/
// Human-readable descriptions keyed by Node.js exit code. Codes above 128
// (128 + signal number) are reduced by 128 before lookup (see Child#initChild).
const exitCodesErrors = {
    1: 'Uncaught Fatal Exception',
    2: 'Unused',
    3: 'Internal JavaScript Parse Error',
    4: 'Internal JavaScript Evaluation Failure',
    5: 'Fatal Error',
    6: 'Non-function Internal Exception Handler',
    7: 'Internal Exception Handler Run-Time Failure',
    8: 'Unused',
    9: 'Invalid Argument',
    10: 'Internal JavaScript Run-Time Failure',
    12: 'Invalid Debug Argument',
    13: 'Unfinished Top-Level Await',
};
/**
* Child class
*
* This class is used to create a child process or worker thread, and allows using
* isolated processes or threads for processing jobs.
*
*/
class Child extends events_1.EventEmitter {
constructor(mainFile, processFile, opts = {
useWorkerThreads: false,
}) {
super();
this.mainFile = mainFile;
this.processFile = processFile;
this.opts = opts;
this._exitCode = null;
this._signalCode = null;
this._killed = false;
}
get pid() {
if (this.childProcess) {
return this.childProcess.pid;
}
else if (this.worker) {
// Worker threads pids can become negative when they are terminated
// so we need to use the absolute value to index the retained object
return Math.abs(this.worker.threadId);
}
else {
throw new Error('No child process or worker thread');
}
}
get exitCode() {
return this._exitCode;
}
get signalCode() {
return this._signalCode;
}
get killed() {
if (this.childProcess) {
return this.childProcess.killed;
}
return this._killed;
}
async init() {
const execArgv = await convertExecArgv(process.execArgv);
let parent;
if (this.opts.useWorkerThreads) {
this.worker = parent = new worker_threads_1.Worker(this.mainFile, Object.assign({ execArgv, stdin: true, stdout: true, stderr: true }, (this.opts.workerThreadsOptions
? this.opts.workerThreadsOptions
: {})));
}
else {
this.childProcess = parent = (0, child_process_1.fork)(this.mainFile, [], Object.assign({ execArgv, stdio: 'pipe' }, (this.opts.workerForkOptions ? this.opts.workerForkOptions : {})));
}
parent.on('exit', (exitCode, signalCode) => {
this._exitCode = exitCode;
// Coerce to null if undefined for backwards compatibility
signalCode = typeof signalCode === 'undefined' ? null : signalCode;
this._signalCode = signalCode;
this._killed = true;
this.emit('exit', exitCode, signalCode);
// Clean all listeners, we do not expect any more events after "exit"
parent.removeAllListeners();
this.removeAllListeners();
});
parent.on('error', (...args) => this.emit('error', ...args));
parent.on('message', (...args) => this.emit('message', ...args));
parent.on('close', (...args) => this.emit('close', ...args));
parent.stdout.pipe(process.stdout);
parent.stderr.pipe(process.stderr);
await this.initChild();
}
async send(msg) {
return new Promise((resolve, reject) => {
if (this.childProcess) {
this.childProcess.send(msg, (err) => {
if (err) {
reject(err);
}
else {
resolve();
}
});
}
else if (this.worker) {
resolve(this.worker.postMessage(msg));
}
else {
resolve();
}
});
}
killProcess(signal = 'SIGKILL') {
if (this.childProcess) {
this.childProcess.kill(signal);
}
else if (this.worker) {
this.worker.terminate();
}
}
async kill(signal = 'SIGKILL', timeoutMs) {
if (this.hasProcessExited()) {
return;
}
const onExit = onExitOnce(this.childProcess || this.worker);
this.killProcess(signal);
if (timeoutMs !== undefined && (timeoutMs === 0 || isFinite(timeoutMs))) {
const timeoutHandle = setTimeout(() => {
if (!this.hasProcessExited()) {
this.killProcess('SIGKILL');
}
}, timeoutMs);
await onExit;
clearTimeout(timeoutHandle);
}
await onExit;
}
async initChild() {
const onComplete = new Promise((resolve, reject) => {
const onMessageHandler = (msg) => {
if (msg.cmd === enums_1.ParentCommand.InitCompleted) {
resolve();
}
else if (msg.cmd === enums_1.ParentCommand.InitFailed) {
const err = new Error();
err.stack = msg.err.stack;
err.message = msg.err.message;
reject(err);
}
this.off('message', onMessageHandler);
this.off('close', onCloseHandler);
};
const onCloseHandler = (code, signal) => {
if (code > 128) {
code -= 128;
}
const msg = exitCodesErrors[code] || `Unknown exit code ${code}`;
reject(new Error(`Error initializing child: ${msg} and signal ${signal}`));
this.off('message', onMessageHandler);
this.off('close', onCloseHandler);
};
this.on('message', onMessageHandler);
this.on('close', onCloseHandler);
});
await this.send({
cmd: enums_1.ChildCommand.Init,
value: this.processFile,
});
await onComplete;
}
hasProcessExited() {
return !!(this.exitCode !== null || this.signalCode);
}
}
exports.Child = Child;
/**
 * Resolve once the given child process / worker emits its 'exit' event.
 */
function onExitOnce(child) {
    return new Promise(done => child.once('exit', () => done()));
}
/**
 * Ask the OS for a currently free TCP port by binding an ephemeral server
 * and closing it again before resolving with the port number.
 */
const getFreePort = async () => {
    return new Promise(resolve => {
        const probe = (0, net_1.createServer)();
        probe.listen(0, () => {
            const { port } = probe.address();
            probe.close(() => resolve(port));
        });
    });
};
/**
 * Copy the parent's execArgv, rewriting any `--inspect*` flag to use a
 * freshly allocated free port so the child's debugger does not collide
 * with the parent's. Non-inspect flags are passed through first, followed
 * by the rewritten inspect flags.
 */
const convertExecArgv = async (execArgv) => {
    const passthrough = [];
    const rewritten = [];
    for (const arg of execArgv) {
        if (arg.includes('--inspect')) {
            const flagName = arg.split('=')[0];
            rewritten.push(`${flagName}=${await getFreePort()}`);
        }
        else {
            passthrough.push(arg);
        }
    }
    return passthrough.concat(rewritten);
};
//# sourceMappingURL=child.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,20 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.DelayedError = exports.DELAYED_ERROR = void 0;
exports.DELAYED_ERROR = 'bullmq:movedToDelayed';
/**
 * DelayedError
 *
 * Error thrown to signal that a job in the active state has been moved to
 * the delayed state.
 */
class DelayedError extends Error {
    constructor(message = exports.DELAYED_ERROR) {
        super(message);
        // Pin the prototype chain explicitly and expose the concrete class
        // name on the error instance.
        Object.setPrototypeOf(this, new.target.prototype);
        this.name = this.constructor.name;
    }
}
exports.DelayedError = DelayedError;
//# sourceMappingURL=delayed-error.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"delayed-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/delayed-error.ts"],"names":[],"mappings":";;;AAAa,QAAA,aAAa,GAAG,uBAAuB,CAAC;AAErD;;;;;;GAMG;AACH,MAAa,YAAa,SAAQ,KAAK;IACrC,YAAY,UAAkB,qBAAa;QACzC,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF;AAND,oCAMC"}

View File

@@ -0,0 +1,9 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const tslib_1 = require("tslib");
tslib_1.__exportStar(require("./delayed-error"), exports);
tslib_1.__exportStar(require("./rate-limit-error"), exports);
tslib_1.__exportStar(require("./unrecoverable-error"), exports);
tslib_1.__exportStar(require("./waiting-children-error"), exports);
tslib_1.__exportStar(require("./waiting-error"), exports);
//# sourceMappingURL=index.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../../src/classes/errors/index.ts"],"names":[],"mappings":";;;AAAA,0DAAgC;AAChC,6DAAmC;AACnC,gEAAsC;AACtC,mEAAyC;AACzC,0DAAgC"}

View File

@@ -0,0 +1,19 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.RateLimitError = exports.RATE_LIMIT_ERROR = void 0;
exports.RATE_LIMIT_ERROR = 'bullmq:rateLimitExceeded';
/**
 * RateLimitError
 *
 * Error thrown when the queue has reached a rate limit.
 */
class RateLimitError extends Error {
    constructor(message = exports.RATE_LIMIT_ERROR) {
        super(message);
        // Pin the prototype chain explicitly and expose the concrete class
        // name on the error instance.
        Object.setPrototypeOf(this, new.target.prototype);
        this.name = this.constructor.name;
    }
}
exports.RateLimitError = RateLimitError;
//# sourceMappingURL=rate-limit-error.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"rate-limit-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/rate-limit-error.ts"],"names":[],"mappings":";;;AAAa,QAAA,gBAAgB,GAAG,0BAA0B,CAAC;AAE3D;;;;;GAKG;AACH,MAAa,cAAe,SAAQ,KAAK;IACvC,YAAY,UAAkB,wBAAgB;QAC5C,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF;AAND,wCAMC"}

View File

@@ -0,0 +1,20 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.UnrecoverableError = exports.UNRECOVERABLE_ERROR = void 0;
exports.UNRECOVERABLE_ERROR = 'bullmq:unrecoverable';
/**
 * UnrecoverableError
 *
 * Error thrown to move a job straight to the failed state even when the
 * attempts made are still below the configured limit.
 */
class UnrecoverableError extends Error {
    constructor(message = exports.UNRECOVERABLE_ERROR) {
        super(message);
        // Pin the prototype chain explicitly and expose the concrete class
        // name on the error instance.
        Object.setPrototypeOf(this, new.target.prototype);
        this.name = this.constructor.name;
    }
}
exports.UnrecoverableError = UnrecoverableError;
//# sourceMappingURL=unrecoverable-error.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"unrecoverable-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/unrecoverable-error.ts"],"names":[],"mappings":";;;AAAa,QAAA,mBAAmB,GAAG,sBAAsB,CAAC;AAE1D;;;;;;GAMG;AACH,MAAa,kBAAmB,SAAQ,KAAK;IAC3C,YAAY,UAAkB,2BAAmB;QAC/C,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF;AAND,gDAMC"}

View File

@@ -0,0 +1,20 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.WaitingChildrenError = exports.WAITING_CHILDREN_ERROR = void 0;
exports.WAITING_CHILDREN_ERROR = 'bullmq:movedToWaitingChildren';
/**
 * WaitingChildrenError
 *
 * Error thrown to signal that a job in the active state has been moved to
 * the waiting-children state.
 */
class WaitingChildrenError extends Error {
    constructor(message = exports.WAITING_CHILDREN_ERROR) {
        super(message);
        // Pin the prototype chain explicitly and expose the concrete class
        // name on the error instance.
        Object.setPrototypeOf(this, new.target.prototype);
        this.name = this.constructor.name;
    }
}
exports.WaitingChildrenError = WaitingChildrenError;
//# sourceMappingURL=waiting-children-error.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"waiting-children-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/waiting-children-error.ts"],"names":[],"mappings":";;;AAAa,QAAA,sBAAsB,GAAG,+BAA+B,CAAC;AAEtE;;;;;;GAMG;AACH,MAAa,oBAAqB,SAAQ,KAAK;IAC7C,YAAY,UAAkB,8BAAsB;QAClD,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF;AAND,oDAMC"}

View File

@@ -0,0 +1,19 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.WaitingError = exports.WAITING_ERROR = void 0;
exports.WAITING_ERROR = 'bullmq:movedToWait';
/**
 * WaitingError
 *
 * Error thrown to signal that a job in the active state has been moved to
 * the wait or prioritized state.
 */
class WaitingError extends Error {
    constructor(message = exports.WAITING_ERROR) {
        super(message);
        // Pin the prototype chain explicitly and expose the concrete class
        // name on the error instance.
        Object.setPrototypeOf(this, new.target.prototype);
        this.name = this.constructor.name;
    }
}
exports.WaitingError = WaitingError;
//# sourceMappingURL=waiting-error.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"waiting-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/waiting-error.ts"],"names":[],"mappings":";;;AAAa,QAAA,aAAa,GAAG,oBAAoB,CAAC;AAElD;;;;;GAKG;AACH,MAAa,YAAa,SAAQ,KAAK;IACrC,YAAY,UAAkB,qBAAa;QACzC,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF;AAND,oCAMC"}

View File

@@ -0,0 +1,354 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.FlowProducer = void 0;
const events_1 = require("events");
const uuid_1 = require("uuid");
const utils_1 = require("../utils");
const job_1 = require("./job");
const queue_keys_1 = require("./queue-keys");
const redis_connection_1 = require("./redis-connection");
const enums_1 = require("../enums");
/**
* This class allows to add jobs with dependencies between them in such
* a way that it is possible to build complex flows.
* Note: A flow is a tree-like structure of jobs that depend on each other.
* Whenever the children of a given parent are completed, the parent
* will be processed, being able to access the children's result data.
* All Jobs can be in different queues, either children or parents,
*/
class FlowProducer extends events_1.EventEmitter {
constructor(opts = { connection: {} }, Connection = redis_connection_1.RedisConnection) {
super();
this.opts = opts;
this.opts = Object.assign({ prefix: 'bull' }, opts);
this.connection = new Connection(opts.connection, {
shared: (0, utils_1.isRedisInstance)(opts.connection),
blocking: false,
skipVersionCheck: opts.skipVersionCheck,
skipWaitingForReady: opts.skipWaitingForReady,
});
this.connection.on('error', (error) => this.emit('error', error));
this.connection.on('close', () => {
if (!this.closing) {
this.emit('ioredis:close');
}
});
this.queueKeys = new queue_keys_1.QueueKeys(opts.prefix);
if (opts === null || opts === void 0 ? void 0 : opts.telemetry) {
this.telemetry = opts.telemetry;
}
}
emit(event, ...args) {
return super.emit(event, ...args);
}
off(eventName, listener) {
super.off(eventName, listener);
return this;
}
on(event, listener) {
super.on(event, listener);
return this;
}
once(event, listener) {
super.once(event, listener);
return this;
}
/**
* Returns a promise that resolves to a redis client. Normally used only by subclasses.
*/
get client() {
return this.connection.client;
}
/**
* Helper to easily extend Job class calls.
*/
get Job() {
return job_1.Job;
}
waitUntilReady() {
return this.client;
}
/**
* Adds a flow.
*
* This call would be atomic, either it fails and no jobs will
* be added to the queues, or it succeeds and all jobs will be added.
*
* @param flow - an object with a tree-like structure where children jobs
* will be processed before their parents.
* @param opts - options that will be applied to the flow object.
*/
async add(flow, opts) {
var _a;
if (this.closing) {
return;
}
const client = await this.connection.client;
const multi = client.multi();
const parentOpts = (_a = flow === null || flow === void 0 ? void 0 : flow.opts) === null || _a === void 0 ? void 0 : _a.parent;
const parentKey = (0, utils_1.getParentKey)(parentOpts);
const parentDependenciesKey = parentKey
? `${parentKey}:dependencies`
: undefined;
return (0, utils_1.trace)(this.telemetry, enums_1.SpanKind.PRODUCER, flow.queueName, 'addFlow', flow.queueName, async (span) => {
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.FlowName]: flow.name,
});
const jobsTree = await this.addNode({
multi,
node: flow,
queuesOpts: opts === null || opts === void 0 ? void 0 : opts.queuesOptions,
parent: {
parentOpts,
parentDependenciesKey,
},
});
await multi.exec();
return jobsTree;
});
}
/**
* Get a flow.
*
* @param opts - an object with options for getting a JobNode.
*/
async getFlow(opts) {
if (this.closing) {
return;
}
const client = await this.connection.client;
const updatedOpts = Object.assign({
depth: 10,
maxChildren: 20,
prefix: this.opts.prefix,
}, opts);
const jobsTree = this.getNode(client, updatedOpts);
return jobsTree;
}
/**
* Adds multiple flows.
*
* A flow is a tree-like structure of jobs that depend on each other.
* Whenever the children of a given parent are completed, the parent
* will be processed, being able to access the children's result data.
*
* All Jobs can be in different queues, either children or parents,
* however this call would be atomic, either it fails and no jobs will
* be added to the queues, or it succeeds and all jobs will be added.
*
* @param flows - an array of objects with a tree-like structure where children jobs
* will be processed before their parents.
*/
async addBulk(flows) {
if (this.closing) {
return;
}
const client = await this.connection.client;
const multi = client.multi();
return (0, utils_1.trace)(this.telemetry, enums_1.SpanKind.PRODUCER, '', 'addBulkFlows', '', async (span) => {
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.BulkCount]: flows.length,
[enums_1.TelemetryAttributes.BulkNames]: flows
.map(flow => flow.name)
.join(','),
});
const jobsTrees = await this.addNodes(multi, flows);
await multi.exec();
return jobsTrees;
});
}
/**
* Add a node (job) of a flow to the queue. This method will recursively
* add all its children as well. Note that a given job can potentially be
* a parent and a child job at the same time depending on where it is located
* in the tree hierarchy.
*
* @param multi - ioredis ChainableCommander
* @param node - the node representing a job to be added to some queue
* @param parent - parent data sent to children to create the "links" to their parent
* @returns
*/
async addNode({ multi, node, parent, queuesOpts, }) {
var _a, _b;
const prefix = node.prefix || this.opts.prefix;
const queue = this.queueFromNode(node, new queue_keys_1.QueueKeys(prefix), prefix);
const queueOpts = queuesOpts && queuesOpts[node.queueName];
const jobsOpts = (_a = queueOpts === null || queueOpts === void 0 ? void 0 : queueOpts.defaultJobOptions) !== null && _a !== void 0 ? _a : {};
const jobId = ((_b = node.opts) === null || _b === void 0 ? void 0 : _b.jobId) || (0, uuid_1.v4)();
return (0, utils_1.trace)(this.telemetry, enums_1.SpanKind.PRODUCER, node.queueName, 'addNode', node.queueName, async (span, srcPropagationMedatada) => {
var _a, _b;
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.JobName]: node.name,
[enums_1.TelemetryAttributes.JobId]: jobId,
});
const opts = node.opts;
let telemetry = opts === null || opts === void 0 ? void 0 : opts.telemetry;
if (srcPropagationMedatada && opts) {
const omitContext = (_a = opts.telemetry) === null || _a === void 0 ? void 0 : _a.omitContext;
const telemetryMetadata = ((_b = opts.telemetry) === null || _b === void 0 ? void 0 : _b.metadata) ||
(!omitContext && srcPropagationMedatada);
if (telemetryMetadata || omitContext) {
telemetry = {
metadata: telemetryMetadata,
omitContext,
};
}
}
const job = new this.Job(queue, node.name, node.data, Object.assign(Object.assign(Object.assign({}, jobsOpts), opts), { parent: parent === null || parent === void 0 ? void 0 : parent.parentOpts, telemetry }), jobId);
const parentKey = (0, utils_1.getParentKey)(parent === null || parent === void 0 ? void 0 : parent.parentOpts);
if (node.children && node.children.length > 0) {
// Create the parent job, it will be a job in status "waiting-children".
const parentId = jobId;
const queueKeysParent = new queue_keys_1.QueueKeys(node.prefix || this.opts.prefix);
await job.addJob(multi, {
parentDependenciesKey: parent === null || parent === void 0 ? void 0 : parent.parentDependenciesKey,
addToWaitingChildren: true,
parentKey,
});
const parentDependenciesKey = `${queueKeysParent.toKey(node.queueName, parentId)}:dependencies`;
const children = await this.addChildren({
multi,
nodes: node.children,
parent: {
parentOpts: {
id: parentId,
queue: queueKeysParent.getQueueQualifiedName(node.queueName),
},
parentDependenciesKey,
},
queuesOpts,
});
return { job, children };
}
else {
await job.addJob(multi, {
parentDependenciesKey: parent === null || parent === void 0 ? void 0 : parent.parentDependenciesKey,
parentKey,
});
return { job };
}
});
}
/**
* Adds nodes (jobs) of multiple flows to the queue. This method will recursively
* add all its children as well. Note that a given job can potentially be
* a parent and a child job at the same time depending on where it is located
* in the tree hierarchy.
*
* @param multi - ioredis ChainableCommander
* @param nodes - the nodes representing jobs to be added to some queue
* @returns
*/
addNodes(multi, nodes) {
return Promise.all(nodes.map(node => {
var _a;
const parentOpts = (_a = node === null || node === void 0 ? void 0 : node.opts) === null || _a === void 0 ? void 0 : _a.parent;
const parentKey = (0, utils_1.getParentKey)(parentOpts);
const parentDependenciesKey = parentKey
? `${parentKey}:dependencies`
: undefined;
return this.addNode({
multi,
node,
parent: {
parentOpts,
parentDependenciesKey,
},
});
}));
}
async getNode(client, node) {
const queue = this.queueFromNode(node, new queue_keys_1.QueueKeys(node.prefix), node.prefix);
const job = await this.Job.fromId(queue, node.id);
if (job) {
const { processed = {}, unprocessed = [], failed = [], ignored = {}, } = await job.getDependencies({
failed: {
count: node.maxChildren,
},
processed: {
count: node.maxChildren,
},
unprocessed: {
count: node.maxChildren,
},
ignored: {
count: node.maxChildren,
},
});
const processedKeys = Object.keys(processed);
const ignoredKeys = Object.keys(ignored);
const childrenCount = processedKeys.length +
unprocessed.length +
ignoredKeys.length +
failed.length;
const newDepth = node.depth - 1;
if (childrenCount > 0 && newDepth) {
const children = await this.getChildren(client, [...processedKeys, ...unprocessed, ...failed, ...ignoredKeys], newDepth, node.maxChildren);
return { job, children };
}
else {
return { job };
}
}
}
addChildren({ multi, nodes, parent, queuesOpts }) {
return Promise.all(nodes.map(node => this.addNode({ multi, node, parent, queuesOpts })));
}
getChildren(client, childrenKeys, depth, maxChildren) {
const getChild = (key) => {
const [prefix, queueName, id] = key.split(':');
return this.getNode(client, {
id,
queueName,
prefix,
depth,
maxChildren,
});
};
return Promise.all([...childrenKeys.map(getChild)]);
}
/**
* Helper factory method that creates a queue-like object
* required to create jobs in any queue.
*
* @param node -
* @param queueKeys -
* @returns
*/
queueFromNode(node, queueKeys, prefix) {
return {
client: this.connection.client,
name: node.queueName,
keys: queueKeys.getKeys(node.queueName),
toKey: (type) => queueKeys.toKey(node.queueName, type),
opts: { prefix, connection: {} },
qualifiedName: queueKeys.getQueueQualifiedName(node.queueName),
closing: this.closing,
waitUntilReady: async () => this.connection.client,
removeListener: this.removeListener.bind(this),
emit: this.emit.bind(this),
on: this.on.bind(this),
redisVersion: this.connection.redisVersion,
trace: async () => { },
};
}
/**
*
* Closes the connection and returns a promise that resolves when the connection is closed.
*/
async close() {
if (!this.closing) {
this.closing = this.connection.close();
}
await this.closing;
}
/**
*
* Force disconnects a connection.
*/
disconnect() {
return this.connection.disconnect();
}
}
exports.FlowProducer = FlowProducer;
//# sourceMappingURL=flow-producer.js.map

File diff suppressed because one or more lines are too long

27
backend/node_modules/bullmq/dist/cjs/classes/index.js generated vendored Normal file
View File

@@ -0,0 +1,27 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const tslib_1 = require("tslib");
tslib_1.__exportStar(require("./async-fifo-queue"), exports);
tslib_1.__exportStar(require("./backoffs"), exports);
tslib_1.__exportStar(require("./child"), exports);
tslib_1.__exportStar(require("./child-pool"), exports);
tslib_1.__exportStar(require("./child-processor"), exports);
tslib_1.__exportStar(require("./errors"), exports);
tslib_1.__exportStar(require("./flow-producer"), exports);
tslib_1.__exportStar(require("./job"), exports);
tslib_1.__exportStar(require("./job-scheduler"), exports);
// export * from './main'; this file must not be exported
// export * from './main-worker'; this file must not be exported
tslib_1.__exportStar(require("./lock-manager"), exports);
tslib_1.__exportStar(require("./queue-base"), exports);
tslib_1.__exportStar(require("./queue-events"), exports);
tslib_1.__exportStar(require("./queue-events-producer"), exports);
tslib_1.__exportStar(require("./queue-getters"), exports);
tslib_1.__exportStar(require("./queue-keys"), exports);
tslib_1.__exportStar(require("./queue"), exports);
tslib_1.__exportStar(require("./redis-connection"), exports);
tslib_1.__exportStar(require("./repeat"), exports);
tslib_1.__exportStar(require("./sandbox"), exports);
tslib_1.__exportStar(require("./scripts"), exports);
tslib_1.__exportStar(require("./worker"), exports);
//# sourceMappingURL=index.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../src/classes/index.ts"],"names":[],"mappings":";;;AAAA,6DAAmC;AACnC,qDAA2B;AAC3B,kDAAwB;AACxB,uDAA6B;AAC7B,4DAAkC;AAClC,mDAAyB;AACzB,0DAAgC;AAChC,gDAAsB;AACtB,0DAAgC;AAChC,yDAAyD;AACzD,gEAAgE;AAChE,yDAA+B;AAC/B,uDAA6B;AAC7B,yDAA+B;AAC/B,kEAAwC;AACxC,0DAAgC;AAChC,uDAA6B;AAC7B,kDAAwB;AACxB,6DAAmC;AACnC,mDAAyB;AACzB,oDAA0B;AAC1B,oDAA0B;AAC1B,mDAAyB"}

View File

@@ -0,0 +1,247 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.defaultRepeatStrategy = exports.JobScheduler = void 0;
const tslib_1 = require("tslib");
const cron_parser_1 = require("cron-parser");
const job_1 = require("./job");
const queue_base_1 = require("./queue-base");
const enums_1 = require("../enums");
const utils_1 = require("../utils");
class JobScheduler extends queue_base_1.QueueBase {
constructor(name, opts, Connection) {
super(name, opts, Connection);
this.repeatStrategy =
(opts.settings && opts.settings.repeatStrategy) || exports.defaultRepeatStrategy;
}
async upsertJobScheduler(jobSchedulerId, repeatOpts, jobName, jobData, opts, { override, producerId }) {
const { every, limit, pattern, offset } = repeatOpts;
if (pattern && every) {
throw new Error('Both .pattern and .every options are defined for this repeatable job');
}
if (!pattern && !every) {
throw new Error('Either .pattern or .every options must be defined for this repeatable job');
}
if (repeatOpts.immediately && repeatOpts.startDate) {
throw new Error('Both .immediately and .startDate options are defined for this repeatable job');
}
if (repeatOpts.immediately && repeatOpts.every) {
console.warn("Using option immediately with every does not affect the job's schedule. Job will run immediately anyway.");
}
// Check if we reached the limit of the repeatable job's iterations
const iterationCount = repeatOpts.count ? repeatOpts.count + 1 : 1;
if (typeof repeatOpts.limit !== 'undefined' &&
iterationCount > repeatOpts.limit) {
return;
}
// Check if we reached the end date of the repeatable job
let now = Date.now();
const { endDate } = repeatOpts;
if (endDate && now > new Date(endDate).getTime()) {
return;
}
const prevMillis = opts.prevMillis || 0;
now = prevMillis < now ? now : prevMillis;
// Check if we have a start date for the repeatable job
const { immediately } = repeatOpts, filteredRepeatOpts = tslib_1.__rest(repeatOpts, ["immediately"]);
let nextMillis;
const newOffset = null;
if (pattern) {
nextMillis = await this.repeatStrategy(now, repeatOpts, jobName);
if (nextMillis < now) {
nextMillis = now;
}
}
if (nextMillis || every) {
return this.trace(enums_1.SpanKind.PRODUCER, 'add', `${this.name}.${jobName}`, async (span, srcPropagationMedatada) => {
var _a, _b;
let telemetry = opts.telemetry;
if (srcPropagationMedatada) {
const omitContext = (_a = opts.telemetry) === null || _a === void 0 ? void 0 : _a.omitContext;
const telemetryMetadata = ((_b = opts.telemetry) === null || _b === void 0 ? void 0 : _b.metadata) ||
(!omitContext && srcPropagationMedatada);
if (telemetryMetadata || omitContext) {
telemetry = {
metadata: telemetryMetadata,
omitContext,
};
}
}
const mergedOpts = this.getNextJobOpts(nextMillis, jobSchedulerId, Object.assign(Object.assign({}, opts), { repeat: filteredRepeatOpts, telemetry }), iterationCount, newOffset);
if (override) {
// Clamp nextMillis to now if it's in the past
if (nextMillis < now) {
nextMillis = now;
}
const [jobId, delay] = await this.scripts.addJobScheduler(jobSchedulerId, nextMillis, JSON.stringify(typeof jobData === 'undefined' ? {} : jobData), job_1.Job.optsAsJSON(opts), {
name: jobName,
startDate: repeatOpts.startDate
? new Date(repeatOpts.startDate).getTime()
: undefined,
endDate: endDate ? new Date(endDate).getTime() : undefined,
tz: repeatOpts.tz,
pattern,
every,
limit,
offset: newOffset,
}, job_1.Job.optsAsJSON(mergedOpts), producerId);
// Ensure delay is a number (Dragonflydb may return it as a string)
const numericDelay = typeof delay === 'string' ? parseInt(delay, 10) : delay;
const job = new this.Job(this, jobName, jobData, Object.assign(Object.assign({}, mergedOpts), { delay: numericDelay }), jobId);
job.id = jobId;
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.JobSchedulerId]: jobSchedulerId,
[enums_1.TelemetryAttributes.JobId]: job.id,
});
return job;
}
else {
const jobId = await this.scripts.updateJobSchedulerNextMillis(jobSchedulerId, nextMillis, JSON.stringify(typeof jobData === 'undefined' ? {} : jobData), job_1.Job.optsAsJSON(mergedOpts), producerId);
if (jobId) {
const job = new this.Job(this, jobName, jobData, mergedOpts, jobId);
job.id = jobId;
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.JobSchedulerId]: jobSchedulerId,
[enums_1.TelemetryAttributes.JobId]: job.id,
});
return job;
}
}
});
}
}
getNextJobOpts(nextMillis, jobSchedulerId, opts, currentCount, offset) {
var _a, _b;
//
// Generate unique job id for this iteration.
//
const jobId = this.getSchedulerNextJobId({
jobSchedulerId,
nextMillis,
});
const now = Date.now();
const delay = nextMillis + offset - now;
const mergedOpts = Object.assign(Object.assign({}, opts), { jobId, delay: delay < 0 ? 0 : delay, timestamp: now, prevMillis: nextMillis, repeatJobKey: jobSchedulerId });
mergedOpts.repeat = Object.assign(Object.assign({}, opts.repeat), { offset, count: currentCount, startDate: ((_a = opts.repeat) === null || _a === void 0 ? void 0 : _a.startDate)
? new Date(opts.repeat.startDate).getTime()
: undefined, endDate: ((_b = opts.repeat) === null || _b === void 0 ? void 0 : _b.endDate)
? new Date(opts.repeat.endDate).getTime()
: undefined });
return mergedOpts;
}
async removeJobScheduler(jobSchedulerId) {
return this.scripts.removeJobScheduler(jobSchedulerId);
}
async getSchedulerData(client, key, next) {
const jobData = await client.hgetall(this.toKey('repeat:' + key));
return this.transformSchedulerData(key, jobData, next);
}
transformSchedulerData(key, jobData, next) {
if (jobData) {
const jobSchedulerData = {
key,
name: jobData.name,
next,
};
if (jobData.ic) {
jobSchedulerData.iterationCount = parseInt(jobData.ic);
}
if (jobData.limit) {
jobSchedulerData.limit = parseInt(jobData.limit);
}
if (jobData.startDate) {
jobSchedulerData.startDate = parseInt(jobData.startDate);
}
if (jobData.endDate) {
jobSchedulerData.endDate = parseInt(jobData.endDate);
}
if (jobData.tz) {
jobSchedulerData.tz = jobData.tz;
}
if (jobData.pattern) {
jobSchedulerData.pattern = jobData.pattern;
}
if (jobData.every) {
jobSchedulerData.every = parseInt(jobData.every);
}
if (jobData.offset) {
jobSchedulerData.offset = parseInt(jobData.offset);
}
if (jobData.data || jobData.opts) {
jobSchedulerData.template = this.getTemplateFromJSON(jobData.data, jobData.opts);
}
return jobSchedulerData;
}
// TODO: remove this check and keyToData as it is here only to support legacy code
if (key.includes(':')) {
return this.keyToData(key, next);
}
}
keyToData(key, next) {
const data = key.split(':');
const pattern = data.slice(4).join(':') || null;
return {
key,
name: data[0],
id: data[1] || null,
endDate: parseInt(data[2]) || null,
tz: data[3] || null,
pattern,
next,
};
}
async getScheduler(id) {
const [rawJobData, next] = await this.scripts.getJobScheduler(id);
return this.transformSchedulerData(id, rawJobData ? (0, utils_1.array2obj)(rawJobData) : null, next ? parseInt(next) : null);
}
getTemplateFromJSON(rawData, rawOpts) {
const template = {};
if (rawData) {
template.data = JSON.parse(rawData);
}
if (rawOpts) {
template.opts = job_1.Job.optsFromJSON(rawOpts);
}
return template;
}
async getJobSchedulers(start = 0, end = -1, asc = false) {
const client = await this.client;
const jobSchedulersKey = this.keys.repeat;
const result = asc
? await client.zrange(jobSchedulersKey, start, end, 'WITHSCORES')
: await client.zrevrange(jobSchedulersKey, start, end, 'WITHSCORES');
const jobs = [];
for (let i = 0; i < result.length; i += 2) {
jobs.push(this.getSchedulerData(client, result[i], parseInt(result[i + 1])));
}
return Promise.all(jobs);
}
async getSchedulersCount() {
const jobSchedulersKey = this.keys.repeat;
const client = await this.client;
return client.zcard(jobSchedulersKey);
}
getSchedulerNextJobId({ nextMillis, jobSchedulerId, }) {
return `repeat:${jobSchedulerId}:${nextMillis}`;
}
}
exports.JobScheduler = JobScheduler;
const defaultRepeatStrategy = (millis, opts) => {
const { pattern } = opts;
const dateFromMillis = new Date(millis);
const startDate = opts.startDate && new Date(opts.startDate);
const currentDate = startDate > dateFromMillis ? startDate : dateFromMillis;
const interval = (0, cron_parser_1.parseExpression)(pattern, Object.assign(Object.assign({}, opts), { currentDate }));
try {
if (opts.immediately) {
return new Date().getTime();
}
else {
return interval.next().getTime();
}
}
catch (e) {
// Ignore error
}
};
exports.defaultRepeatStrategy = defaultRepeatStrategy;
//# sourceMappingURL=job-scheduler.js.map

File diff suppressed because one or more lines are too long

1053
backend/node_modules/bullmq/dist/cjs/classes/job.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,165 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.LockManager = void 0;
const node_abort_controller_1 = require("node-abort-controller");
const enums_1 = require("../enums");
/**
* Manages lock renewal for BullMQ workers.
* It periodically extends locks for active jobs to prevent them from being
* considered stalled by other workers.
*/
class LockManager {
constructor(worker, opts) {
this.worker = worker;
this.opts = opts;
// Maps job ids with their tokens, timestamps, and abort controllers
this.trackedJobs = new Map();
this.closed = false;
}
/**
* Starts the lock manager timers for lock renewal.
*/
start() {
if (this.closed) {
return;
}
// Start lock renewal timer if not disabled
if (this.opts.lockRenewTime > 0) {
this.startLockExtenderTimer();
}
}
async extendLocks(jobIds) {
await this.worker.trace(enums_1.SpanKind.INTERNAL, 'extendLocks', this.worker.name, async (span) => {
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.WorkerId]: this.opts.workerId,
[enums_1.TelemetryAttributes.WorkerName]: this.opts.workerName,
[enums_1.TelemetryAttributes.WorkerJobsToExtendLocks]: jobIds,
});
try {
const jobTokens = jobIds.map(id => { var _a; return ((_a = this.trackedJobs.get(id)) === null || _a === void 0 ? void 0 : _a.token) || ''; });
const erroredJobIds = await this.worker.extendJobLocks(jobIds, jobTokens, this.opts.lockDuration);
if (erroredJobIds.length > 0) {
this.worker.emit('lockRenewalFailed', erroredJobIds);
for (const jobId of erroredJobIds) {
this.worker.emit('error', new Error(`could not renew lock for job ${jobId}`));
}
}
const succeededJobIds = jobIds.filter(id => !erroredJobIds.includes(id));
if (succeededJobIds.length > 0) {
this.worker.emit('locksRenewed', {
count: succeededJobIds.length,
jobIds: succeededJobIds,
});
}
}
catch (err) {
this.worker.emit('error', err);
}
});
}
startLockExtenderTimer() {
clearTimeout(this.lockRenewalTimer);
if (!this.closed) {
this.lockRenewalTimer = setTimeout(async () => {
// Get all the jobs whose locks expire in less than 1/2 of the lockRenewTime
const now = Date.now();
const jobsToExtend = [];
for (const jobId of this.trackedJobs.keys()) {
const tracked = this.trackedJobs.get(jobId);
const { ts, token, abortController } = tracked;
if (!ts) {
this.trackedJobs.set(jobId, { token, ts: now, abortController });
continue;
}
if (ts + this.opts.lockRenewTime / 2 < now) {
this.trackedJobs.set(jobId, { token, ts: now, abortController });
jobsToExtend.push(jobId);
}
}
if (jobsToExtend.length) {
await this.extendLocks(jobsToExtend);
}
this.startLockExtenderTimer();
}, this.opts.lockRenewTime / 2);
}
}
/**
* Stops the lock manager and clears all timers.
*/
async close() {
if (this.closed) {
return;
}
this.closed = true;
if (this.lockRenewalTimer) {
clearTimeout(this.lockRenewalTimer);
this.lockRenewalTimer = undefined;
}
this.trackedJobs.clear();
}
/**
* Adds a job to be tracked for lock renewal.
* Returns an AbortController if shouldCreateController is true, undefined otherwise.
*/
trackJob(jobId, token, ts, shouldCreateController = false) {
const abortController = shouldCreateController
? new node_abort_controller_1.AbortController()
: undefined;
if (!this.closed && jobId) {
this.trackedJobs.set(jobId, { token, ts, abortController });
}
return abortController;
}
/**
* Removes a job from lock renewal tracking.
*/
untrackJob(jobId) {
this.trackedJobs.delete(jobId);
}
/**
* Gets the number of jobs currently being tracked.
*/
getActiveJobCount() {
return this.trackedJobs.size;
}
/**
* Checks if the lock manager is running.
*/
isRunning() {
return !this.closed && this.lockRenewalTimer !== undefined;
}
/**
* Cancels a specific job by aborting its signal.
* @param jobId - The ID of the job to cancel
* @param reason - Optional reason for the cancellation
* @returns true if the job was found and cancelled, false otherwise
*/
cancelJob(jobId, reason) {
const tracked = this.trackedJobs.get(jobId);
if (tracked === null || tracked === void 0 ? void 0 : tracked.abortController) {
tracked.abortController.abort(reason);
return true;
}
return false;
}
/**
* Cancels all tracked jobs by aborting their signals.
* @param reason - Optional reason for the cancellation
*/
cancelAllJobs(reason) {
for (const tracked of this.trackedJobs.values()) {
if (tracked.abortController) {
tracked.abortController.abort(reason);
}
}
}
/**
* Gets a list of all tracked job IDs.
* @returns Array of job IDs currently being tracked
*/
getTrackedJobIds() {
return Array.from(this.trackedJobs.keys());
}
}
exports.LockManager = LockManager;
//# sourceMappingURL=lock-manager.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"lock-manager.js","sourceRoot":"","sources":["../../../src/classes/lock-manager.ts"],"names":[],"mappings":";;;AAAA,iEAAwD;AACxD,oCAAyD;AAUzD;;;;GAIG;AACH,MAAa,WAAW;IAUtB,YACY,MAAgC,EAChC,IAAwB;QADxB,WAAM,GAAN,MAAM,CAA0B;QAChC,SAAI,GAAJ,IAAI,CAAoB;QATpC,oEAAoE;QAC1D,gBAAW,GAAG,IAAI,GAAG,EAG5B,CAAC;QACM,WAAM,GAAG,KAAK,CAAC;IAKtB,CAAC;IAEJ;;OAEG;IACH,KAAK;QACH,IAAI,IAAI,CAAC,MAAM,EAAE,CAAC;YAChB,OAAO;QACT,CAAC;QAED,2CAA2C;QAC3C,IAAI,IAAI,CAAC,IAAI,CAAC,aAAa,GAAG,CAAC,EAAE,CAAC;YAChC,IAAI,CAAC,sBAAsB,EAAE,CAAC;QAChC,CAAC;IACH,CAAC;IAES,KAAK,CAAC,WAAW,CAAC,MAAgB;QAC1C,MAAM,IAAI,CAAC,MAAM,CAAC,KAAK,CACrB,gBAAQ,CAAC,QAAQ,EACjB,aAAa,EACb,IAAI,CAAC,MAAM,CAAC,IAAI,EAChB,KAAK,EAAE,IAAW,EAAE,EAAE;YACpB,IAAI,aAAJ,IAAI,uBAAJ,IAAI,CAAE,aAAa,CAAC;gBAClB,CAAC,2BAAmB,CAAC,QAAQ,CAAC,EAAE,IAAI,CAAC,IAAI,CAAC,QAAQ;gBAClD,CAAC,2BAAmB,CAAC,UAAU,CAAC,EAAE,IAAI,CAAC,IAAI,CAAC,UAAU;gBACtD,CAAC,2BAAmB,CAAC,uBAAuB,CAAC,EAAE,MAAM;aACtD,CAAC,CAAC;YAEH,IAAI,CAAC;gBACH,MAAM,SAAS,GAAG,MAAM,CAAC,GAAG,CAC1B,EAAE,CAAC,EAAE,WAAC,OAAA,CAAA,MAAA,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,EAAE,CAAC,0CAAE,KAAK,KAAI,EAAE,CAAA,EAAA,CAC5C,CAAC;gBAEF,MAAM,aAAa,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC,cAAc,CACpD,MAAM,EACN,SAAS,EACT,IAAI,CAAC,IAAI,CAAC,YAAY,CACvB,CAAC;gBAEF,IAAI,aAAa,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;oBAC7B,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,mBAAmB,EAAE,aAAa,CAAC,CAAC;oBAErD,KAAK,MAAM,KAAK,IAAI,aAAa,EAAE,CAAC;wBAClC,IAAI,CAAC,MAAM,CAAC,IAAI,CACd,OAAO,EACP,IAAI,KAAK,CAAC,gCAAgC,KAAK,EAAE,CAAC,CACnD,CAAC;oBACJ,CAAC;gBACH,CAAC;gBAED,MAAM,eAAe,GAAG,MAAM,CAAC,MAAM,CACnC,EAAE,CAAC,EAAE,CAAC,CAAC,aAAa,CAAC,QAAQ,CAAC,EAAE,CAAC,CAClC,CAAC;gBAEF,IAAI,eAAe,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;oBAC/B,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,cAAc,EAAE;wBAC/B,KAAK,EAAE,eAAe,CAAC,MAAM;wBAC7B,MAAM,EAAE,eAAe;qBACxB,CAAC,CAAC;gBACL,CAAC;YACH,CAAC;YAAC,OAAO,GAAG,EAAE,CAAC;gBACb,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,OAAO,EAAE,GAAY,CAAC,CAAC;YAC1C,CAAC;QACH,CAAC,CACF,CAAC;IACJ,CAAC;IAEO,sBAAsB;QAC5B,YAAY,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC;QAEpC,IAAI,CAAC,I
AAI,CAAC,MAAM,EAAE,CAAC;YACjB,IAAI,CAAC,gBAAgB,GAAG,UAAU,CAAC,KAAK,IAAI,EAAE;gBAC5C,4EAA4E;gBAC5E,MAAM,GAAG,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;gBACvB,MAAM,YAAY,GAAa,EAAE,CAAC;gBAElC,KAAK,MAAM,KAAK,IAAI,IAAI,CAAC,WAAW,CAAC,IAAI,EAAE,EAAE,CAAC;oBAC5C,MAAM,OAAO,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,CAAE,CAAC;oBAC7C,MAAM,EAAE,EAAE,EAAE,KAAK,EAAE,eAAe,EAAE,GAAG,OAAO,CAAC;oBAC/C,IAAI,CAAC,EAAE,EAAE,CAAC;wBACR,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,EAAE,KAAK,EAAE,EAAE,EAAE,GAAG,EAAE,eAAe,EAAE,CAAC,CAAC;wBACjE,SAAS;oBACX,CAAC;oBAED,IAAI,EAAE,GAAG,IAAI,CAAC,IAAI,CAAC,aAAa,GAAG,CAAC,GAAG,GAAG,EAAE,CAAC;wBAC3C,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,EAAE,KAAK,EAAE,EAAE,EAAE,GAAG,EAAE,eAAe,EAAE,CAAC,CAAC;wBACjE,YAAY,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;oBAC3B,CAAC;gBACH,CAAC;gBAED,IAAI,YAAY,CAAC,MAAM,EAAE,CAAC;oBACxB,MAAM,IAAI,CAAC,WAAW,CAAC,YAAY,CAAC,CAAC;gBACvC,CAAC;gBAED,IAAI,CAAC,sBAAsB,EAAE,CAAC;YAChC,CAAC,EAAE,IAAI,CAAC,IAAI,CAAC,aAAa,GAAG,CAAC,CAAC,CAAC;QAClC,CAAC;IACH,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,IAAI,CAAC,MAAM,EAAE,CAAC;YAChB,OAAO;QACT,CAAC;QAED,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;QAEnB,IAAI,IAAI,CAAC,gBAAgB,EAAE,CAAC;YAC1B,YAAY,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC;YACpC,IAAI,CAAC,gBAAgB,GAAG,SAAS,CAAC;QACpC,CAAC;QAED,IAAI,CAAC,WAAW,CAAC,KAAK,EAAE,CAAC;IAC3B,CAAC;IAED;;;OAGG;IACH,QAAQ,CACN,KAAa,EACb,KAAa,EACb,EAAU,EACV,sBAAsB,GAAG,KAAK;QAE9B,MAAM,eAAe,GAAG,sBAAsB;YAC5C,CAAC,CAAC,IAAI,uCAAe,EAAE;YACvB,CAAC,CAAC,SAAS,CAAC;QACd,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,KAAK,EAAE,CAAC;YAC1B,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,EAAE,KAAK,EAAE,EAAE,EAAE,eAAe,EAAE,CAAC,CAAC;QAC9D,CAAC;QACD,OAAO,eAAe,CAAC;IACzB,CAAC;IAED;;OAEG;IACH,UAAU,CAAC,KAAa;QACtB,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;IACjC,CAAC;IAED;;OAEG;IACH,iBAAiB;QACf,OAAO,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;IAC/B,CAAC;IAED;;OAEG;IACH,SAAS;QACP,OAAO,CAAC,IAAI,CAAC,MAAM,IAAI,IAAI,CAAC,gBAAgB,KAAK,SAAS,CAAC;IAC7D,CAAC;IAED;;;;;OAKG;IACH,SAAS,CAAC,KAAa,EAAE,MAAe;QACtC,MAAM,OAAO,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;QAC
5C,IAAI,OAAO,aAAP,OAAO,uBAAP,OAAO,CAAE,eAAe,EAAE,CAAC;YAC7B,OAAO,CAAC,eAAe,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;YACtC,OAAO,IAAI,CAAC;QACd,CAAC;QACD,OAAO,KAAK,CAAC;IACf,CAAC;IAED;;;OAGG;IACH,aAAa,CAAC,MAAe;QAC3B,KAAK,MAAM,OAAO,IAAI,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,EAAE,CAAC;YAChD,IAAI,OAAO,CAAC,eAAe,EAAE,CAAC;gBAC5B,OAAO,CAAC,eAAe,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;YACxC,CAAC;QACH,CAAC;IACH,CAAC;IAED;;;OAGG;IACH,gBAAgB;QACd,OAAO,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,IAAI,EAAE,CAAC,CAAC;IAC7C,CAAC;CACF;AA5MD,kCA4MC"}

View File

@@ -0,0 +1,44 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
/**
* Wrapper for sandboxing.
*
*/
const child_processor_1 = require("./child-processor");
const enums_1 = require("../enums");
const utils_1 = require("../utils");
exports.default = (send, receiver) => {
const childProcessor = new child_processor_1.ChildProcessor(send, receiver);
receiver === null || receiver === void 0 ? void 0 : receiver.on('message', async (msg) => {
try {
switch (msg.cmd) {
case enums_1.ChildCommand.Init:
await childProcessor.init(msg.value);
break;
case enums_1.ChildCommand.Start:
await childProcessor.start(msg.job, msg === null || msg === void 0 ? void 0 : msg.token);
break;
case enums_1.ChildCommand.Stop:
break;
}
}
catch (err) {
console.error('Error handling child message');
}
});
process.on('SIGTERM', () => childProcessor.waitForCurrentJobAndExit());
process.on('SIGINT', () => childProcessor.waitForCurrentJobAndExit());
process.on('uncaughtException', async (err) => {
if (typeof err !== 'object') {
err = new Error((0, utils_1.toString)(err));
}
await send({
cmd: enums_1.ParentCommand.Failed,
value: (0, utils_1.errorToJSON)(err),
});
// An uncaughException leaves this process in a potentially undetermined state so
// we must exit
process.exit();
});
};
//# sourceMappingURL=main-base.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"main-base.js","sourceRoot":"","sources":["../../../src/classes/main-base.ts"],"names":[],"mappings":";;AAAA;;;GAGG;AACH,uDAAmD;AACnD,oCAAuD;AACvD,oCAAiD;AAGjD,kBAAe,CAAC,IAAiC,EAAE,QAAkB,EAAE,EAAE;IACvE,MAAM,cAAc,GAAG,IAAI,gCAAc,CAAC,IAAI,EAAE,QAAQ,CAAC,CAAC;IAE1D,QAAQ,aAAR,QAAQ,uBAAR,QAAQ,CAAE,EAAE,CAAC,SAAS,EAAE,KAAK,EAAC,GAAG,EAAC,EAAE;QAClC,IAAI,CAAC;YACH,QAAQ,GAAG,CAAC,GAAmB,EAAE,CAAC;gBAChC,KAAK,oBAAY,CAAC,IAAI;oBACpB,MAAM,cAAc,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;oBACrC,MAAM;gBACR,KAAK,oBAAY,CAAC,KAAK;oBACrB,MAAM,cAAc,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,EAAE,GAAG,aAAH,GAAG,uBAAH,GAAG,CAAE,KAAK,CAAC,CAAC;oBAChD,MAAM;gBACR,KAAK,oBAAY,CAAC,IAAI;oBACpB,MAAM;YACV,CAAC;QACH,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACb,OAAO,CAAC,KAAK,CAAC,8BAA8B,CAAC,CAAC;QAChD,CAAC;IACH,CAAC,CAAC,CAAC;IAEH,OAAO,CAAC,EAAE,CAAC,SAAS,EAAE,GAAG,EAAE,CAAC,cAAc,CAAC,wBAAwB,EAAE,CAAC,CAAC;IACvE,OAAO,CAAC,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,cAAc,CAAC,wBAAwB,EAAE,CAAC,CAAC;IAEtE,OAAO,CAAC,EAAE,CAAC,mBAAmB,EAAE,KAAK,EAAE,GAAQ,EAAE,EAAE;QACjD,IAAI,OAAO,GAAG,KAAK,QAAQ,EAAE,CAAC;YAC5B,GAAG,GAAG,IAAI,KAAK,CAAC,IAAA,gBAAQ,EAAC,GAAG,CAAC,CAAC,CAAC;QACjC,CAAC;QAED,MAAM,IAAI,CAAC;YACT,GAAG,EAAE,qBAAa,CAAC,MAAM;YACzB,KAAK,EAAE,IAAA,mBAAW,EAAC,GAAG,CAAC;SACxB,CAAC,CAAC;QAEH,iFAAiF;QACjF,eAAe;QACf,OAAO,CAAC,IAAI,EAAE,CAAC;IACjB,CAAC,CAAC,CAAC;AACL,CAAC,CAAC"}

View File

@@ -0,0 +1 @@
export {};

View File

@@ -0,0 +1,10 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
/**
 * Worker Thread wrapper for sandboxing.
 *
 * Bootstraps the shared sandbox entry point (main-base) with a `send`
 * function that posts messages back to the parent thread; the worker's
 * parentPort acts both as sender and as command receiver.
 */
const worker_threads_1 = require("worker_threads");
const main_base_1 = require("./main-base");
const port = worker_threads_1.parentPort;
(0, main_base_1.default)(async (msg) => port.postMessage(msg), port);
//# sourceMappingURL=main-worker.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"main-worker.js","sourceRoot":"","sources":["../../../src/classes/main-worker.ts"],"names":[],"mappings":";;AAAA;;;GAGG;AACH,mDAA4C;AAC5C,2CAAmC;AAEnC,IAAA,mBAAQ,EAAC,KAAK,EAAE,GAAQ,EAAE,EAAE,CAAC,2BAAU,CAAC,WAAW,CAAC,GAAG,CAAC,EAAE,2BAAU,CAAC,CAAC"}

View File

@@ -0,0 +1 @@
export {};

10
backend/node_modules/bullmq/dist/cjs/classes/main.js generated vendored Normal file
View File

@@ -0,0 +1,10 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
/**
 * Child process wrapper for sandboxing.
 *
 * Boots the shared sandbox entry point (main-base) with a `send` function
 * that forwards messages to the parent over the child-process IPC channel,
 * using `process` itself as the command receiver.
 */
const utils_1 = require("../utils");
const main_base_1 = require("./main-base");
const send = (msg) => (0, utils_1.childSend)(process, msg);
(0, main_base_1.default)(send, process);
//# sourceMappingURL=main.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"main.js","sourceRoot":"","sources":["../../../src/classes/main.ts"],"names":[],"mappings":";;AAAA;;;GAGG;AACH,oCAAqC;AACrC,2CAAmC;AAEnC,IAAA,mBAAQ,EAAC,CAAC,GAAQ,EAAE,EAAE,CAAC,IAAA,iBAAS,EAAC,OAAO,EAAE,GAAG,CAAC,EAAE,OAAO,CAAC,CAAC"}

View File

@@ -0,0 +1,157 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.QueueBase = void 0;
const events_1 = require("events");
const utils_1 = require("../utils");
const create_scripts_1 = require("../utils/create-scripts");
const redis_connection_1 = require("./redis-connection");
const job_1 = require("./job");
const queue_keys_1 = require("./queue-keys");
/**
 * Base class for all classes that need to interact with queues.
 * This class is normally not used directly, but extended by the other classes.
 *
 */
class QueueBase extends events_1.EventEmitter {
    /**
     *
     * @param name - The name of the queue.
     * @param opts - Options for the queue.
     * @param Connection - An optional "Connection" class used to instantiate a Connection. This is useful for
     * testing with mockups and/or extending the Connection class and passing an alternate implementation.
     * @param hasBlockingConnection - Whether this instance needs a dedicated
     * blocking connection (passed through to the Connection as `blocking`).
     * @throws Error if `name` is empty or contains ':' (reserved as the Redis
     * key separator).
     */
    constructor(name, opts = { connection: {} }, Connection = redis_connection_1.RedisConnection, hasBlockingConnection = false) {
        super();
        this.name = name;
        this.opts = opts;
        this.closed = false;
        // Compiler-emitted field initializer; the effective value is assigned
        // from the constructor argument on the next line.
        this.hasBlockingConnection = false;
        this.hasBlockingConnection = hasBlockingConnection;
        // Default key prefix is 'bull'; caller-provided opts take precedence.
        this.opts = Object.assign({ prefix: 'bull' }, opts);
        if (!name) {
            throw new Error('Queue name must be provided');
        }
        if (name.includes(':')) {
            throw new Error('Queue name cannot contain :');
        }
        this.connection = new Connection(opts.connection, {
            // Reuse (share) the connection when an existing Redis instance was passed in.
            shared: (0, utils_1.isRedisInstance)(opts.connection),
            blocking: hasBlockingConnection,
            skipVersionCheck: opts.skipVersionCheck,
            skipWaitingForReady: opts.skipWaitingForReady,
        });
        // Re-emit connection errors on this instance so callers can listen here.
        this.connection.on('error', (error) => this.emit('error', error));
        this.connection.on('close', () => {
            // Only surface unexpected closes; a close during close() is expected.
            if (!this.closing) {
                this.emit('ioredis:close');
            }
        });
        const queueKeys = new queue_keys_1.QueueKeys(opts.prefix);
        this.qualifiedName = queueKeys.getQueueQualifiedName(name);
        this.keys = queueKeys.getKeys(name);
        this.toKey = (type) => queueKeys.toKey(name, type);
        this.createScripts();
    }
    /**
     * Returns a promise that resolves to a redis client. Normally used only by subclasses.
     */
    get client() {
        return this.connection.client;
    }
    // Builds the Lua script helpers bound to this queue instance.
    createScripts() {
        this.scripts = (0, create_scripts_1.createScripts)(this);
    }
    /**
     * Returns the version of the Redis instance the client is connected to,
     */
    get redisVersion() {
        return this.connection.redisVersion;
    }
    /**
     * Helper to easily extend Job class calls.
     */
    get Job() {
        return job_1.Job;
    }
    /**
     * Emits an event. Normally used by subclasses to emit events.
     *
     * If a listener throws, the exception is re-emitted as an 'error' event;
     * if the 'error' listener throws as well, the error is logged and `false`
     * is returned instead of propagating.
     *
     * @param event - The emitted event.
     * @param args -
     * @returns
     */
    emit(event, ...args) {
        try {
            return super.emit(event, ...args);
        }
        catch (err) {
            try {
                return super.emit('error', err);
            }
            catch (err) {
                // We give up if the error event also throws an exception.
                console.error(err);
                return false;
            }
        }
    }
    // Resolves once the underlying client is available (connection ready).
    waitUntilReady() {
        return this.client;
    }
    base64Name() {
        return Buffer.from(this.name).toString('base64');
    }
    // Client name used for Redis CLIENT SETNAME: "<prefix>:<base64(name)><suffix>".
    clientName(suffix = '') {
        const queueNameBase64 = this.base64Name();
        return `${this.opts.prefix}:${queueNameBase64}${suffix}`;
    }
    /**
     *
     * Closes the connection and returns a promise that resolves when the connection is closed.
     */
    async close() {
        if (!this.closing) {
            this.closing = this.connection.close();
        }
        await this.closing;
        this.closed = true;
    }
    /**
     *
     * Force disconnects a connection.
     */
    disconnect() {
        return this.connection.disconnect();
    }
    // Runs `fn`, emitting non-connection errors and pausing on connection
    // errors. NOTE(review): on error this resolves to undefined (no retry);
    // callers are expected to loop and handle an undefined result.
    async checkConnectionError(fn, delayInMs = utils_1.DELAY_TIME_5) {
        try {
            return await fn();
        }
        catch (error) {
            if ((0, utils_1.isNotConnectionError)(error)) {
                this.emit('error', error);
            }
            if (!this.closing && delayInMs) {
                await (0, utils_1.delay)(delayInMs);
            }
            else {
                return;
            }
        }
    }
    /**
     * Wraps the code with telemetry and provides a span for configuration.
     *
     * @param spanKind - kind of the span: Producer, Consumer, Internal
     * @param operation - operation name (such as add, process, etc)
     * @param destination - destination name (normally the queue name)
     * @param callback - code to wrap with telemetry
     * @param srcPropagationMetadata -
     * @returns
     */
    trace(spanKind, operation, destination, callback, srcPropagationMetadata) {
        return (0, utils_1.trace)(this.opts.telemetry, spanKind, this.name, operation, destination, callback, srcPropagationMetadata);
    }
}
exports.QueueBase = QueueBase;
//# sourceMappingURL=queue-base.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"queue-base.js","sourceRoot":"","sources":["../../../src/classes/queue-base.ts"],"names":[],"mappings":";;;AAAA,mCAAsC;AAQtC,oCAMkB;AAClB,4DAAwD;AACxD,yDAAqD;AACrD,+BAA4B;AAC5B,6CAAkD;AAIlD;;;;GAIG;AACH,MAAa,SAAU,SAAQ,qBAAY;IAWzC;;;;;;OAMG;IACH,YACkB,IAAY,EACrB,OAAyB,EAAE,UAAU,EAAE,EAAE,EAAE,EAClD,aAAqC,kCAAe,EACpD,qBAAqB,GAAG,KAAK;QAE7B,KAAK,EAAE,CAAC;QALQ,SAAI,GAAJ,IAAI,CAAQ;QACrB,SAAI,GAAJ,IAAI,CAAuC;QAf1C,WAAM,GAAG,KAAK,CAAC;QACf,0BAAqB,GAAG,KAAK,CAAC;QAoBtC,IAAI,CAAC,qBAAqB,GAAG,qBAAqB,CAAC;QACnD,IAAI,CAAC,IAAI,mBACP,MAAM,EAAE,MAAM,IACX,IAAI,CACR,CAAC;QAEF,IAAI,CAAC,IAAI,EAAE,CAAC;YACV,MAAM,IAAI,KAAK,CAAC,6BAA6B,CAAC,CAAC;QACjD,CAAC;QAED,IAAI,IAAI,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE,CAAC;YACvB,MAAM,IAAI,KAAK,CAAC,6BAA6B,CAAC,CAAC;QACjD,CAAC;QAED,IAAI,CAAC,UAAU,GAAG,IAAI,UAAU,CAAC,IAAI,CAAC,UAAU,EAAE;YAChD,MAAM,EAAE,IAAA,uBAAe,EAAC,IAAI,CAAC,UAAU,CAAC;YACxC,QAAQ,EAAE,qBAAqB;YAC/B,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,mBAAmB,EAAE,IAAI,CAAC,mBAAmB;SAC9C,CAAC,CAAC;QAEH,IAAI,CAAC,UAAU,CAAC,EAAE,CAAC,OAAO,EAAE,CAAC,KAAY,EAAE,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,KAAK,CAAC,CAAC,CAAC;QACzE,IAAI,CAAC,UAAU,CAAC,EAAE,CAAC,OAAO,EAAE,GAAG,EAAE;YAC/B,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;gBAClB,IAAI,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;YAC7B,CAAC;QACH,CAAC,CAAC,CAAC;QAEH,MAAM,SAAS,GAAG,IAAI,sBAAS,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;QAC7C,IAAI,CAAC,aAAa,GAAG,SAAS,CAAC,qBAAqB,CAAC,IAAI,CAAC,CAAC;QAC3D,IAAI,CAAC,IAAI,GAAG,SAAS,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC;QACpC,IAAI,CAAC,KAAK,GAAG,CAAC,IAAY,EAAE,EAAE,CAAC,SAAS,CAAC,KAAK,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC;QAC3D,IAAI,CAAC,aAAa,EAAE,CAAC;IACvB,CAAC;IAED;;OAEG;IACH,IAAI,MAAM;QACR,OAAO,IAAI,CAAC,UAAU,CAAC,MAAM,CAAC;IAChC,CAAC;IAES,aAAa;QACrB,IAAI,CAAC,OAAO,GAAG,IAAA,8BAAa,EAAC,IAAI,CAAC,CAAC;IACrC,CAAC;IAED;;OAEG;IACH,IAAI,YAAY;QACd,OAAO,IAAI,CAAC,UAAU,CAAC,YAAY,CAAC;IACtC,CAAC;IAED;;OAEG;IACH,IAAc,GAAG;QACf,OAAO,SAAG,CAAC;IACb,CAAC;IAED;;;;;;OAMG;IACH,IAAI,CAAC,KAAsB,EAAE,GAAG,IAAW;QACzC,IAAI,CAAC;YACH,OAAO,KAAK,CAAC,IAAI,CAAC,KAAK,EAAE,G
AAG,IAAI,CAAC,CAAC;QACpC,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACb,IAAI,CAAC;gBACH,OAAO,KAAK,CAAC,IAAI,CAAC,OAAO,EAAE,GAAG,CAAC,CAAC;YAClC,CAAC;YAAC,OAAO,GAAG,EAAE,CAAC;gBACb,0DAA0D;gBAC1D,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;gBACnB,OAAO,KAAK,CAAC;YACf,CAAC;QACH,CAAC;IACH,CAAC;IAED,cAAc;QACZ,OAAO,IAAI,CAAC,MAAM,CAAC;IACrB,CAAC;IAES,UAAU;QAClB,OAAO,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;IACnD,CAAC;IAES,UAAU,CAAC,MAAM,GAAG,EAAE;QAC9B,MAAM,eAAe,GAAG,IAAI,CAAC,UAAU,EAAE,CAAC;QAC1C,OAAO,GAAG,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,eAAe,GAAG,MAAM,EAAE,CAAC;IAC3D,CAAC;IAED;;;OAGG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,UAAU,CAAC,KAAK,EAAE,CAAC;QACzC,CAAC;QACD,MAAM,IAAI,CAAC,OAAO,CAAC;QACnB,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;IACrB,CAAC;IAED;;;OAGG;IACH,UAAU;QACR,OAAO,IAAI,CAAC,UAAU,CAAC,UAAU,EAAE,CAAC;IACtC,CAAC;IAES,KAAK,CAAC,oBAAoB,CAClC,EAAoB,EACpB,SAAS,GAAG,oBAAY;QAExB,IAAI,CAAC;YACH,OAAO,MAAM,EAAE,EAAE,CAAC;QACpB,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,IAAI,IAAA,4BAAoB,EAAC,KAAc,CAAC,EAAE,CAAC;gBACzC,IAAI,CAAC,IAAI,CAAC,OAAO,EAAS,KAAK,CAAC,CAAC;YACnC,CAAC;YAED,IAAI,CAAC,IAAI,CAAC,OAAO,IAAI,SAAS,EAAE,CAAC;gBAC/B,MAAM,IAAA,aAAK,EAAC,SAAS,CAAC,CAAC;YACzB,CAAC;iBAAM,CAAC;gBACN,OAAO;YACT,CAAC;QACH,CAAC;IACH,CAAC;IAED;;;;;;;;;OASG;IACH,KAAK,CACH,QAAkB,EAClB,SAAiB,EACjB,WAAmB,EACnB,QAA0E,EAC1E,sBAA+B;QAE/B,OAAO,IAAA,aAAK,EACV,IAAI,CAAC,IAAI,CAAC,SAAS,EACnB,QAAQ,EACR,IAAI,CAAC,IAAI,EACT,SAAS,EACT,WAAW,EACX,QAAQ,EACR,sBAAsB,CACvB,CAAC;IACJ,CAAC;CACF;AA1LD,8BA0LC"}

View File

@@ -0,0 +1,42 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.QueueEventsProducer = void 0;
const tslib_1 = require("tslib");
const queue_base_1 = require("./queue-base");
/**
 * The QueueEventsProducer class is used for publishing custom events.
 */
class QueueEventsProducer extends queue_base_1.QueueBase {
    constructor(name, opts = {
        connection: {},
    }, Connection) {
        super(name, Object.assign({ blockingConnection: false }, opts), Connection);
        this.opts = opts;
    }
    /**
     * Publish custom event to be processed in QueueEvents.
     *
     * @param argsObj - Event payload. Must contain an `eventName` field; every
     * remaining field is appended to the stream entry as a key/value pair.
     * @param maxEvents - Max quantity of events to be saved (approximate
     * trimming via XADD MAXLEN ~).
     */
    async publishEvent(argsObj, maxEvents = 1000) {
        const client = await this.client;
        const eventsKey = this.keys.events;
        const { eventName } = argsObj, restArgs = tslib_1.__rest(argsObj, ["eventName"]);
        const args = ['MAXLEN', '~', maxEvents, '*', 'event', eventName];
        // Fix: use a distinct loop variable; the previous code shadowed the
        // outer stream key with a loop-scoped `key`, which was confusing.
        for (const [field, value] of Object.entries(restArgs)) {
            args.push(field, value);
        }
        await client.xadd(eventsKey, ...args);
    }
    /**
     * Closes the connection and returns a promise that resolves when the connection is closed.
     */
    async close() {
        if (!this.closing) {
            this.closing = this.connection.close();
        }
        await this.closing;
    }
}
exports.QueueEventsProducer = QueueEventsProducer;
//# sourceMappingURL=queue-events-producer.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"queue-events-producer.js","sourceRoot":"","sources":["../../../src/classes/queue-events-producer.ts"],"names":[],"mappings":";;;;AACA,6CAAyC;AAGzC;;GAEG;AACH,MAAa,mBAAoB,SAAQ,sBAAS;IAChD,YACE,IAAY,EACZ,OAAmC;QACjC,UAAU,EAAE,EAAE;KACf,EACD,UAAmC;QAEnC,KAAK,CACH,IAAI,kBAEF,kBAAkB,EAAE,KAAK,IACtB,IAAI,GAET,UAAU,CACX,CAAC;QAEF,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC;IACnB,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,YAAY,CAChB,OAAU,EACV,SAAS,GAAG,IAAI;QAEhB,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC;QACjC,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC;QAC7B,MAAM,EAAE,SAAS,KAAkB,OAAO,EAApB,QAAQ,kBAAK,OAAO,EAApC,aAA0B,CAAU,CAAC;QAC3C,MAAM,IAAI,GAAU,CAAC,QAAQ,EAAE,GAAG,EAAE,SAAS,EAAE,GAAG,EAAE,OAAO,EAAE,SAAS,CAAC,CAAC;QAExE,KAAK,MAAM,CAAC,GAAG,EAAE,KAAK,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC;YACpD,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE,KAAK,CAAC,CAAC;QACxB,CAAC;QAED,MAAM,MAAM,CAAC,IAAI,CAAC,GAAG,EAAE,GAAG,IAAI,CAAC,CAAC;IAClC,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,UAAU,CAAC,KAAK,EAAE,CAAC;QACzC,CAAC;QACD,MAAM,IAAI,CAAC,OAAO,CAAC;IACrB,CAAC;CACF;AAlDD,kDAkDC"}

View File

@@ -0,0 +1,139 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.QueueEvents = void 0;
const tslib_1 = require("tslib");
const utils_1 = require("../utils");
const queue_base_1 = require("./queue-base");
/**
 * The QueueEvents class is used for listening to the global events
 * emitted by a given queue.
 *
 * This class requires a dedicated redis connection.
 *
 */
class QueueEvents extends queue_base_1.QueueBase {
    constructor(name, _a = {
        connection: {},
    }, Connection) {
        var { connection, autorun = true } = _a, opts = tslib_1.__rest(_a, ["connection", "autorun"]);
        // Duplicate a passed-in Redis instance: the blocking XREAD below needs
        // a dedicated connection.
        super(name, Object.assign(Object.assign({}, opts), { connection: (0, utils_1.isRedisInstance)(connection)
                ? connection.duplicate()
                : connection }), Connection, true);
        this.running = false;
        this.blocking = false;
        this.opts = Object.assign({
            blockingTimeout: 10000,
        }, this.opts);
        if (autorun) {
            this.run().catch(error => this.emit('error', error));
        }
    }
    emit(event, ...args) {
        return super.emit(event, ...args);
    }
    off(eventName, listener) {
        super.off(eventName, listener);
        return this;
    }
    on(event, listener) {
        super.on(event, listener);
        return this;
    }
    once(event, listener) {
        super.once(event, listener);
        return this;
    }
    /**
     * Manually starts running the event consuming loop. This shall be used if you do not
     * use the default "autorun" option on the constructor.
     *
     * @throws Error if the consuming loop is already running.
     */
    async run() {
        if (!this.running) {
            try {
                this.running = true;
                const client = await this.client;
                // TODO: Planned for deprecation as it does not really have a use case
                try {
                    await client.client('SETNAME', this.clientName(utils_1.QUEUE_EVENT_SUFFIX));
                }
                catch (err) {
                    // Some providers do not support CLIENT SETNAME; ignore only that error.
                    if (!utils_1.clientCommandMessageReg.test(err.message)) {
                        throw err;
                    }
                }
                await this.consumeEvents(client);
            }
            catch (error) {
                this.running = false;
                throw error;
            }
        }
        else {
            throw new Error('Queue Events is already running.');
        }
    }
    // Blocking-read loop over the events stream; re-emits each stream entry
    // as a local event until close() is called.
    async consumeEvents(client) {
        const opts = this.opts;
        const key = this.keys.events;
        let id = opts.lastEventId || '$';
        while (!this.closing) {
            // `blocking` tracks whether we are parked inside XREAD BLOCK, so
            // close() knows it must force-disconnect to unblock us.
            this.blocking = true;
            // Cast to actual return type, see: https://github.com/DefinitelyTyped/DefinitelyTyped/issues/44301
            const data = await this.checkConnectionError(() => client.xread('BLOCK', opts.blockingTimeout, 'STREAMS', key, id));
            this.blocking = false;
            if (data) {
                const stream = data[0];
                const events = stream[1];
                for (let i = 0; i < events.length; i++) {
                    // Advance the cursor so the next XREAD resumes after this entry.
                    id = events[i][0];
                    const args = (0, utils_1.array2obj)(events[i][1]);
                    //
                    // TODO: we may need to have a separate xtream for progress data
                    // to avoid this hack.
                    switch (args.event) {
                        case 'progress':
                            args.data = JSON.parse(args.data);
                            break;
                        case 'completed':
                            args.returnvalue = JSON.parse(args.returnvalue);
                            break;
                    }
                    const { event } = args, restArgs = tslib_1.__rest(args, ["event"]);
                    if (event === 'drained') {
                        this.emit(event, id);
                    }
                    else {
                        this.emit(event, restArgs, id);
                        if (restArgs.jobId) {
                            // Also emit a job-scoped event, e.g. "completed:<jobId>".
                            this.emit(`${event}:${restArgs.jobId}`, restArgs, id);
                        }
                    }
                }
            }
        }
    }
    /**
     * Stops consuming events and close the underlying Redis connection if necessary.
     *
     * @returns
     */
    async close() {
        if (!this.closing) {
            this.closing = (async () => {
                try {
                    // As the connection has been wrongly marked as "shared" by QueueBase,
                    // we need to forcibly close it here. We should fix QueueBase to avoid this in the future.
                    const client = await this.client;
                    client.disconnect();
                    await this.connection.close(this.blocking);
                }
                finally {
                    this.closed = true;
                }
            })();
        }
        return this.closing;
    }
}
exports.QueueEvents = QueueEvents;
//# sourceMappingURL=queue-events.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"queue-events.js","sourceRoot":"","sources":["../../../src/classes/queue-events.ts"],"names":[],"mappings":";;;;AAOA,oCAKkB;AAClB,6CAAyC;AA8PzC;;;;;;GAMG;AACH,MAAa,WAAY,SAAQ,sBAAS;IAIxC,YACE,IAAY,EACZ,KAA8D;QAC5D,UAAU,EAAE,EAAE;KACf,EACD,UAAmC;YAHnC,EAAE,UAAU,EAAE,OAAO,GAAG,IAAI,OAE3B,EAFgC,IAAI,sBAArC,yBAAuC,CAAF;QAKrC,KAAK,CACH,IAAI,kCAEC,IAAI,KACP,UAAU,EAAE,IAAA,uBAAe,EAAC,UAAU,CAAC;gBACrC,CAAC,CAAe,UAAW,CAAC,SAAS,EAAE;gBACvC,CAAC,CAAC,UAAU,KAEhB,UAAU,EACV,IAAI,CACL,CAAC;QApBI,YAAO,GAAG,KAAK,CAAC;QAChB,aAAQ,GAAG,KAAK,CAAC;QAqBvB,IAAI,CAAC,IAAI,GAAG,MAAM,CAAC,MAAM,CACvB;YACE,eAAe,EAAE,KAAK;SACvB,EACD,IAAI,CAAC,IAAI,CACV,CAAC;QAEF,IAAI,OAAO,EAAE,CAAC;YACZ,IAAI,CAAC,GAAG,EAAE,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,KAAK,CAAC,CAAC,CAAC;QACvD,CAAC;IACH,CAAC;IAED,IAAI,CAGF,KAAQ,EAAE,GAAG,IAA8B;QAC3C,OAAO,KAAK,CAAC,IAAI,CAAC,KAAK,EAAE,GAAG,IAAI,CAAC,CAAC;IACpC,CAAC;IAED,GAAG,CAGD,SAAY,EAAE,QAAgB;QAC9B,KAAK,CAAC,GAAG,CAAC,SAAS,EAAE,QAAoC,CAAC,CAAC;QAC3D,OAAO,IAAI,CAAC;IACd,CAAC;IAED,EAAE,CAGA,KAAQ,EAAE,QAAgB;QAC1B,KAAK,CAAC,EAAE,CAAC,KAAK,EAAE,QAAoC,CAAC,CAAC;QACtD,OAAO,IAAI,CAAC;IACd,CAAC;IAED,IAAI,CAGF,KAAQ,EAAE,QAAgB;QAC1B,KAAK,CAAC,IAAI,CAAC,KAAK,EAAE,QAAoC,CAAC,CAAC;QACxD,OAAO,IAAI,CAAC;IACd,CAAC;IAED;;;OAGG;IACH,KAAK,CAAC,GAAG;QACP,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC;gBACH,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC;gBACpB,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC;gBAEjC,8DAA8D;gBAC9D,IAAI,CAAC;oBACH,MAAM,MAAM,CAAC,MAAM,CAAC,SAAS,EAAE,IAAI,CAAC,UAAU,CAAC,0BAAkB,CAAC,CAAC,CAAC;gBACtE,CAAC;gBAAC,OAAO,GAAG,EAAE,CAAC;oBACb,IAAI,CAAC,+BAAuB,CAAC,IAAI,CAAS,GAAI,CAAC,OAAO,CAAC,EAAE,CAAC;wBACxD,MAAM,GAAG,CAAC;oBACZ,CAAC;gBACH,CAAC;gBAED,MAAM,IAAI,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC;YACnC,CAAC;YAAC,OAAO,KAAK,EAAE,CAAC;gBACf,IAAI,CAAC,OAAO,GAAG,KAAK,CAAC;gBACrB,MAAM,KAAK,CAAC;YACd,CAAC;QACH,CAAC;aAAM,CAAC;YACN,MAAM,IAAI,KAAK,CAAC,kCAAkC,CAAC,CAAC;QACtD,CAAC;IACH,CAAC;IAEO,KAAK,CAAC,aAAa,CAAC,MAAmB;QAC7C,MAAM,IAAI,GAAuB,IAAI,CAAC,IAAI,CAA
C;QAE3C,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC;QAC7B,IAAI,EAAE,GAAG,IAAI,CAAC,WAAW,IAAI,GAAG,CAAC;QAEjC,OAAO,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YACrB,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC;YACrB,mGAAmG;YACnG,MAAM,IAAI,GAAkB,MAAM,IAAI,CAAC,oBAAoB,CAAC,GAAG,EAAE,CAC/D,MAAM,CAAC,KAAK,CAAC,OAAO,EAAE,IAAI,CAAC,eAAgB,EAAE,SAAS,EAAE,GAAG,EAAE,EAAE,CAAC,CACjE,CAAC;YACF,IAAI,CAAC,QAAQ,GAAG,KAAK,CAAC;YACtB,IAAI,IAAI,EAAE,CAAC;gBACT,MAAM,MAAM,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;gBACvB,MAAM,MAAM,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;gBAEzB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;oBACvC,EAAE,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;oBAClB,MAAM,IAAI,GAAG,IAAA,iBAAS,EAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;oBAErC,EAAE;oBACF,gEAAgE;oBAChE,sBAAsB;oBACtB,QAAQ,IAAI,CAAC,KAAK,EAAE,CAAC;wBACnB,KAAK,UAAU;4BACb,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;4BAClC,MAAM;wBACR,KAAK,WAAW;4BACd,IAAI,CAAC,WAAW,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;4BAChD,MAAM;oBACV,CAAC;oBAED,MAAM,EAAE,KAAK,KAAkB,IAAI,EAAjB,QAAQ,kBAAK,IAAI,EAA7B,SAAsB,CAAO,CAAC;oBAEpC,IAAI,KAAK,KAAK,SAAS,EAAE,CAAC;wBACxB,IAAI,CAAC,IAAI,CAAC,KAAK,EAAE,EAAE,CAAC,CAAC;oBACvB,CAAC;yBAAM,CAAC;wBACN,IAAI,CAAC,IAAI,CAAC,KAAY,EAAE,QAAQ,EAAE,EAAE,CAAC,CAAC;wBACtC,IAAI,QAAQ,CAAC,KAAK,EAAE,CAAC;4BACnB,IAAI,CAAC,IAAI,CAAC,GAAG,KAAK,IAAI,QAAQ,CAAC,KAAK,EAAS,EAAE,QAAQ,EAAE,EAAE,CAAC,CAAC;wBAC/D,CAAC;oBACH,CAAC;gBACH,CAAC;YACH,CAAC;QACH,CAAC;IACH,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC,OAAO,GAAG,CAAC,KAAK,IAAI,EAAE;gBACzB,IAAI,CAAC;oBACH,sEAAsE;oBACtE,0FAA0F;oBAC1F,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC;oBACjC,MAAM,CAAC,UAAU,EAAE,CAAC;oBACpB,MAAM,IAAI,CAAC,UAAU,CAAC,KAAK,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;gBAC7C,CAAC;wBAAS,CAAC;oBACT,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;gBACrB,CAAC;YACH,CAAC,CAAC,EAAE,CAAC;QACP,CAAC;QACD,OAAO,IAAI,CAAC,OAAO,CAAC;IACtB,CAAC;CACF;AApKD,kCAoKC"}

View File

@@ -0,0 +1,509 @@
/*eslint-env node */
'use strict';
Object.defineProperty(exports, "__esModule", { value: true });
exports.QueueGetters = void 0;
const tslib_1 = require("tslib");
const queue_base_1 = require("./queue-base");
const utils_1 = require("../utils");
/**
* Provides different getters for different aspects of a queue.
*/
class QueueGetters extends queue_base_1.QueueBase {
    /**
     * Fetches a job by its id.
     *
     * @param jobId - the id of the job to fetch.
     * @returns whatever Job.fromId resolves to for this id — presumably a
     * nullish value when the job does not exist; TODO confirm against Job.fromId.
     */
    getJob(jobId) {
        return this.Job.fromId(this, jobId);
    }
commandByType(types, count, callback) {
return types.map((type) => {
type = type === 'waiting' ? 'wait' : type; // alias
const key = this.toKey(type);
switch (type) {
case 'completed':
case 'failed':
case 'delayed':
case 'prioritized':
case 'repeat':
case 'waiting-children':
return callback(key, count ? 'zcard' : 'zrange');
case 'active':
case 'wait':
case 'paused':
return callback(key, count ? 'llen' : 'lrange');
}
});
}
sanitizeJobTypes(types) {
const currentTypes = typeof types === 'string' ? [types] : types;
if (Array.isArray(currentTypes) && currentTypes.length > 0) {
const sanitizedTypes = [...currentTypes];
if (sanitizedTypes.indexOf('waiting') !== -1) {
sanitizedTypes.push('paused');
}
return [...new Set(sanitizedTypes)];
}
return [
'active',
'completed',
'delayed',
'failed',
'paused',
'prioritized',
'waiting',
'waiting-children',
];
}
    /**
    Returns the number of jobs waiting to be processed. This includes jobs that are
    "waiting" or "paused" or "delayed" or "prioritized" or "waiting-children".
    */
    async count() {
        const count = await this.getJobCountByTypes('waiting', 'paused', 'delayed', 'prioritized', 'waiting-children');
        return count;
    }
    /**
     * Returns the time to live for a rate limited key in milliseconds.
     * @param maxJobs - max jobs to be considered in rate limit state. If not passed
     * it will return the remaining ttl without considering if max jobs is exceeded.
     * @returns -2 if the key does not exist.
     * -1 if the key exists but has no associated expire.
     * @see {@link https://redis.io/commands/pttl/}
     */
    async getRateLimitTtl(maxJobs) {
        return this.scripts.getRateLimitTtl(maxJobs);
    }
    /**
     * Get jobId that starts debounced state.
     * @deprecated use getDeduplicationJobId method
     *
     * @param id - debounce identifier
     * @returns the stored job id, or null when no entry exists for `id`.
     */
    async getDebounceJobId(id) {
        const client = await this.client;
        // Reads the same key space as getDeduplicationJobId (keys.de).
        return client.get(`${this.keys.de}:${id}`);
    }
    /**
     * Get jobId from deduplicated state.
     *
     * @param id - deduplication identifier
     * @returns the stored job id, or null when no entry exists for `id`.
     */
    async getDeduplicationJobId(id) {
        const client = await this.client;
        return client.get(`${this.keys.de}:${id}`);
    }
/**
* Get global concurrency value.
* Returns null in case no value is set.
*/
async getGlobalConcurrency() {
const client = await this.client;
const concurrency = await client.hget(this.keys.meta, 'concurrency');
if (concurrency) {
return Number(concurrency);
}
return null;
}
/**
* Get global rate limit values.
* Returns null in case no value is set.
*/
async getGlobalRateLimit() {
const client = await this.client;
const [max, duration] = await client.hmget(this.keys.meta, 'max', 'duration');
if (max && duration) {
return {
max: Number(max),
duration: Number(duration),
};
}
return null;
}
/**
* Job counts by type
*
* Queue#getJobCountByTypes('completed') =\> completed count
* Queue#getJobCountByTypes('completed', 'failed') =\> completed + failed count
* Queue#getJobCountByTypes('completed', 'waiting', 'failed') =\> completed + waiting + failed count
*/
async getJobCountByTypes(...types) {
const result = await this.getJobCounts(...types);
return Object.values(result).reduce((sum, count) => sum + count, 0);
}
/**
* Returns the job counts for each type specified or every list/set in the queue by default.
*
* @returns An object, key (type) and value (count)
*/
async getJobCounts(...types) {
const currentTypes = this.sanitizeJobTypes(types);
const responses = await this.scripts.getCounts(currentTypes);
const counts = {};
responses.forEach((res, index) => {
counts[currentTypes[index]] = res || 0;
});
return counts;
}
    /**
     * Get current job state.
     *
     * Delegates to the getState script.
     *
     * @param jobId - job identifier.
     * @returns Returns one of these values:
     * 'completed', 'failed', 'delayed', 'active', 'waiting', 'waiting-children', 'unknown'.
     */
    getJobState(jobId) {
        return this.scripts.getState(jobId);
    }
    /**
     * Get global queue configuration.
     *
     * @returns Returns the global queue configuration.
     */
    async getMeta() {
        const client = await this.client;
        const config = await client.hgetall(this.keys.meta);
        // Pull out the fields needing numeric/boolean conversion; everything
        // else is passed through unchanged via `rest`.
        const { concurrency, max, duration, paused, 'opts.maxLenEvents': maxLenEvents } = config, rest = tslib_1.__rest(config, ["concurrency", "max", "duration", "paused", 'opts.maxLenEvents']);
        const parsedConfig = rest;
        if (concurrency) {
            parsedConfig['concurrency'] = Number(concurrency);
        }
        if (maxLenEvents) {
            parsedConfig['maxLenEvents'] = Number(maxLenEvents);
        }
        if (max) {
            parsedConfig['max'] = Number(max);
        }
        if (duration) {
            parsedConfig['duration'] = Number(duration);
        }
        // Redis stores the paused flag as the string '1'.
        parsedConfig['paused'] = paused === '1';
        return parsedConfig;
    }
    /**
     * @returns Returns the number of jobs in completed status.
     */
    getCompletedCount() {
        return this.getJobCountByTypes('completed');
    }
    /**
     * @returns Returns the number of jobs in failed status.
     */
    getFailedCount() {
        return this.getJobCountByTypes('failed');
    }
    /**
     * @returns Returns the number of jobs in delayed status.
     */
    getDelayedCount() {
        return this.getJobCountByTypes('delayed');
    }
    /**
     * @returns Returns the number of jobs in active status.
     */
    getActiveCount() {
        return this.getJobCountByTypes('active');
    }
    /**
     * @returns Returns the number of jobs in prioritized status.
     */
    getPrioritizedCount() {
        return this.getJobCountByTypes('prioritized');
    }
/**
* Returns the number of jobs per priority.
*/
async getCountsPerPriority(priorities) {
const uniquePriorities = [...new Set(priorities)];
const responses = await this.scripts.getCountsPerPriority(uniquePriorities);
const counts = {};
responses.forEach((res, index) => {
counts[`${uniquePriorities[index]}`] = res || 0;
});
return counts;
}
    /**
     * @returns Returns the number of jobs in waiting or paused statuses
     * ('waiting' implies 'paused', see sanitizeJobTypes).
     */
    getWaitingCount() {
        return this.getJobCountByTypes('waiting');
    }
    /**
     * @returns Returns the number of jobs in waiting-children status.
     */
    getWaitingChildrenCount() {
        return this.getJobCountByTypes('waiting-children');
    }
    /**
     * Returns the jobs that are in the "waiting" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @returns a promise resolving to the matching jobs (asc flag = true).
     */
    getWaiting(start = 0, end = -1) {
        return this.getJobs(['waiting'], start, end, true);
    }
    /**
     * Returns the jobs that are in the "waiting-children" status.
     * I.E. parent jobs that have at least one child that has not completed yet.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @returns a promise resolving to the matching jobs (asc flag = true).
     */
    getWaitingChildren(start = 0, end = -1) {
        return this.getJobs(['waiting-children'], start, end, true);
    }
    /**
     * Returns the jobs that are in the "active" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @returns a promise resolving to the matching jobs (asc flag = true).
     */
    getActive(start = 0, end = -1) {
        return this.getJobs(['active'], start, end, true);
    }
    /**
     * Returns the jobs that are in the "delayed" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @returns a promise resolving to the matching jobs (asc flag = true).
     */
    getDelayed(start = 0, end = -1) {
        return this.getJobs(['delayed'], start, end, true);
    }
    /**
     * Returns the jobs that are in the "prioritized" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @returns a promise resolving to the matching jobs (asc flag = true).
     */
    getPrioritized(start = 0, end = -1) {
        return this.getJobs(['prioritized'], start, end, true);
    }
    /**
     * Returns the jobs that are in the "completed" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @returns a promise resolving to the matching jobs (asc flag = false).
     */
    getCompleted(start = 0, end = -1) {
        return this.getJobs(['completed'], start, end, false);
    }
    /**
     * Returns the jobs that are in the "failed" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @returns a promise resolving to the matching jobs (asc flag = false).
     */
    getFailed(start = 0, end = -1) {
        return this.getJobs(['failed'], start, end, false);
    }
/**
* Returns the qualified job ids and the raw job data (if available) of the
* children jobs of the given parent job.
* It is possible to get either the already processed children, in this case
* an array of qualified job ids and their result values will be returned,
* or the pending children, in this case an array of qualified job ids will
* be returned.
* A qualified job id is a string representing the job id in a given queue,
* for example: "bull:myqueue:jobid".
*
* @param parentId - The id of the parent job
* @param type - "processed" | "pending"
* @param opts - Options for the query.
*
* @returns an object with the following shape:
* `{ items: { id: string, v?: any, err?: string } [], jobs: JobJsonRaw[], total: number}`
*/
async getDependencies(parentId, type, start, end) {
const key = this.toKey(type == 'processed'
? `${parentId}:processed`
: `${parentId}:dependencies`);
const { items, total, jobs } = await this.scripts.paginate(key, {
start,
end,
fetchJobs: true,
});
return {
items,
jobs,
total,
};
}
async getRanges(types, start = 0, end = 1, asc = false) {
const multiCommands = [];
this.commandByType(types, false, (key, command) => {
switch (command) {
case 'lrange':
multiCommands.push('lrange');
break;
case 'zrange':
multiCommands.push('zrange');
break;
}
});
const responses = await this.scripts.getRanges(types, start, end, asc);
let results = [];
responses.forEach((response, index) => {
const result = response || [];
if (asc && multiCommands[index] === 'lrange') {
results = results.concat(result.reverse());
}
else {
results = results.concat(result);
}
});
return [...new Set(results)];
}
    /**
     * Returns the jobs that are on the given statuses (note that JobType is synonym for job status)
     * @param types - the statuses of the jobs to return.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @param asc - if true, the jobs will be returned in ascending order.
     */
    async getJobs(types, start = 0, end = -1, asc = false) {
        // 'waiting' implicitly includes 'paused' (see sanitizeJobTypes).
        const currentTypes = this.sanitizeJobTypes(types);
        const jobIds = await this.getRanges(currentTypes, start, end, asc);
        return Promise.all(jobIds.map(jobId => this.Job.fromId(this, jobId)));
    }
/**
* Returns the logs for a given Job.
* @param jobId - the id of the job to get the logs for.
* @param start - zero based index from where to start returning jobs.
* @param end - zero based index where to stop returning jobs.
* @param asc - if true, the jobs will be returned in ascending order.
*/
async getJobLogs(jobId, start = 0, end = -1, asc = true) {
const client = await this.client;
const multi = client.multi();
const logsKey = this.toKey(jobId + ':logs');
if (asc) {
multi.lrange(logsKey, start, end);
}
else {
multi.lrange(logsKey, -(end + 1), -(start + 1));
}
multi.llen(logsKey);
const result = (await multi.exec());
if (!asc) {
result[0][1].reverse();
}
return {
logs: result[0][1],
count: result[1][1],
};
}
async baseGetClients(matcher) {
const client = await this.client;
try {
const clients = (await client.client('LIST'));
const list = this.parseClientList(clients, matcher);
return list;
}
catch (err) {
if (!utils_1.clientCommandMessageReg.test(err.message)) {
throw err;
}
return [{ name: 'GCP does not support client list' }];
}
}
/**
* Get the worker list related to the queue. i.e. all the known
* workers that are available to process jobs for this queue.
* Note: GCP does not support SETNAME, so this call will not work
*
* @returns - Returns an array with workers info.
*/
getWorkers() {
const unnamedWorkerClientName = `${this.clientName()}`;
const namedWorkerClientName = `${this.clientName()}:w:`;
const matcher = (name) => name &&
(name === unnamedWorkerClientName ||
name.startsWith(namedWorkerClientName));
return this.baseGetClients(matcher);
}
/**
* Returns the current count of workers for the queue.
*
* getWorkersCount(): Promise<number>
*
*/
async getWorkersCount() {
const workers = await this.getWorkers();
return workers.length;
}
/**
* Get queue events list related to the queue.
* Note: GCP does not support SETNAME, so this call will not work
*
* @deprecated do not use this method, it will be removed in the future.
*
* @returns - Returns an array with queue events info.
*/
async getQueueEvents() {
const clientName = `${this.clientName()}${utils_1.QUEUE_EVENT_SUFFIX}`;
return this.baseGetClients((name) => name === clientName);
}
/**
* Get queue metrics related to the queue.
*
* This method returns the gathered metrics for the queue.
* The metrics are represented as an array of job counts
* per unit of time (1 minute).
*
* @param start - Start point of the metrics, where 0
* is the newest point to be returned.
* @param end - End point of the metrics, where -1 is the
* oldest point to be returned.
*
* @returns - Returns an object with queue metrics.
*/
async getMetrics(type, start = 0, end = -1) {
const [meta, data, count] = await this.scripts.getMetrics(type, start, end);
return {
meta: {
count: parseInt(meta[0] || '0', 10),
prevTS: parseInt(meta[1] || '0', 10),
prevCount: parseInt(meta[2] || '0', 10),
},
data: data.map(point => +point || 0),
count,
};
}
parseClientList(list, matcher) {
const lines = list.split(/\r?\n/);
const clients = [];
lines.forEach((line) => {
const client = {};
const keyValues = line.split(' ');
keyValues.forEach(function (keyValue) {
const index = keyValue.indexOf('=');
const key = keyValue.substring(0, index);
const value = keyValue.substring(index + 1);
client[key] = value;
});
const name = client['name'];
if (matcher(name)) {
client['name'] = this.name;
client['rawname'] = name;
clients.push(client);
}
});
return clients;
}
/**
* Export the metrics for the queue in the Prometheus format.
* Automatically exports all the counts returned by getJobCounts().
*
* @returns - Returns a string with the metrics in the Prometheus format.
*
* @see {@link https://prometheus.io/docs/instrumenting/exposition_formats/}
*
**/
async exportPrometheusMetrics(globalVariables) {
const counts = await this.getJobCounts();
const metrics = [];
// Match the test's expected HELP text
metrics.push('# HELP bullmq_job_count Number of jobs in the queue by state');
metrics.push('# TYPE bullmq_job_count gauge');
const variables = !globalVariables
? ''
: Object.keys(globalVariables).reduce((acc, curr) => `${acc}, ${curr}="${globalVariables[curr]}"`, '');
for (const [state, count] of Object.entries(counts)) {
metrics.push(`bullmq_job_count{queue="${this.name}", state="${state}"${variables}} ${count}`);
}
return metrics.join('\n');
}
}
exports.QueueGetters = QueueGetters;
//# sourceMappingURL=queue-getters.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,43 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.QueueKeys = void 0;
/**
 * QueueKeys
 *
 * Builds the fully qualified Redis key names used by a queue, all of the
 * form `<prefix>:<queueName>:<suffix>`.
 */
class QueueKeys {
    constructor(prefix = 'bull') {
        this.prefix = prefix;
    }
    /**
     * Returns a map from key suffix to its fully qualified Redis key for
     * the given queue name. The empty suffix yields the queue's base key.
     */
    getKeys(name) {
        const suffixes = [
            '',
            'active',
            'wait',
            'waiting-children',
            'paused',
            'id',
            'delayed',
            'prioritized',
            'stalled-check',
            'completed',
            'failed',
            'stalled',
            'repeat',
            'limiter',
            'meta',
            'events',
            'pc', // priority counter key
            'marker', // marker key
            'de', // deduplication key
        ];
        const keys = {};
        for (const suffix of suffixes) {
            keys[suffix] = this.toKey(name, suffix);
        }
        return keys;
    }
    // `<prefix>:<name>:<type>`
    toKey(name, type) {
        return `${this.getQueueQualifiedName(name)}:${type}`;
    }
    // `<prefix>:<name>`
    getQueueQualifiedName(name) {
        return `${this.prefix}:${name}`;
    }
}
exports.QueueKeys = QueueKeys;
//# sourceMappingURL=queue-keys.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"queue-keys.js","sourceRoot":"","sources":["../../../src/classes/queue-keys.ts"],"names":[],"mappings":";;;AAEA,MAAa,SAAS;IACpB,YAA4B,SAAS,MAAM;QAAf,WAAM,GAAN,MAAM,CAAS;IAAG,CAAC;IAE/C,OAAO,CAAC,IAAY;QAClB,MAAM,IAAI,GAAgC,EAAE,CAAC;QAC7C;YACE,EAAE;YACF,QAAQ;YACR,MAAM;YACN,kBAAkB;YAClB,QAAQ;YACR,IAAI;YACJ,SAAS;YACT,aAAa;YACb,eAAe;YACf,WAAW;YACX,QAAQ;YACR,SAAS;YACT,QAAQ;YACR,SAAS;YACT,MAAM;YACN,QAAQ;YACR,IAAI,EAAE,uBAAuB;YAC7B,QAAQ,EAAE,aAAa;YACvB,IAAI,EAAE,oBAAoB;SAC3B,CAAC,OAAO,CAAC,GAAG,CAAC,EAAE;YACd,IAAI,CAAC,GAAG,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,EAAE,GAAG,CAAC,CAAC;QACpC,CAAC,CAAC,CAAC;QAEH,OAAO,IAAI,CAAC;IACd,CAAC;IAED,KAAK,CAAC,IAAY,EAAE,IAAY;QAC9B,OAAO,GAAG,IAAI,CAAC,qBAAqB,CAAC,IAAI,CAAC,IAAI,IAAI,EAAE,CAAC;IACvD,CAAC;IAED,qBAAqB,CAAC,IAAY;QAChC,OAAO,GAAG,IAAI,CAAC,MAAM,IAAI,IAAI,EAAE,CAAC;IAClC,CAAC;CACF;AAvCD,8BAuCC"}

652
backend/node_modules/bullmq/dist/cjs/classes/queue.js generated vendored Normal file
View File

@@ -0,0 +1,652 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Queue = void 0;
const uuid_1 = require("uuid");
const job_1 = require("./job");
const queue_getters_1 = require("./queue-getters");
const repeat_1 = require("./repeat");
const enums_1 = require("../enums");
const job_scheduler_1 = require("./job-scheduler");
const version_1 = require("../version");
/**
* Queue
*
* This class provides methods to add jobs to a queue and some other high-level
* administration such as pausing or deleting queues.
*
* @typeParam DataType - The type of the data that the job will process.
* @typeParam ResultType - The type of the result of the job.
* @typeParam NameType - The type of the name of the job.
*
* @example
*
* ```typescript
* import { Queue } from 'bullmq';
*
* interface MyDataType {
* foo: string;
* }
*
* interface MyResultType {
* bar: string;
* }
*
* const queue = new Queue<MyDataType, MyResultType, "blue" | "brown">('myQueue');
* ```
*/
class Queue extends queue_getters_1.QueueGetters {
    constructor(name, opts, Connection) {
        var _a;
        super(name, Object.assign({}, opts), Connection);
        this.token = (0, uuid_1.v4)();
        this.libName = 'bullmq';
        this.jobsOpts = (_a = opts === null || opts === void 0 ? void 0 : opts.defaultJobOptions) !== null && _a !== void 0 ? _a : {};
        // Best-effort update of the queue's meta hash on startup; failures
        // surface through the 'error' event instead of an unhandled rejection.
        this.waitUntilReady()
            .then(client => {
            if (!this.closing && !(opts === null || opts === void 0 ? void 0 : opts.skipMetasUpdate)) {
                return client.hmset(this.keys.meta, this.metaValues);
            }
        })
            .catch(err => {
            // We ignore this error to avoid warnings. The error can still
            // be received by listening to event 'error'
        });
    }
    // Typed EventEmitter overrides: delegate straight to the base class.
    emit(event, ...args) {
        return super.emit(event, ...args);
    }
    off(eventName, listener) {
        super.off(eventName, listener);
        return this;
    }
    on(event, listener) {
        super.on(event, listener);
        return this;
    }
    once(event, listener) {
        super.once(event, listener);
        return this;
    }
    /**
     * Returns this instance current default job options.
     */
    get defaultJobOptions() {
        return Object.assign({}, this.jobsOpts);
    }
    // Values written into the queue's meta hash by the constructor.
    get metaValues() {
        var _a, _b, _c, _d;
        return {
            'opts.maxLenEvents': (_d = (_c = (_b = (_a = this.opts) === null || _a === void 0 ? void 0 : _a.streams) === null || _b === void 0 ? void 0 : _b.events) === null || _c === void 0 ? void 0 : _c.maxLen) !== null && _d !== void 0 ? _d : 10000,
            version: `${this.libName}:${version_1.version}`,
        };
    }
    /**
     * Get library version.
     *
     * @returns the content of the meta.library field.
     */
    async getVersion() {
        const client = await this.client;
        return await client.hget(this.keys.meta, 'version');
    }
    // Lazily instantiated Repeat manager sharing this queue's connection.
    get repeat() {
        return new Promise(async (resolve) => {
            if (!this._repeat) {
                this._repeat = new repeat_1.Repeat(this.name, Object.assign(Object.assign({}, this.opts), { connection: await this.client }));
                // Fix: re-emit child errors. Previously this was
                // `e => this.emit.bind(this, e)`, which only created a bound
                // function and discarded it, so errors were never forwarded.
                this._repeat.on('error', e => this.emit('error', e));
            }
            resolve(this._repeat);
        });
    }
    // Lazily instantiated JobScheduler sharing this queue's connection.
    get jobScheduler() {
        return new Promise(async (resolve) => {
            if (!this._jobScheduler) {
                this._jobScheduler = new job_scheduler_1.JobScheduler(this.name, Object.assign(Object.assign({}, this.opts), { connection: await this.client }));
                // Fix: re-emit child errors (same defect as in the `repeat` getter).
                this._jobScheduler.on('error', e => this.emit('error', e));
            }
            resolve(this._jobScheduler);
        });
    }
    /**
     * Enable and set global concurrency value.
     * @param concurrency - Maximum number of simultaneous jobs that the workers can handle.
     * For instance, setting this value to 1 ensures that no more than one job
     * is processed at any given time. If this limit is not defined, there will be no
     * restriction on the number of concurrent jobs.
     */
    async setGlobalConcurrency(concurrency) {
        const client = await this.client;
        return client.hset(this.keys.meta, 'concurrency', concurrency);
    }
    /**
     * Enable and set rate limit.
     * @param max - Max number of jobs to process in the time period specified in `duration`
     * @param duration - Time in milliseconds. During this time, a maximum of `max` jobs will be processed.
     */
    async setGlobalRateLimit(max, duration) {
        const client = await this.client;
        return client.hset(this.keys.meta, 'max', max, 'duration', duration);
    }
    /**
     * Remove global concurrency value.
     */
    async removeGlobalConcurrency() {
        const client = await this.client;
        return client.hdel(this.keys.meta, 'concurrency');
    }
    /**
     * Remove global rate limit values.
     */
    async removeGlobalRateLimit() {
        const client = await this.client;
        return client.hdel(this.keys.meta, 'max', 'duration');
    }
    /**
     * Adds a new job to the queue.
     *
     * @param name - Name of the job to be added to the queue.
     * @param data - Arbitrary data to append to the job.
     * @param opts - Job options that affects how the job is going to be processed.
     */
    async add(name, data, opts) {
        return this.trace(enums_1.SpanKind.PRODUCER, 'add', `${this.name}.${name}`, async (span, srcPropagationMedatada) => {
            var _a;
            if (srcPropagationMedatada && !((_a = opts === null || opts === void 0 ? void 0 : opts.telemetry) === null || _a === void 0 ? void 0 : _a.omitContext)) {
                const telemetry = {
                    metadata: srcPropagationMedatada,
                };
                opts = Object.assign(Object.assign({}, opts), { telemetry });
            }
            const job = await this.addJob(name, data, opts);
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.JobName]: name,
                [enums_1.TelemetryAttributes.JobId]: job.id,
            });
            return job;
        });
    }
    /**
     * addJob is a telemetry free version of the add method, useful in order to wrap it
     * with custom telemetry on subclasses.
     *
     * @param name - Name of the job to be added to the queue.
     * @param data - Arbitrary data to append to the job.
     * @param opts - Job options that affects how the job is going to be processed.
     *
     * @returns Job
     */
    async addJob(name, data, opts) {
        if (opts && opts.repeat) {
            if (opts.repeat.endDate) {
                if (+new Date(opts.repeat.endDate) < Date.now()) {
                    throw new Error('End date must be greater than current timestamp');
                }
            }
            return (await this.repeat).updateRepeatableJob(name, data, Object.assign(Object.assign({}, this.jobsOpts), opts), { override: true });
        }
        else {
            const jobId = opts === null || opts === void 0 ? void 0 : opts.jobId;
            // '0' and '0:'-prefixed ids are reserved internally.
            if (jobId == '0' || (jobId === null || jobId === void 0 ? void 0 : jobId.startsWith('0:'))) {
                throw new Error("JobId cannot be '0' or start with 0:");
            }
            const job = await this.Job.create(this, name, data, Object.assign(Object.assign(Object.assign({}, this.jobsOpts), opts), { jobId }));
            this.emit('waiting', job);
            return job;
        }
    }
    /**
     * Adds an array of jobs to the queue. This method may be faster than adding
     * one job at a time in a sequence.
     *
     * @param jobs - The array of jobs to add to the queue. Each job is defined by 3
     * properties, 'name', 'data' and 'opts'. They follow the same signature as 'Queue.add'.
     */
    async addBulk(jobs) {
        return this.trace(enums_1.SpanKind.PRODUCER, 'addBulk', this.name, async (span, srcPropagationMedatada) => {
            if (span) {
                span.setAttributes({
                    [enums_1.TelemetryAttributes.BulkNames]: jobs.map(job => job.name),
                    [enums_1.TelemetryAttributes.BulkCount]: jobs.length,
                });
            }
            return await this.Job.createBulk(this, jobs.map(job => {
                var _a, _b, _c, _d, _e, _f;
                let telemetry = (_a = job.opts) === null || _a === void 0 ? void 0 : _a.telemetry;
                if (srcPropagationMedatada) {
                    const omitContext = (_c = (_b = job.opts) === null || _b === void 0 ? void 0 : _b.telemetry) === null || _c === void 0 ? void 0 : _c.omitContext;
                    const telemetryMetadata = ((_e = (_d = job.opts) === null || _d === void 0 ? void 0 : _d.telemetry) === null || _e === void 0 ? void 0 : _e.metadata) ||
                        (!omitContext && srcPropagationMedatada);
                    if (telemetryMetadata || omitContext) {
                        telemetry = {
                            metadata: telemetryMetadata,
                            omitContext,
                        };
                    }
                }
                return {
                    name: job.name,
                    data: job.data,
                    opts: Object.assign(Object.assign(Object.assign({}, this.jobsOpts), job.opts), { jobId: (_f = job.opts) === null || _f === void 0 ? void 0 : _f.jobId, telemetry }),
                };
            }));
        });
    }
    /**
     * Upserts a scheduler.
     *
     * A scheduler is a job factory that creates jobs at a given interval.
     * Upserting a scheduler will create a new job scheduler or update an existing one.
     * It will also create the first job based on the repeat options and delayed accordingly.
     *
     * @param key - Unique key for the repeatable job meta.
     * @param repeatOpts - Repeat options
     * @param jobTemplate - Job template. If provided it will be used for all the jobs
     * created by the scheduler.
     *
     * @returns The next job to be scheduled (would normally be in delayed state).
     */
    async upsertJobScheduler(jobSchedulerId, repeatOpts, jobTemplate) {
        var _a, _b;
        if (repeatOpts.endDate) {
            if (+new Date(repeatOpts.endDate) < Date.now()) {
                throw new Error('End date must be greater than current timestamp');
            }
        }
        return (await this.jobScheduler).upsertJobScheduler(jobSchedulerId, repeatOpts, (_a = jobTemplate === null || jobTemplate === void 0 ? void 0 : jobTemplate.name) !== null && _a !== void 0 ? _a : jobSchedulerId, (_b = jobTemplate === null || jobTemplate === void 0 ? void 0 : jobTemplate.data) !== null && _b !== void 0 ? _b : {}, Object.assign(Object.assign({}, this.jobsOpts), jobTemplate === null || jobTemplate === void 0 ? void 0 : jobTemplate.opts), { override: true });
    }
    /**
     * Pauses the processing of this queue globally.
     *
     * We use an atomic RENAME operation on the wait queue. Since
     * we have blocking calls with BRPOPLPUSH on the wait queue, as long as the queue
     * is renamed to 'paused', no new jobs will be processed (the current ones
     * will run until finalized).
     *
     * Adding jobs requires a LUA script to check first if the paused list exist
     * and in that case it will add it there instead of the wait list.
     */
    async pause() {
        await this.trace(enums_1.SpanKind.INTERNAL, 'pause', this.name, async () => {
            await this.scripts.pause(true);
            this.emit('paused');
        });
    }
    /**
     * Close the queue instance.
     *
     */
    async close() {
        await this.trace(enums_1.SpanKind.INTERNAL, 'close', this.name, async () => {
            if (!this.closing) {
                if (this._repeat) {
                    await this._repeat.close();
                }
            }
            await super.close();
        });
    }
    /**
     * Overrides the rate limit to be active for the next jobs.
     *
     * @param expireTimeMs - expire time in ms of this rate limit.
     */
    async rateLimit(expireTimeMs) {
        await this.trace(enums_1.SpanKind.INTERNAL, 'rateLimit', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.QueueRateLimit]: expireTimeMs,
            });
            await this.client.then(client => client.set(this.keys.limiter, Number.MAX_SAFE_INTEGER, 'PX', expireTimeMs));
        });
    }
    /**
     * Resumes the processing of this queue globally.
     *
     * The method reverses the pause operation by resuming the processing of the
     * queue.
     */
    async resume() {
        await this.trace(enums_1.SpanKind.INTERNAL, 'resume', this.name, async () => {
            await this.scripts.pause(false);
            this.emit('resumed');
        });
    }
    /**
     * Returns true if the queue is currently paused.
     */
    async isPaused() {
        const client = await this.client;
        const pausedKeyExists = await client.hexists(this.keys.meta, 'paused');
        return pausedKeyExists === 1;
    }
    /**
     * Returns true if the queue is currently maxed.
     */
    isMaxed() {
        return this.scripts.isMaxed();
    }
    /**
     * Get all repeatable meta jobs.
     *
     * @deprecated This method is deprecated and will be removed in v6. Use getJobSchedulers instead.
     *
     * @param start - Offset of first job to return.
     * @param end - Offset of last job to return.
     * @param asc - Determine the order in which jobs are returned based on their
     * next execution time.
     */
    async getRepeatableJobs(start, end, asc) {
        return (await this.repeat).getRepeatableJobs(start, end, asc);
    }
    /**
     * Get Job Scheduler by id
     *
     * @param id - identifier of scheduler.
     */
    async getJobScheduler(id) {
        return (await this.jobScheduler).getScheduler(id);
    }
    /**
     * Get all Job Schedulers
     *
     * @param start - Offset of first scheduler to return.
     * @param end - Offset of last scheduler to return.
     * @param asc - Determine the order in which schedulers are returned based on their
     * next execution time.
     */
    async getJobSchedulers(start, end, asc) {
        return (await this.jobScheduler).getJobSchedulers(start, end, asc);
    }
    /**
     *
     * Get the number of job schedulers.
     *
     * @returns The number of job schedulers.
     */
    async getJobSchedulersCount() {
        return (await this.jobScheduler).getSchedulersCount();
    }
    /**
     * Removes a repeatable job.
     *
     * Note: you need to use the exact same repeatOpts when deleting a repeatable job
     * than when adding it.
     *
     * @deprecated This method is deprecated and will be removed in v6. Use removeJobScheduler instead.
     *
     * @see removeRepeatableByKey
     *
     * @param name - Job name
     * @param repeatOpts - Repeat options
     * @param jobId - Job id to remove. If not provided, all jobs with the same repeatOpts
     * @returns
     */
    async removeRepeatable(name, repeatOpts, jobId) {
        return this.trace(enums_1.SpanKind.INTERNAL, 'removeRepeatable', `${this.name}.${name}`, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.JobName]: name,
                [enums_1.TelemetryAttributes.JobId]: jobId,
            });
            const repeat = await this.repeat;
            const removed = await repeat.removeRepeatable(name, repeatOpts, jobId);
            return !removed;
        });
    }
    /**
     *
     * Removes a job scheduler.
     *
     * @param jobSchedulerId - identifier of the job scheduler.
     *
     * @returns
     */
    async removeJobScheduler(jobSchedulerId) {
        const jobScheduler = await this.jobScheduler;
        const removed = await jobScheduler.removeJobScheduler(jobSchedulerId);
        return !removed;
    }
    /**
     * Removes a debounce key.
     * @deprecated use removeDeduplicationKey
     *
     * @param id - debounce identifier
     */
    async removeDebounceKey(id) {
        return this.trace(enums_1.SpanKind.INTERNAL, 'removeDebounceKey', `${this.name}`, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.JobKey]: id,
            });
            const client = await this.client;
            return await client.del(`${this.keys.de}:${id}`);
        });
    }
    /**
     * Removes a deduplication key.
     *
     * @param id - identifier
     */
    async removeDeduplicationKey(id) {
        return this.trace(enums_1.SpanKind.INTERNAL, 'removeDeduplicationKey', `${this.name}`, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.DeduplicationKey]: id,
            });
            const client = await this.client;
            return client.del(`${this.keys.de}:${id}`);
        });
    }
    /**
     * Removes rate limit key.
     */
    async removeRateLimitKey() {
        const client = await this.client;
        return client.del(this.keys.limiter);
    }
    /**
     * Removes a repeatable job by its key. Note that the key is the one used
     * to store the repeatable job metadata and not one of the job iterations
     * themselves. You can use "getRepeatableJobs" in order to get the keys.
     *
     * @see getRepeatableJobs
     *
     * @deprecated This method is deprecated and will be removed in v6. Use removeJobScheduler instead.
     *
     * @param repeatJobKey - To the repeatable job.
     * @returns
     */
    async removeRepeatableByKey(key) {
        return this.trace(enums_1.SpanKind.INTERNAL, 'removeRepeatableByKey', `${this.name}`, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.JobKey]: key,
            });
            const repeat = await this.repeat;
            const removed = await repeat.removeRepeatableByKey(key);
            return !removed;
        });
    }
    /**
     * Removes the given job from the queue as well as all its
     * dependencies.
     *
     * @param jobId - The id of the job to remove
     * @param opts - Options to remove a job
     * @returns 1 if it managed to remove the job or 0 if the job or
     * any of its dependencies were locked.
     */
    async remove(jobId, { removeChildren = true } = {}) {
        return this.trace(enums_1.SpanKind.INTERNAL, 'remove', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.JobId]: jobId,
                [enums_1.TelemetryAttributes.JobOptions]: JSON.stringify({
                    removeChildren,
                }),
            });
            const code = await this.scripts.remove(jobId, removeChildren);
            if (code === 1) {
                this.emit('removed', jobId);
            }
            return code;
        });
    }
    /**
     * Updates the given job's progress.
     *
     * @param jobId - The id of the job to update
     * @param progress - Number or object to be saved as progress.
     */
    async updateJobProgress(jobId, progress) {
        await this.trace(enums_1.SpanKind.INTERNAL, 'updateJobProgress', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.JobId]: jobId,
                [enums_1.TelemetryAttributes.JobProgress]: JSON.stringify(progress),
            });
            await this.scripts.updateProgress(jobId, progress);
            this.emit('progress', jobId, progress);
        });
    }
    /**
     * Logs one row of job's log data.
     *
     * @param jobId - The job id to log against.
     * @param logRow - String with log data to be logged.
     * @param keepLogs - Max number of log entries to keep (0 for unlimited).
     *
     * @returns The total number of log entries for this job so far.
     */
    async addJobLog(jobId, logRow, keepLogs) {
        return job_1.Job.addJobLog(this, jobId, logRow, keepLogs);
    }
    /**
     * Drains the queue, i.e., removes all jobs that are waiting
     * or delayed, but not active, completed or failed.
     *
     * @param delayed - Pass true if it should also clean the
     * delayed jobs.
     */
    async drain(delayed = false) {
        await this.trace(enums_1.SpanKind.INTERNAL, 'drain', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.QueueDrainDelay]: delayed,
            });
            await this.scripts.drain(delayed);
        });
    }
    /**
     * Cleans jobs from a queue. Similar to drain but keeps jobs within a certain
     * grace period.
     *
     * @param grace - The grace period in milliseconds
     * @param limit - Max number of jobs to clean
     * @param type - The type of job to clean
     * Possible values are completed, wait, active, paused, delayed, failed. Defaults to completed.
     * @returns Id jobs from the deleted records
     */
    async clean(grace, limit, type = 'completed') {
        return this.trace(enums_1.SpanKind.INTERNAL, 'clean', this.name, async (span) => {
            const maxCount = limit || Infinity;
            const maxCountPerCall = Math.min(10000, maxCount);
            const timestamp = Date.now() - grace;
            let deletedCount = 0;
            const deletedJobsIds = [];
            // Normalize 'waiting' to 'wait' for consistency with internal Redis keys
            const normalizedType = type === 'waiting' ? 'wait' : type;
            while (deletedCount < maxCount) {
                const jobsIds = await this.scripts.cleanJobsInSet(normalizedType, timestamp, maxCountPerCall);
                this.emit('cleaned', jobsIds, normalizedType);
                deletedCount += jobsIds.length;
                deletedJobsIds.push(...jobsIds);
                if (jobsIds.length < maxCountPerCall) {
                    break;
                }
            }
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.QueueGrace]: grace,
                [enums_1.TelemetryAttributes.JobType]: type,
                [enums_1.TelemetryAttributes.QueueCleanLimit]: maxCount,
                [enums_1.TelemetryAttributes.JobIds]: deletedJobsIds,
            });
            return deletedJobsIds;
        });
    }
    /**
     * Completely destroys the queue and all of its contents irreversibly.
     * This method will *pause* the queue and requires that there are no
     * active jobs. It is possible to bypass this requirement, i.e. not
     * having active jobs using the "force" option.
     *
     * Note: This operation requires to iterate on all the jobs stored in the queue
     * and can be slow for very large queues.
     *
     * @param opts - Obliterate options.
     */
    async obliterate(opts) {
        await this.trace(enums_1.SpanKind.INTERNAL, 'obliterate', this.name, async () => {
            await this.pause();
            let cursor = 0;
            do {
                cursor = await this.scripts.obliterate(Object.assign({ force: false, count: 1000 }, opts));
            } while (cursor);
        });
    }
    /**
     * Retry all the failed or completed jobs.
     *
     * @param opts - An object with the following properties:
     * - count number to limit how many jobs will be moved to wait status per iteration,
     * - state failed by default or completed.
     * - timestamp from which timestamp to start moving jobs to wait status, default Date.now().
     *
     * @returns
     */
    async retryJobs(opts = {}) {
        await this.trace(enums_1.SpanKind.PRODUCER, 'retryJobs', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.QueueOptions]: JSON.stringify(opts),
            });
            let cursor = 0;
            do {
                cursor = await this.scripts.retryJobs(opts.state, opts.count, opts.timestamp);
            } while (cursor);
        });
    }
    /**
     * Promote all the delayed jobs.
     *
     * @param opts - An object with the following properties:
     * - count number to limit how many jobs will be moved to wait status per iteration
     *
     * @returns
     */
    async promoteJobs(opts = {}) {
        await this.trace(enums_1.SpanKind.INTERNAL, 'promoteJobs', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.QueueOptions]: JSON.stringify(opts),
            });
            let cursor = 0;
            do {
                cursor = await this.scripts.promoteJobs(opts.count);
            } while (cursor);
        });
    }
    /**
     * Trim the event stream to an approximately maxLength.
     *
     * @param maxLength -
     */
    async trimEvents(maxLength) {
        return this.trace(enums_1.SpanKind.INTERNAL, 'trimEvents', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.QueueEventMaxLength]: maxLength,
            });
            const client = await this.client;
            return await client.xtrim(this.keys.events, 'MAXLEN', '~', maxLength);
        });
    }
    /**
     * Delete old priority helper key.
     */
    async removeDeprecatedPriorityKey() {
        const client = await this.client;
        return client.del(this.toKey('priority'));
    }
}
exports.Queue = Queue;
//# sourceMappingURL=queue.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,277 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.RedisConnection = void 0;
const tslib_1 = require("tslib");
const events_1 = require("events");
const ioredis_1 = require("ioredis");
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
const utils_1 = require("ioredis/built/utils");
const utils_2 = require("../utils");
const version_1 = require("../version");
const scripts = require("../scripts");
const overrideMessage = [
'BullMQ: WARNING! Your redis options maxRetriesPerRequest must be null',
'and will be overridden by BullMQ.',
].join(' ');
const deprecationMessage = 'BullMQ: Your redis options maxRetriesPerRequest must be null.';
class RedisConnection extends events_1.EventEmitter {
/**
 * @param opts - Redis connection options, or an already constructed ioredis
 * instance to reuse (its options are then validated instead of merged).
 * @param extraOptions - BullMQ connection flags: shared, blocking,
 * skipVersionCheck, skipWaitingForReady.
 */
constructor(opts, extraOptions) {
    super();
    this.extraOptions = extraOptions;
    // Conservative defaults; refined in init() once the server version is known.
    this.capabilities = {
        canDoubleTimeout: false,
        canBlockFor1Ms: true,
    };
    this.status = 'initializing';
    this.packageVersion = version_1.version;
    // Set extra options defaults
    this.extraOptions = Object.assign({ shared: false, blocking: true, skipVersionCheck: false, skipWaitingForReady: false }, extraOptions);
    if (!(0, utils_2.isRedisInstance)(opts)) {
        this.checkBlockingOptions(overrideMessage, opts);
        // Exponential backoff retry delay, clamped between 1s and 20s.
        this.opts = Object.assign({ port: 6379, host: '127.0.0.1', retryStrategy: function (times) {
                return Math.max(Math.min(Math.exp(times), 20000), 1000);
            } }, opts);
        if (this.extraOptions.blocking) {
            // Blocking commands must not be aborted by per-request retry limits.
            this.opts.maxRetriesPerRequest = null;
        }
    }
    else {
        this._client = opts;
        // Test if the redis instance is using keyPrefix
        // and if so, throw an error.
        if (this._client.options.keyPrefix) {
            throw new Error('BullMQ: ioredis does not support ioredis prefixes, use the prefix option instead.');
        }
        if ((0, utils_2.isRedisCluster)(this._client)) {
            this.opts = this._client.options.redisOptions;
        }
        else {
            this.opts = this._client.options;
        }
        this.checkBlockingOptions(deprecationMessage, this.opts, true);
    }
    this.skipVersionCheck =
        (extraOptions === null || extraOptions === void 0 ? void 0 : extraOptions.skipVersionCheck) ||
            !!(this.opts && this.opts.skipVersionCheck);
    // Forward low-level client events through this EventEmitter.
    this.handleClientError = (err) => {
        this.emit('error', err);
    };
    this.handleClientClose = () => {
        this.emit('close');
    };
    this.handleClientReady = () => {
        this.emit('ready');
    };
    this.initializing = this.init();
    // Prevent an unhandled rejection; consumers observe failures via 'error'.
    this.initializing.catch(err => this.emit('error', err));
}
checkBlockingOptions(msg, options, throwError = false) {
if (this.extraOptions.blocking && options && options.maxRetriesPerRequest) {
if (throwError) {
throw new Error(msg);
}
else {
console.error(msg);
}
}
}
/**
 * Waits for a redis client to be ready.
 *
 * Resolves immediately if the client is already 'ready', initiates the
 * connection if it is still in 'wait' status, and otherwise resolves or
 * rejects on the first 'ready' / 'end' event.
 *
 * @param client - ioredis client to wait on.
 * @throws if the connection has already ended.
 */
static async waitUntilReady(client) {
    if (client.status === 'ready') {
        return;
    }
    if (client.status === 'wait') {
        return client.connect();
    }
    if (client.status === 'end') {
        throw new Error(utils_1.CONNECTION_CLOSED_ERROR_MSG);
    }
    let handleReady;
    let handleEnd;
    let handleError;
    try {
        await new Promise((resolve, reject) => {
            let lastError;
            // Remember the last error so 'end' can reject with a useful cause.
            handleError = (err) => {
                lastError = err;
            };
            handleReady = () => {
                resolve();
            };
            handleEnd = () => {
                if (client.status !== 'end') {
                    reject(lastError || new Error(utils_1.CONNECTION_CLOSED_ERROR_MSG));
                }
                else {
                    if (lastError) {
                        reject(lastError);
                    }
                    else {
                        // when custom 'end' status is set we already closed
                        resolve();
                    }
                }
            };
            (0, utils_2.increaseMaxListeners)(client, 3);
            client.once('ready', handleReady);
            client.on('end', handleEnd);
            client.once('error', handleError);
        });
    }
    finally {
        // Always detach the temporary listeners, on success and failure alike.
        client.removeListener('end', handleEnd);
        client.removeListener('error', handleError);
        client.removeListener('ready', handleReady);
        (0, utils_2.decreaseMaxListeners)(client, 3);
    }
}
/**
 * Promise resolving to the underlying ioredis client once the
 * initialization started in the constructor (see `init()`) completes.
 */
get client() {
    return this.initializing;
}
loadCommands(packageVersion, providedScripts) {
const finalScripts = providedScripts || scripts;
for (const property in finalScripts) {
// Only define the command if not already defined
const commandName = `${finalScripts[property].name}:${packageVersion}`;
if (!this._client[commandName]) {
this._client.defineCommand(commandName, {
numberOfKeys: finalScripts[property].keys,
lua: finalScripts[property].content,
});
}
}
}
/**
 * Lazily creates (if needed) and prepares the underlying ioredis client:
 * attaches event forwarders, optionally waits for readiness, loads the Lua
 * command scripts, and performs version/capability checks.
 *
 * @returns the prepared ioredis client.
 */
async init() {
    if (!this._client) {
        // 'url' is not an ioredis option: split it off and pass it as the
        // first constructor argument instead.
        const _a = this.opts, { url } = _a, rest = tslib_1.__rest(_a, ["url"]);
        this._client = url ? new ioredis_1.default(url, rest) : new ioredis_1.default(rest);
    }
    (0, utils_2.increaseMaxListeners)(this._client, 3);
    this._client.on('error', this.handleClientError);
    // ioredis treats connection errors as a different event ('close')
    this._client.on('close', this.handleClientClose);
    this._client.on('ready', this.handleClientReady);
    if (!this.extraOptions.skipWaitingForReady) {
        await RedisConnection.waitUntilReady(this._client);
    }
    this.loadCommands(this.packageVersion);
    if (this._client['status'] !== 'end') {
        this.version = await this.getRedisVersion();
        if (this.skipVersionCheck !== true && !this.closing) {
            // Fail hard below the minimum supported version; only warn below
            // the recommended one.
            if ((0, utils_2.isRedisVersionLowerThan)(this.version, RedisConnection.minimumVersion)) {
                throw new Error(`Redis version needs to be greater or equal than ${RedisConnection.minimumVersion} ` +
                    `Current: ${this.version}`);
            }
            if ((0, utils_2.isRedisVersionLowerThan)(this.version, RedisConnection.recommendedMinimumVersion)) {
                console.warn(`It is highly recommended to use a minimum Redis version of ${RedisConnection.recommendedMinimumVersion}
Current: ${this.version}`);
            }
        }
        this.capabilities = {
            canDoubleTimeout: !(0, utils_2.isRedisVersionLowerThan)(this.version, '6.0.0'),
            canBlockFor1Ms: !(0, utils_2.isRedisVersionLowerThan)(this.version, '7.0.8'),
        };
        this.status = 'ready';
    }
    return this._client;
}
    /**
     * Forcefully disconnects the underlying ioredis client.
     *
     * @param wait - when true (default), resolve only after the client emits
     * 'end' (or reject on 'error'); when false, return right after calling
     * disconnect().
     */
    async disconnect(wait = true) {
        const client = await this.client;
        if (client.status !== 'end') {
            let _resolve, _reject;
            if (!wait) {
                return client.disconnect();
            }
            // Subscribe to 'end'/'error' before disconnecting to avoid a race.
            const disconnecting = new Promise((resolve, reject) => {
                (0, utils_2.increaseMaxListeners)(client, 2);
                client.once('end', resolve);
                client.once('error', reject);
                _resolve = resolve;
                _reject = reject;
            });
            client.disconnect();
            try {
                await disconnecting;
            }
            finally {
                // Always clean up the one-shot listeners, even on rejection.
                (0, utils_2.decreaseMaxListeners)(client, 2);
                client.removeListener('end', _resolve);
                client.removeListener('error', _reject);
            }
        }
    }
async reconnect() {
const client = await this.client;
return client.connect();
}
    /**
     * Gracefully closes the connection.
     *
     * Idempotent: once closing has started, further calls are no-ops.
     * Non-shared clients are quit (or force-disconnected when still
     * initializing), and every handler installed by init() is removed.
     *
     * @param force - when true, disconnect immediately instead of quitting.
     */
    async close(force = false) {
        if (!this.closing) {
            const status = this.status;
            this.status = 'closing';
            this.closing = true;
            try {
                if (status === 'ready') {
                    // Not sure if we need to wait for this
                    await this.initializing;
                }
                if (!this.extraOptions.shared) {
                    if (status == 'initializing' || force) {
                        // If we have not still connected to Redis, we need to disconnect.
                        this._client.disconnect();
                    }
                    else {
                        await this._client.quit();
                    }
                    // As IORedis does not update this status properly, we do it ourselves.
                    this._client['status'] = 'end';
                }
            }
            catch (error) {
                // Connection-related errors are expected while closing and are
                // swallowed; anything else is rethrown.
                if ((0, utils_2.isNotConnectionError)(error)) {
                    throw error;
                }
            }
            finally {
                this._client.off('error', this.handleClientError);
                this._client.off('close', this.handleClientClose);
                this._client.off('ready', this.handleClientReady);
                (0, utils_2.decreaseMaxListeners)(this._client, 3);
                this.removeAllListeners();
                this.status = 'closed';
            }
        }
    }
async getRedisVersion() {
if (this.skipVersionCheck) {
return RedisConnection.minimumVersion;
}
const doc = await this._client.info();
const redisPrefix = 'redis_version:';
const maxMemoryPolicyPrefix = 'maxmemory_policy:';
const lines = doc.split(/\r?\n/);
let redisVersion;
for (let i = 0; i < lines.length; i++) {
if (lines[i].indexOf(maxMemoryPolicyPrefix) === 0) {
const maxMemoryPolicy = lines[i].substr(maxMemoryPolicyPrefix.length);
if (maxMemoryPolicy !== 'noeviction') {
console.warn(`IMPORTANT! Eviction policy is ${maxMemoryPolicy}. It should be "noeviction"`);
}
}
if (lines[i].indexOf(redisPrefix) === 0) {
redisVersion = lines[i].substr(redisPrefix.length);
}
}
return redisVersion;
}
    /**
     * Redis server version captured by init(); undefined before init()
     * completes.
     */
    get redisVersion() {
        return this.version;
    }
}
exports.RedisConnection = RedisConnection;
// Hard minimum Redis server version; init() throws below this.
RedisConnection.minimumVersion = '5.0.0';
// Versions below this still work but init() emits a console warning.
RedisConnection.recommendedMinimumVersion = '6.2.0';
//# sourceMappingURL=redis-connection.js.map

File diff suppressed because one or more lines are too long

204
backend/node_modules/bullmq/dist/cjs/classes/repeat.js generated vendored Normal file
View File

@@ -0,0 +1,204 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.getNextMillis = exports.Repeat = void 0;
const tslib_1 = require("tslib");
const cron_parser_1 = require("cron-parser");
const crypto_1 = require("crypto");
const queue_base_1 = require("./queue-base");
class Repeat extends queue_base_1.QueueBase {
constructor(name, opts, Connection) {
super(name, opts, Connection);
this.repeatStrategy =
(opts.settings && opts.settings.repeatStrategy) || exports.getNextMillis;
this.repeatKeyHashAlgorithm =
(opts.settings && opts.settings.repeatKeyHashAlgorithm) || 'md5';
}
async updateRepeatableJob(name, data, opts, { override }) {
var _a, _b;
// Backwards compatibility for repeatable jobs for versions <= 3.0.0
const repeatOpts = Object.assign({}, opts.repeat);
(_a = repeatOpts.pattern) !== null && _a !== void 0 ? _a : (repeatOpts.pattern = repeatOpts.cron);
delete repeatOpts.cron;
// Check if we reached the limit of the repeatable job's iterations
const iterationCount = repeatOpts.count ? repeatOpts.count + 1 : 1;
if (typeof repeatOpts.limit !== 'undefined' &&
iterationCount > repeatOpts.limit) {
return;
}
// Check if we reached the end date of the repeatable job
let now = Date.now();
const { endDate } = repeatOpts;
if (endDate && now > new Date(endDate).getTime()) {
return;
}
const prevMillis = opts.prevMillis || 0;
now = prevMillis < now ? now : prevMillis;
const nextMillis = await this.repeatStrategy(now, repeatOpts, name);
const { every, pattern } = repeatOpts;
const hasImmediately = Boolean((every || pattern) && repeatOpts.immediately);
const offset = hasImmediately && every ? now - nextMillis : undefined;
if (nextMillis) {
// We store the undecorated opts.jobId into the repeat options
if (!prevMillis && opts.jobId) {
repeatOpts.jobId = opts.jobId;
}
const legacyRepeatKey = getRepeatConcatOptions(name, repeatOpts);
const newRepeatKey = (_b = opts.repeat.key) !== null && _b !== void 0 ? _b : this.hash(legacyRepeatKey);
let repeatJobKey;
if (override) {
repeatJobKey = await this.scripts.addRepeatableJob(newRepeatKey, nextMillis, {
name,
endDate: endDate ? new Date(endDate).getTime() : undefined,
tz: repeatOpts.tz,
pattern,
every,
}, legacyRepeatKey);
}
else {
const client = await this.client;
repeatJobKey = await this.scripts.updateRepeatableJobMillis(client, newRepeatKey, nextMillis, legacyRepeatKey);
}
const { immediately } = repeatOpts, filteredRepeatOpts = tslib_1.__rest(repeatOpts, ["immediately"]);
return this.createNextJob(name, nextMillis, repeatJobKey, Object.assign(Object.assign({}, opts), { repeat: Object.assign({ offset }, filteredRepeatOpts) }), data, iterationCount, hasImmediately);
}
}
async createNextJob(name, nextMillis, repeatJobKey, opts, data, currentCount, hasImmediately) {
//
// Generate unique job id for this iteration.
//
const jobId = this.getRepeatJobKey(name, nextMillis, repeatJobKey, data);
const now = Date.now();
const delay = nextMillis + (opts.repeat.offset ? opts.repeat.offset : 0) - now;
const mergedOpts = Object.assign(Object.assign({}, opts), { jobId, delay: delay < 0 || hasImmediately ? 0 : delay, timestamp: now, prevMillis: nextMillis, repeatJobKey });
mergedOpts.repeat = Object.assign(Object.assign({}, opts.repeat), { count: currentCount });
return this.Job.create(this, name, data, mergedOpts);
}
// TODO: remove legacy code in next breaking change
getRepeatJobKey(name, nextMillis, repeatJobKey, data) {
if (repeatJobKey.split(':').length > 2) {
return this.getRepeatJobId({
name: name,
nextMillis: nextMillis,
namespace: this.hash(repeatJobKey),
jobId: data === null || data === void 0 ? void 0 : data.id,
});
}
return this.getRepeatDelayedJobId({
customKey: repeatJobKey,
nextMillis,
});
}
async removeRepeatable(name, repeat, jobId) {
var _a;
const repeatConcatOptions = getRepeatConcatOptions(name, Object.assign(Object.assign({}, repeat), { jobId }));
const repeatJobKey = (_a = repeat.key) !== null && _a !== void 0 ? _a : this.hash(repeatConcatOptions);
const legacyRepeatJobId = this.getRepeatJobId({
name,
nextMillis: '',
namespace: this.hash(repeatConcatOptions),
jobId: jobId !== null && jobId !== void 0 ? jobId : repeat.jobId,
key: repeat.key,
});
return this.scripts.removeRepeatable(legacyRepeatJobId, repeatConcatOptions, repeatJobKey);
}
async removeRepeatableByKey(repeatJobKey) {
const data = this.keyToData(repeatJobKey);
const legacyRepeatJobId = this.getRepeatJobId({
name: data.name,
nextMillis: '',
namespace: this.hash(repeatJobKey),
jobId: data.id,
});
return this.scripts.removeRepeatable(legacyRepeatJobId, '', repeatJobKey);
}
async getRepeatableData(client, key, next) {
const jobData = await client.hgetall(this.toKey('repeat:' + key));
if (jobData) {
return {
key,
name: jobData.name,
endDate: parseInt(jobData.endDate) || null,
tz: jobData.tz || null,
pattern: jobData.pattern || null,
every: jobData.every || null,
next,
};
}
return this.keyToData(key, next);
}
keyToData(key, next) {
const data = key.split(':');
const pattern = data.slice(4).join(':') || null;
return {
key,
name: data[0],
id: data[1] || null,
endDate: parseInt(data[2]) || null,
tz: data[3] || null,
pattern,
next,
};
}
async getRepeatableJobs(start = 0, end = -1, asc = false) {
const client = await this.client;
const key = this.keys.repeat;
const result = asc
? await client.zrange(key, start, end, 'WITHSCORES')
: await client.zrevrange(key, start, end, 'WITHSCORES');
const jobs = [];
for (let i = 0; i < result.length; i += 2) {
jobs.push(this.getRepeatableData(client, result[i], parseInt(result[i + 1])));
}
return Promise.all(jobs);
}
async getRepeatableCount() {
const client = await this.client;
return client.zcard(this.toKey('repeat'));
}
hash(str) {
return (0, crypto_1.createHash)(this.repeatKeyHashAlgorithm).update(str).digest('hex');
}
getRepeatDelayedJobId({ nextMillis, customKey, }) {
return `repeat:${customKey}:${nextMillis}`;
}
getRepeatJobId({ name, nextMillis, namespace, jobId, key, }) {
const checksum = key !== null && key !== void 0 ? key : this.hash(`${name}${jobId || ''}${namespace}`);
return `repeat:${checksum}:${nextMillis}`;
}
}
exports.Repeat = Repeat;
/**
 * Builds the legacy concatenated repeat key: `name:jobId:endDate:tz:suffix`,
 * where the suffix is the cron pattern or, failing that, the stringified
 * `every` interval. Missing fields collapse to empty segments.
 *
 * @param name - job name
 * @param repeat - repeat options (jobId, endDate, tz, pattern, every)
 * @returns the colon-joined key string
 */
function getRepeatConcatOptions(name, repeat) {
    const segments = [
        name,
        repeat.jobId ? repeat.jobId : '',
        repeat.endDate ? new Date(repeat.endDate).getTime() : '',
        repeat.tz || '',
        (repeat.pattern ? repeat.pattern : String(repeat.every)) || '',
    ];
    return segments.join(':');
}
/**
 * Computes the timestamp (ms) of the next iteration of a repeatable job.
 *
 * With `every`, timestamps are snapped to the interval grid; with `pattern`,
 * cron-parser iterates from the later of `millis` and `startDate`. Returns
 * undefined when the cron iterator has no further occurrence.
 *
 * @param millis - reference timestamp in milliseconds
 * @param opts - repeat options (pattern, every, startDate, immediately, ...)
 * @throws when both `pattern` and `every` are set
 */
const getNextMillis = (millis, opts) => {
    const { pattern, every, startDate, immediately } = opts;
    if (pattern && every) {
        throw new Error('Both .pattern and .every options are defined for this repeatable job');
    }
    if (every) {
        // Snap to the interval grid; skip to the next slot unless running immediately.
        const slotStart = Math.floor(millis / every) * every;
        return immediately ? slotStart : slotStart + every;
    }
    // Iterate the cron expression from startDate when it lies in the future.
    let currentDate = new Date(millis);
    if (startDate && new Date(startDate) > currentDate) {
        currentDate = new Date(startDate);
    }
    const interval = (0, cron_parser_1.parseExpression)(pattern, Object.assign(Object.assign({}, opts), { currentDate }));
    try {
        return immediately ? new Date().getTime() : interval.next().getTime();
    }
    catch (e) {
        // Deliberately swallowed: an exhausted iterator means there is no
        // next occurrence, and undefined is returned.
    }
};
exports.getNextMillis = getNextMillis;
//# sourceMappingURL=repeat.js.map

File diff suppressed because one or more lines are too long

111
backend/node_modules/bullmq/dist/cjs/classes/sandbox.js generated vendored Normal file
View File

@@ -0,0 +1,111 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const enums_1 = require("../enums");
/**
 * Returns a processor function that runs job processing in a separate child
 * process (or worker thread) obtained from the given pool.
 *
 * Parent and child communicate over an IPC message protocol: the child
 * reports completion/failure and can ask the parent to perform job
 * operations (progress, log, state moves, data queries) on its behalf.
 *
 * @param processFile - path of the module containing the user's processor
 * @param childPool - pool used to retain/release child processes
 */
const sandbox = (processFile, childPool) => {
    return async function process(job, token) {
        let child;
        let msgHandler;
        let exitHandler;
        try {
            const done = new Promise((resolve, reject) => {
                const initChild = async () => {
                    try {
                        // Any premature exit of the child fails the job.
                        exitHandler = (exitCode, signal) => {
                            reject(new Error('Unexpected exit code: ' + exitCode + ' signal: ' + signal));
                        };
                        child = await childPool.retain(processFile);
                        child.on('exit', exitHandler);
                        // Dispatch requests/results arriving from the child.
                        msgHandler = async (msg) => {
                            var _a, _b, _c, _d, _e;
                            try {
                                switch (msg.cmd) {
                                    case enums_1.ParentCommand.Completed:
                                        resolve(msg.value);
                                        break;
                                    case enums_1.ParentCommand.Failed:
                                    case enums_1.ParentCommand.Error: {
                                        // Rebuild an Error from the serialized payload.
                                        const err = new Error();
                                        Object.assign(err, msg.value);
                                        reject(err);
                                        break;
                                    }
                                    case enums_1.ParentCommand.Progress:
                                        await job.updateProgress(msg.value);
                                        break;
                                    case enums_1.ParentCommand.Log:
                                        await job.log(msg.value);
                                        break;
                                    case enums_1.ParentCommand.MoveToDelayed:
                                        await job.moveToDelayed((_a = msg.value) === null || _a === void 0 ? void 0 : _a.timestamp, (_b = msg.value) === null || _b === void 0 ? void 0 : _b.token);
                                        break;
                                    case enums_1.ParentCommand.MoveToWait:
                                        await job.moveToWait((_c = msg.value) === null || _c === void 0 ? void 0 : _c.token);
                                        break;
                                    case enums_1.ParentCommand.MoveToWaitingChildren:
                                        {
                                            // Request/response: echo requestId so the child can
                                            // correlate the reply.
                                            const value = await job.moveToWaitingChildren((_d = msg.value) === null || _d === void 0 ? void 0 : _d.token, (_e = msg.value) === null || _e === void 0 ? void 0 : _e.opts);
                                            child.send({
                                                requestId: msg.requestId,
                                                cmd: enums_1.ChildCommand.MoveToWaitingChildrenResponse,
                                                value,
                                            });
                                        }
                                        break;
                                    case enums_1.ParentCommand.Update:
                                        await job.updateData(msg.value);
                                        break;
                                    case enums_1.ParentCommand.GetChildrenValues:
                                        {
                                            const value = await job.getChildrenValues();
                                            child.send({
                                                requestId: msg.requestId,
                                                cmd: enums_1.ChildCommand.GetChildrenValuesResponse,
                                                value,
                                            });
                                        }
                                        break;
                                    case enums_1.ParentCommand.GetIgnoredChildrenFailures:
                                        {
                                            const value = await job.getIgnoredChildrenFailures();
                                            child.send({
                                                requestId: msg.requestId,
                                                cmd: enums_1.ChildCommand.GetIgnoredChildrenFailuresResponse,
                                                value,
                                            });
                                        }
                                        break;
                                }
                            }
                            catch (err) {
                                reject(err);
                            }
                        };
                        child.on('message', msgHandler);
                        // Kick off the actual processing in the child.
                        child.send({
                            cmd: enums_1.ChildCommand.Start,
                            job: job.asJSONSandbox(),
                            token,
                        });
                    }
                    catch (error) {
                        reject(error);
                    }
                };
                initChild();
            });
            await done;
            return done;
        }
        finally {
            if (child) {
                child.off('message', msgHandler);
                child.off('exit', exitHandler);
                // Only return live children to the pool; exited ones are dropped.
                if (child.exitCode === null && child.signalCode === null) {
                    childPool.release(child);
                }
            }
        }
    };
};
exports.default = sandbox;
//# sourceMappingURL=sandbox.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"sandbox.js","sourceRoot":"","sources":["../../../src/classes/sandbox.ts"],"names":[],"mappings":";;AAAA,oCAAuD;AAMvD,MAAM,OAAO,GAAG,CACd,WAAgB,EAChB,SAAoB,EACpB,EAAE;IACF,OAAO,KAAK,UAAU,OAAO,CAAC,GAAiB,EAAE,KAAc;QAC7D,IAAI,KAAY,CAAC;QACjB,IAAI,UAAe,CAAC;QACpB,IAAI,WAAgB,CAAC;QACrB,IAAI,CAAC;YACH,MAAM,IAAI,GAAe,IAAI,OAAO,CAAC,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE;gBACvD,MAAM,SAAS,GAAG,KAAK,IAAI,EAAE;oBAC3B,IAAI,CAAC;wBACH,WAAW,GAAG,CAAC,QAAa,EAAE,MAAW,EAAE,EAAE;4BAC3C,MAAM,CACJ,IAAI,KAAK,CACP,wBAAwB,GAAG,QAAQ,GAAG,WAAW,GAAG,MAAM,CAC3D,CACF,CAAC;wBACJ,CAAC,CAAC;wBAEF,KAAK,GAAG,MAAM,SAAS,CAAC,MAAM,CAAC,WAAW,CAAC,CAAC;wBAC5C,KAAK,CAAC,EAAE,CAAC,MAAM,EAAE,WAAW,CAAC,CAAC;wBAE9B,UAAU,GAAG,KAAK,EAAE,GAAiB,EAAE,EAAE;;4BACvC,IAAI,CAAC;gCACH,QAAQ,GAAG,CAAC,GAAG,EAAE,CAAC;oCAChB,KAAK,qBAAa,CAAC,SAAS;wCAC1B,OAAO,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCACnB,MAAM;oCACR,KAAK,qBAAa,CAAC,MAAM,CAAC;oCAC1B,KAAK,qBAAa,CAAC,KAAK,CAAC,CAAC,CAAC;wCACzB,MAAM,GAAG,GAAG,IAAI,KAAK,EAAE,CAAC;wCACxB,MAAM,CAAC,MAAM,CAAC,GAAG,EAAE,GAAG,CAAC,KAAK,CAAC,CAAC;wCAC9B,MAAM,CAAC,GAAG,CAAC,CAAC;wCACZ,MAAM;oCACR,CAAC;oCACD,KAAK,qBAAa,CAAC,QAAQ;wCACzB,MAAM,GAAG,CAAC,cAAc,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCACpC,MAAM;oCACR,KAAK,qBAAa,CAAC,GAAG;wCACpB,MAAM,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCACzB,MAAM;oCACR,KAAK,qBAAa,CAAC,aAAa;wCAC9B,MAAM,GAAG,CAAC,aAAa,CACrB,MAAA,GAAG,CAAC,KAAK,0CAAE,SAAS,EACpB,MAAA,GAAG,CAAC,KAAK,0CAAE,KAAK,CACjB,CAAC;wCACF,MAAM;oCACR,KAAK,qBAAa,CAAC,UAAU;wCAC3B,MAAM,GAAG,CAAC,UAAU,CAAC,MAAA,GAAG,CAAC,KAAK,0CAAE,KAAK,CAAC,CAAC;wCACvC,MAAM;oCACR,KAAK,qBAAa,CAAC,qBAAqB;wCACtC,CAAC;4CACC,MAAM,KAAK,GAAG,MAAM,GAAG,CAAC,qBAAqB,CAC3C,MAAA,GAAG,CAAC,KAAK,0CAAE,KAAK,EAChB,MAAA,GAAG,CAAC,KAAK,0CAAE,IAAI,CAChB,CAAC;4CACF,KAAK,CAAC,IAAI,CAAC;gDACT,SAAS,EAAE,GAAG,CAAC,SAAS;gDACxB,GAAG,EAAE,oBAAY,CAAC,6BAA6B;gDAC/C,KAAK;6CACN,CAAC,CAAC;wCACL,CAAC;wCACD,MAAM;oCACR,KAAK,qBAAa,CAAC,MAAM;wCACvB,MAAM,GAAG,CAAC,UAAU,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCAChC,MAAM;oCACR,KAAK,qBAAa,CAAC,iBAAiB;wCAClC,CAAC
;4CACC,MAAM,KAAK,GAAG,MAAM,GAAG,CAAC,iBAAiB,EAAE,CAAC;4CAC5C,KAAK,CAAC,IAAI,CAAC;gDACT,SAAS,EAAE,GAAG,CAAC,SAAS;gDACxB,GAAG,EAAE,oBAAY,CAAC,yBAAyB;gDAC3C,KAAK;6CACN,CAAC,CAAC;wCACL,CAAC;wCACD,MAAM;oCACR,KAAK,qBAAa,CAAC,0BAA0B;wCAC3C,CAAC;4CACC,MAAM,KAAK,GAAG,MAAM,GAAG,CAAC,0BAA0B,EAAE,CAAC;4CACrD,KAAK,CAAC,IAAI,CAAC;gDACT,SAAS,EAAE,GAAG,CAAC,SAAS;gDACxB,GAAG,EAAE,oBAAY,CAAC,kCAAkC;gDACpD,KAAK;6CACN,CAAC,CAAC;wCACL,CAAC;wCACD,MAAM;gCACV,CAAC;4BACH,CAAC;4BAAC,OAAO,GAAG,EAAE,CAAC;gCACb,MAAM,CAAC,GAAG,CAAC,CAAC;4BACd,CAAC;wBACH,CAAC,CAAC;wBAEF,KAAK,CAAC,EAAE,CAAC,SAAS,EAAE,UAAU,CAAC,CAAC;wBAEhC,KAAK,CAAC,IAAI,CAAC;4BACT,GAAG,EAAE,oBAAY,CAAC,KAAK;4BACvB,GAAG,EAAE,GAAG,CAAC,aAAa,EAAE;4BACxB,KAAK;yBACN,CAAC,CAAC;oBACL,CAAC;oBAAC,OAAO,KAAK,EAAE,CAAC;wBACf,MAAM,CAAC,KAAK,CAAC,CAAC;oBAChB,CAAC;gBACH,CAAC,CAAC;gBACF,SAAS,EAAE,CAAC;YACd,CAAC,CAAC,CAAC;YAEH,MAAM,IAAI,CAAC;YACX,OAAO,IAAI,CAAC;QACd,CAAC;gBAAS,CAAC;YACT,IAAI,KAAK,EAAE,CAAC;gBACV,KAAK,CAAC,GAAG,CAAC,SAAS,EAAE,UAAU,CAAC,CAAC;gBACjC,KAAK,CAAC,GAAG,CAAC,MAAM,EAAE,WAAW,CAAC,CAAC;gBAC/B,IAAI,KAAK,CAAC,QAAQ,KAAK,IAAI,IAAI,KAAK,CAAC,UAAU,KAAK,IAAI,EAAE,CAAC;oBACzD,SAAS,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;gBAC3B,CAAC;YACH,CAAC;QACH,CAAC;IACH,CAAC,CAAC;AACJ,CAAC,CAAC;AAEF,kBAAe,OAAO,CAAC"}

1200
backend/node_modules/bullmq/dist/cjs/classes/scripts.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

874
backend/node_modules/bullmq/dist/cjs/classes/worker.js generated vendored Normal file
View File

@@ -0,0 +1,874 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Worker = void 0;
const fs = require("fs");
const url_1 = require("url");
const path = require("path");
const uuid_1 = require("uuid");
// Note: this Polyfill is only needed for Node versions < 15.4.0
const node_abort_controller_1 = require("node-abort-controller");
const utils_1 = require("../utils");
const queue_base_1 = require("./queue-base");
const repeat_1 = require("./repeat");
const child_pool_1 = require("./child-pool");
const redis_connection_1 = require("./redis-connection");
const sandbox_1 = require("./sandbox");
const async_fifo_queue_1 = require("./async-fifo-queue");
const errors_1 = require("./errors");
const enums_1 = require("../enums");
const job_scheduler_1 = require("./job-scheduler");
const lock_manager_1 = require("./lock-manager");
// 10 seconds is the maximum time a BZPOPMIN can block.
const maximumBlockTimeout = 10;
/**
*
* This class represents a worker that is able to process jobs from the queue.
* As soon as the class is instantiated and a connection to Redis is established
* it will start processing jobs.
*
*/
class Worker extends queue_base_1.QueueBase {
    /**
     * Factory for the error a processor throws to signal that the job hit a
     * rate limit and should be handled accordingly.
     */
    static RateLimitError() {
        return new errors_1.RateLimitError();
    }
    /**
     * Creates a Worker.
     *
     * @param name - queue name
     * @param processor - processing function, or a file path / URL for a
     * sandboxed processor; may be omitted (then run() cannot start).
     * @param opts - worker options; `connection` is required.
     * @param Connection - optional RedisConnection class override
     */
    constructor(name, processor, opts, Connection) {
        super(name, Object.assign(Object.assign({ drainDelay: 5, concurrency: 1, lockDuration: 30000, maximumRateLimitDelay: 30000, maxStalledCount: 1, stalledInterval: 30000, autorun: true, runRetryDelay: 15000 }, opts), { blockingConnection: true }), Connection);
        this.abortDelayController = null;
        this.blockUntil = 0;
        this.drained = false;
        this.limitUntil = 0;
        this.processorAcceptsSignal = false;
        this.waiting = null;
        this.running = false;
        this.mainLoopRunning = null;
        if (!opts || !opts.connection) {
            throw new Error('Worker requires a connection');
        }
        // Validate numeric options up-front so misconfiguration fails fast.
        if (typeof this.opts.maxStalledCount !== 'number' ||
            this.opts.maxStalledCount < 0) {
            throw new Error('maxStalledCount must be greater or equal than 0');
        }
        if (typeof this.opts.maxStartedAttempts === 'number' &&
            this.opts.maxStartedAttempts < 0) {
            throw new Error('maxStartedAttempts must be greater or equal than 0');
        }
        if (typeof this.opts.stalledInterval !== 'number' ||
            this.opts.stalledInterval <= 0) {
            throw new Error('stalledInterval must be greater than 0');
        }
        if (typeof this.opts.drainDelay !== 'number' || this.opts.drainDelay <= 0) {
            throw new Error('drainDelay must be greater than 0');
        }
        this.concurrency = this.opts.concurrency;
        this.opts.lockRenewTime =
            this.opts.lockRenewTime || this.opts.lockDuration / 2;
        this.id = (0, uuid_1.v4)();
        this.createLockManager();
        if (processor) {
            if (typeof processor === 'function') {
                this.processFn = processor;
                // Check if processor accepts signal parameter (3rd parameter)
                this.processorAcceptsSignal = processor.length >= 3;
            }
            else {
                // SANDBOXED
                if (processor instanceof url_1.URL) {
                    if (!fs.existsSync(processor)) {
                        throw new Error(`URL ${processor} does not exist in the local file system`);
                    }
                    processor = processor.href;
                }
                else {
                    // Append '.js' when the path carries no recognized extension.
                    const supportedFileTypes = ['.js', '.ts', '.flow', '.cjs', '.mjs'];
                    const processorFile = processor +
                        (supportedFileTypes.includes(path.extname(processor)) ? '' : '.js');
                    if (!fs.existsSync(processorFile)) {
                        throw new Error(`File ${processorFile} does not exist`);
                    }
                }
                // Separate paths so that bundling tools can resolve dependencies easier
                const dirname = path.dirname(module.filename || __filename);
                const workerThreadsMainFile = path.join(dirname, 'main-worker.js');
                const spawnProcessMainFile = path.join(dirname, 'main.js');
                let mainFilePath = this.opts.useWorkerThreads
                    ? workerThreadsMainFile
                    : spawnProcessMainFile;
                try {
                    fs.statSync(mainFilePath); // would throw if file not exists
                }
                catch (_) {
                    // Fallback for bundled setups: look under the cwd's dist tree.
                    const mainFile = this.opts.useWorkerThreads
                        ? 'main-worker.js'
                        : 'main.js';
                    mainFilePath = path.join(process.cwd(), `dist/cjs/classes/${mainFile}`);
                    fs.statSync(mainFilePath);
                }
                this.childPool = new child_pool_1.ChildPool({
                    mainFile: mainFilePath,
                    useWorkerThreads: this.opts.useWorkerThreads,
                    workerForkOptions: this.opts.workerForkOptions,
                    workerThreadsOptions: this.opts.workerThreadsOptions,
                });
                this.createSandbox(processor);
            }
            // Autorun only applies when a processor was provided.
            if (this.opts.autorun) {
                this.run().catch(error => this.emit('error', error));
            }
        }
        // Dedicated blocking connection for BZPOPMIN-style commands, separate
        // from the shared client.
        const connectionName = this.clientName() + (this.opts.name ? `:w:${this.opts.name}` : '');
        this.blockingConnection = new redis_connection_1.RedisConnection((0, utils_1.isRedisInstance)(opts.connection)
            ? opts.connection.duplicate({ connectionName })
            : Object.assign(Object.assign({}, opts.connection), { connectionName }), {
            shared: false,
            blocking: true,
            skipVersionCheck: opts.skipVersionCheck,
        });
        this.blockingConnection.on('error', error => this.emit('error', error));
        this.blockingConnection.on('ready', () => setTimeout(() => this.emit('ready'), 0));
    }
/**
* Creates and configures the lock manager for processing jobs.
* This method can be overridden in subclasses to customize lock manager behavior.
*/
createLockManager() {
this.lockManager = new lock_manager_1.LockManager(this, {
lockRenewTime: this.opts.lockRenewTime,
lockDuration: this.opts.lockDuration,
workerId: this.id,
workerName: this.opts.name,
});
}
    /**
     * Creates and configures the sandbox for processing jobs.
     * This method can be overridden in subclasses to customize sandbox behavior.
     *
     * @param processor - The processor file path, URL, or function to be sandboxed
     */
    createSandbox(processor) {
        // Bound to the worker so the sandboxed processFn sees the same `this`.
        this.processFn = (0, sandbox_1.default)(processor, this.childPool).bind(this);
    }
    /**
     * Public accessor method for LockManager to extend locks.
     * This delegates to the protected scripts object.
     *
     * @param jobIds - ids of the jobs whose locks are being extended
     * @param tokens - lock tokens matching each job id
     * @param duration - new lock duration in milliseconds
     */
    async extendJobLocks(jobIds, tokens, duration) {
        return this.scripts.extendLocks(jobIds, tokens, duration);
    }
    // Thin passthroughs over EventEmitter; off/on/once return `this` to
    // support chaining.
    emit(event, ...args) {
        return super.emit(event, ...args);
    }
    off(eventName, listener) {
        super.off(eventName, listener);
        return this;
    }
    on(event, listener) {
        super.on(event, listener);
        return this;
    }
    once(event, listener) {
        super.once(event, listener);
        return this;
    }
    // Invokes the configured processor; `signal` allows cooperative abort.
    callProcessJob(job, token, signal) {
        return this.processFn(job, token, signal);
    }
    // Re-hydrates a Job instance from its raw JSON representation.
    createJob(data, jobId) {
        return this.Job.fromJSON(this, data, jobId);
    }
    /**
     * Waits until the worker is ready to start processing jobs.
     * In general only useful when writing tests.
     *
     * @returns the ready blocking-connection client.
     */
    async waitUntilReady() {
        // Both the shared and the blocking connection must be ready.
        await super.waitUntilReady();
        return this.blockingConnection.client;
    }
    /**
     * Cancels a specific job currently being processed by this worker.
     * The job's processor function will receive an abort signal.
     *
     * @param jobId - The ID of the job to cancel
     * @param reason - Optional reason for the cancellation
     * @returns true if the job was found and cancelled, false otherwise
     */
    cancelJob(jobId, reason) {
        // Delegated to the lock manager, which owns the per-job abort signals.
        return this.lockManager.cancelJob(jobId, reason);
    }
    /**
     * Cancels all jobs currently being processed by this worker.
     * All active job processor functions will receive abort signals.
     *
     * @param reason - Optional reason for the cancellation
     */
    cancelAllJobs(reason) {
        // Delegated to the lock manager, which owns the per-job abort signals.
        this.lockManager.cancelAllJobs(reason);
    }
set concurrency(concurrency) {
if (typeof concurrency !== 'number' ||
concurrency < 1 ||
!isFinite(concurrency)) {
throw new Error('concurrency must be a finite number greater than 0');
}
this._concurrency = concurrency;
}
get concurrency() {
return this._concurrency;
}
get repeat() {
return new Promise(async (resolve) => {
if (!this._repeat) {
const connection = await this.client;
this._repeat = new repeat_1.Repeat(this.name, Object.assign(Object.assign({}, this.opts), { connection }));
this._repeat.on('error', e => this.emit.bind(this, e));
}
resolve(this._repeat);
});
}
get jobScheduler() {
return new Promise(async (resolve) => {
if (!this._jobScheduler) {
const connection = await this.client;
this._jobScheduler = new job_scheduler_1.JobScheduler(this.name, Object.assign(Object.assign({}, this.opts), { connection }));
this._jobScheduler.on('error', e => this.emit.bind(this, e));
}
resolve(this._jobScheduler);
});
}
    /**
     * Starts the worker's main processing loop.
     *
     * Starts the stalled-jobs check and (unless skipLockRenewal) the lock
     * renewal loop, then runs mainLoop() until the worker closes or pauses.
     *
     * @throws when no processor is configured or the worker already runs.
     */
    async run() {
        if (!this.processFn) {
            throw new Error('No process function is defined.');
        }
        if (this.running) {
            throw new Error('Worker is already running.');
        }
        try {
            this.running = true;
            if (this.closing || this.paused) {
                return;
            }
            await this.startStalledCheckTimer();
            if (!this.opts.skipLockRenewal) {
                this.lockManager.start();
            }
            const client = await this.client;
            const bclient = await this.blockingConnection.client;
            this.mainLoopRunning = this.mainLoop(client, bclient);
            // We must await here or finally will be called too early.
            await this.mainLoopRunning;
        }
        finally {
            this.running = false;
        }
    }
    /**
     * Sleeps until the current rate-limit window expires (the delay is
     * abortable), then clears the drained/limit flags.
     */
    async waitForRateLimit() {
        var _a;
        const limitUntil = this.limitUntil;
        if (limitUntil > Date.now()) {
            // Abort any previous pending delay before starting a new one.
            (_a = this.abortDelayController) === null || _a === void 0 ? void 0 : _a.abort();
            this.abortDelayController = new node_abort_controller_1.AbortController();
            const delay = this.getRateLimitDelay(limitUntil - Date.now());
            await this.delay(delay, this.abortDelayController);
            this.drained = false;
            this.limitUntil = 0;
        }
    }
    /**
     * This is the main loop in BullMQ. Its goals are to fetch jobs from the queue
     * as efficiently as possible, providing concurrency and minimal unnecessary calls
     * to Redis.
     *
     * Fetched jobs and their processing promises are multiplexed through an
     * AsyncFifoQueue so completed work is consumed in resolution order while
     * up to `concurrency` fetches/jobs are in flight.
     *
     * @param client - shared Redis client
     * @param bclient - dedicated blocking Redis client
     */
    async mainLoop(client, bclient) {
        const asyncFifoQueue = new async_fifo_queue_1.AsyncFifoQueue();
        let tokenPostfix = 0;
        // Keep looping while active work remains, even after close/pause begins.
        while ((!this.closing && !this.paused) || asyncFifoQueue.numTotal() > 0) {
            /**
             * This inner loop tries to fetch jobs concurrently, but if we are waiting for a job
             * to arrive at the queue we should not try to fetch more jobs (as it would be pointless)
             */
            while (!this.closing &&
                !this.paused &&
                !this.waiting &&
                asyncFifoQueue.numTotal() < this._concurrency &&
                !this.isRateLimited()) {
                const token = `${this.id}:${tokenPostfix++}`;
                const fetchedJob = this.retryIfFailed(() => this._getNextJob(client, bclient, token, { block: true }), {
                    delayInMs: this.opts.runRetryDelay,
                    onlyEmitError: true,
                });
                asyncFifoQueue.add(fetchedJob);
                if (this.waiting && asyncFifoQueue.numTotal() > 1) {
                    // We are waiting for jobs but we have others that we could start processing already
                    break;
                }
                // We await here so that we fetch jobs in sequence, this is important to avoid unnecessary calls
                // to Redis in high concurrency scenarios.
                const job = await fetchedJob;
                // No more jobs waiting but we have others that could start processing already
                if (!job && asyncFifoQueue.numTotal() > 1) {
                    break;
                }
                // If there are potential jobs to be processed and blockUntil is set, we should exit to avoid waiting
                // for processing this job.
                if (this.blockUntil) {
                    break;
                }
            }
            // Since there can be undefined jobs in the queue (when a job fails or queue is empty)
            // we iterate until we find a job.
            let job;
            do {
                job = await asyncFifoQueue.fetch();
            } while (!job && asyncFifoQueue.numQueued() > 0);
            if (job) {
                const token = job.token;
                asyncFifoQueue.add(this.processJob(job, token, () => asyncFifoQueue.numTotal() <= this._concurrency));
            }
            else if (asyncFifoQueue.numQueued() === 0) {
                // Nothing in flight at all: honor any active rate limit before retrying.
                await this.waitForRateLimit();
            }
        }
    }
    /**
     * Returns a promise that resolves to the next job in queue.
     * @param token - worker token to be assigned to retrieved job
     * @param block - whether to block waiting for a job (default true)
     * @returns a Job or undefined if no job was available in the queue.
     */
    async getNextJob(token, { block = true } = {}) {
        var _a, _b;
        const nextJob = await this._getNextJob(await this.client, await this.blockingConnection.client, token, { block });
        // Wrap in a telemetry span that records worker/queue/job attributes.
        return this.trace(enums_1.SpanKind.INTERNAL, 'getNextJob', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.WorkerId]: this.id,
                [enums_1.TelemetryAttributes.QueueName]: this.name,
                [enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
                [enums_1.TelemetryAttributes.WorkerOptions]: JSON.stringify({ block }),
                [enums_1.TelemetryAttributes.JobId]: nextJob === null || nextJob === void 0 ? void 0 : nextJob.id,
            });
            return nextJob;
        }, (_b = (_a = nextJob === null || nextJob === void 0 ? void 0 : nextJob.opts) === null || _a === void 0 ? void 0 : _a.telemetry) === null || _b === void 0 ? void 0 : _b.metadata);
    }
    /**
     * Internal job fetch. When the queue looks drained (and blocking is
     * requested) it first blocks on waitForJob(); otherwise it attempts
     * moveToActive directly unless rate limited. Returns undefined when
     * paused, closing, or no job is available.
     */
    async _getNextJob(client, bclient, token, { block = true } = {}) {
        if (this.paused) {
            return;
        }
        if (this.closing) {
            return;
        }
        if (this.drained && block && !this.limitUntil && !this.waiting) {
            this.waiting = this.waitForJob(bclient, this.blockUntil);
            try {
                this.blockUntil = await this.waiting;
                // A non-positive or already-elapsed blockUntil means a job may be
                // available right now.
                if (this.blockUntil <= 0 || this.blockUntil - Date.now() < 1) {
                    return await this.moveToActive(client, token, this.opts.name);
                }
            }
            finally {
                this.waiting = null;
            }
        }
        else {
            if (!this.isRateLimited()) {
                return this.moveToActive(client, token, this.opts.name);
            }
        }
    }
    /**
     * Overrides the rate limit to be active for the next jobs.
     * @deprecated This method is deprecated and will be removed in v6. Use queue.rateLimit method instead.
     * @param expireTimeMs - expire time in ms of this rate limit.
     */
    async rateLimit(expireTimeMs) {
        await this.trace(enums_1.SpanKind.INTERNAL, 'rateLimit', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.WorkerId]: this.id,
                [enums_1.TelemetryAttributes.WorkerRateLimit]: expireTimeMs,
            });
            // Setting the limiter key with a PX expiry activates the limit.
            await this.client.then(client => client.set(this.keys.limiter, Number.MAX_SAFE_INTEGER, 'PX', expireTimeMs));
        });
    }
    /**
     * Smallest blocking timeout (in seconds) this worker uses, depending on
     * whether the server supports 1ms blocking (capability set for >= 7.0.8).
     */
    get minimumBlockTimeout() {
        return this.blockingConnection.capabilities.canBlockFor1Ms
            ? /* 1 millisecond is chosen because the granularity of our timestamps are milliseconds.
            Obviously we can still process much faster than 1 job per millisecond but delays and rate limits
            will never work with more accuracy than 1ms. */
                0.001
            : 0.002;
    }
    /** Whether a rate-limit window is currently in effect for this worker. */
    isRateLimited() {
        return this.limitUntil > Date.now();
    }
    /**
     * Atomically moves the next job to the active state via Lua script,
     * updating the local rate-limit/delay bookkeeping from its reply.
     *
     * @returns a hydrated Job, or undefined when no job was available.
     */
    async moveToActive(client, token, name) {
        const [jobData, id, rateLimitDelay, delayUntil] = await this.scripts.moveToActive(client, token, name);
        this.updateDelays(rateLimitDelay, delayUntil);
        return this.nextJobFromJobData(jobData, id, token);
    }
    /**
     * Blocks on the marker key (BZPOPMIN) until a marker arrives or the
     * timeout expires.
     *
     * @param bclient - dedicated blocking Redis client
     * @param blockUntil - earliest known delayed-job timestamp (0 when none)
     * @returns the next block-until timestamp: 0 when a job may be available
     * immediately, Infinity when paused, rate limited, closing, or on error.
     */
    async waitForJob(bclient, blockUntil) {
        if (this.paused) {
            return Infinity;
        }
        let timeout;
        try {
            if (!this.closing && !this.isRateLimited()) {
                let blockTimeout = this.getBlockTimeout(blockUntil);
                if (blockTimeout > 0) {
                    // Round up to whole seconds when the server cannot take floats.
                    blockTimeout = this.blockingConnection.capabilities.canDoubleTimeout
                        ? blockTimeout
                        : Math.ceil(blockTimeout);
                    // We cannot trust that the blocking connection stays blocking forever
                    // due to issues in Redis and IORedis, so we will reconnect if we
                    // don't get a response in the expected time.
                    timeout = setTimeout(async () => {
                        bclient.disconnect(!this.closing);
                    }, blockTimeout * 1000 + 1000);
                    this.updateDelays(); // reset delays to avoid reusing same values in next iteration
                    // Markers should only be used for un-blocking, so we will handle them in this
                    // function only.
                    const result = await bclient.bzpopmin(this.keys.marker, blockTimeout);
                    if (result) {
                        const [_key, member, score] = result;
                        if (member) {
                            const newBlockUntil = parseInt(score);
                            // Use by pro version as rate limited groups could generate lower blockUntil values
                            // markers only return delays for delayed jobs
                            if (blockUntil && newBlockUntil > blockUntil) {
                                return blockUntil;
                            }
                            return newBlockUntil;
                        }
                    }
                }
                return 0;
            }
        }
        catch (error) {
            // Connection errors are expected here (e.g. the reconnect above);
            // only surface genuine errors.
            if ((0, utils_1.isNotConnectionError)(error)) {
                this.emit('error', error);
            }
            if (!this.closing) {
                await this.delay();
            }
        }
        finally {
            clearTimeout(timeout);
        }
        return Infinity;
    }
getBlockTimeout(blockUntil) {
const opts = this.opts;
// when there are delayed jobs
if (blockUntil) {
const blockDelay = blockUntil - Date.now();
// when we reach the time to get new jobs
if (blockDelay <= 0) {
return blockDelay;
}
else if (blockDelay < this.minimumBlockTimeout * 1000) {
return this.minimumBlockTimeout;
}
else {
// We restrict the maximum block timeout to 10 second to avoid
// blocking the connection for too long in the case of reconnections
// reference: https://github.com/taskforcesh/bullmq/issues/1658
return Math.min(blockDelay / 1000, maximumBlockTimeout);
}
}
else {
return Math.max(opts.drainDelay, this.minimumBlockTimeout);
}
}
getRateLimitDelay(delay) {
// We restrict the maximum limit delay to the configured maximumRateLimitDelay
// to be able to promote delayed jobs while the queue is rate limited
return Math.min(delay, this.opts.maximumRateLimitDelay);
}
/**
*
* This function is exposed only for testing purposes.
*/
async delay(milliseconds, abortController) {
await (0, utils_1.delay)(milliseconds || utils_1.DELAY_TIME_1, abortController);
}
updateDelays(limitDelay = 0, delayUntil = 0) {
const clampedLimit = Math.max(limitDelay, 0);
if (clampedLimit > 0) {
this.limitUntil = Date.now() + clampedLimit;
}
else {
this.limitUntil = 0;
}
this.blockUntil = Math.max(delayUntil, 0) || 0;
}
    /**
     * Turns raw job data returned by a move-to-active script into a Job
     * instance, or handles the empty case.
     *
     * When jobData is empty, emits 'drained' (only on the transition into the
     * drained state) and returns undefined. Otherwise creates the Job, attaches
     * the lock token and, for repeatable jobs, schedules the next iteration
     * before returning the job.
     *
     * @param jobData - raw job payload from Redis, or undefined when the queue is empty.
     * @param jobId - id of the fetched job.
     * @param token - lock token to attach to the job.
     * @returns the Job instance, or undefined when none is available or the
     * next repeatable iteration could not be scheduled.
     */
    async nextJobFromJobData(jobData, jobId, token) {
        if (!jobData) {
            // Only emit 'drained' once per drained period.
            if (!this.drained) {
                this.emit('drained');
                this.drained = true;
            }
        }
        else {
            this.drained = false;
            const job = this.createJob(jobData, jobId);
            job.token = token;
            try {
                await this.retryIfFailed(async () => {
                    // NOTE(review): a repeatJobKey with fewer than 5 ':'-separated
                    // segments presumably identifies the job-scheduler key format;
                    // longer keys fall through to the legacy repeatable path — confirm.
                    if (job.repeatJobKey && job.repeatJobKey.split(':').length < 5) {
                        const jobScheduler = await this.jobScheduler;
                        await jobScheduler.upsertJobScheduler(
                        // Most of these arguments are not really needed
                        // anymore as we read them from the job scheduler itself
                        job.repeatJobKey, job.opts.repeat, job.name, job.data, job.opts, { override: false, producerId: job.id });
                    }
                    else if (job.opts.repeat) {
                        // Legacy repeatable-job flow.
                        const repeat = await this.repeat;
                        await repeat.updateRepeatableJob(job.name, job.data, job.opts, {
                            override: false,
                        });
                    }
                }, { delayInMs: this.opts.runRetryDelay });
            }
            catch (err) {
                // Emit error but don't throw to avoid breaking current job completion
                // Note: This means the next repeatable job will not be scheduled
                const errorMessage = err instanceof Error ? err.message : String(err);
                const schedulingError = new Error(`Failed to add repeatable job for next iteration: ${errorMessage}`);
                this.emit('error', schedulingError);
                // Return undefined to indicate no next job is available
                return undefined;
            }
            return job;
        }
    }
async processJob(job, token, fetchNextCallback = () => true) {
var _a, _b;
const srcPropagationMedatada = (_b = (_a = job.opts) === null || _a === void 0 ? void 0 : _a.telemetry) === null || _b === void 0 ? void 0 : _b.metadata;
return this.trace(enums_1.SpanKind.CONSUMER, 'process', this.name, async (span) => {
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.WorkerId]: this.id,
[enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
[enums_1.TelemetryAttributes.JobId]: job.id,
[enums_1.TelemetryAttributes.JobName]: job.name,
});
this.emit('active', job, 'waiting');
const processedOn = Date.now();
const abortController = this.lockManager.trackJob(job.id, token, processedOn, this.processorAcceptsSignal);
try {
const unrecoverableErrorMessage = this.getUnrecoverableErrorMessage(job);
if (unrecoverableErrorMessage) {
const failed = await this.retryIfFailed(() => {
this.lockManager.untrackJob(job.id);
return this.handleFailed(new errors_1.UnrecoverableError(unrecoverableErrorMessage), job, token, fetchNextCallback, span);
}, { delayInMs: this.opts.runRetryDelay, span });
return failed;
}
const result = await this.callProcessJob(job, token, abortController
? abortController.signal
: undefined);
return await this.retryIfFailed(() => {
this.lockManager.untrackJob(job.id);
return this.handleCompleted(result, job, token, fetchNextCallback, span);
}, { delayInMs: this.opts.runRetryDelay, span });
}
catch (err) {
const failed = await this.retryIfFailed(() => {
this.lockManager.untrackJob(job.id);
return this.handleFailed(err, job, token, fetchNextCallback, span);
}, { delayInMs: this.opts.runRetryDelay, span, onlyEmitError: true });
return failed;
}
finally {
this.lockManager.untrackJob(job.id);
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.JobFinishedTimestamp]: Date.now(),
[enums_1.TelemetryAttributes.JobProcessedTimestamp]: processedOn,
});
}
}, srcPropagationMedatada);
}
getUnrecoverableErrorMessage(job) {
if (job.deferredFailure) {
return job.deferredFailure;
}
if (this.opts.maxStartedAttempts &&
this.opts.maxStartedAttempts < job.attemptsStarted) {
return 'job started more than allowable limit';
}
}
    /**
     * Handles a successfully processed job: moves it to the completed set,
     * emits 'completed' and, when the move also fetched the next job,
     * returns that next job.
     *
     * @param result - value returned by the processor.
     * @param job - the job that finished.
     * @param token - lock token held for this job.
     * @param fetchNextCallback - when it returns true (and the worker is not
     * closing/paused), moveToCompleted is asked to also fetch the next job.
     * @param span - optional telemetry span to annotate.
     * @returns the next Job instance when one was fetched, otherwise undefined.
     */
    async handleCompleted(result, job, token, fetchNextCallback = () => true, span) {
        if (!this.connection.closing) {
            const completed = await job.moveToCompleted(result, token, fetchNextCallback() && !(this.closing || this.paused));
            this.emit('completed', job, result, 'active');
            span === null || span === void 0 ? void 0 : span.addEvent('job completed', {
                [enums_1.TelemetryAttributes.JobResult]: JSON.stringify(result),
            });
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.JobAttemptsMade]: job.attemptsMade,
            });
            // moveToCompleted returns an array only when it also fetched the next job.
            if (Array.isArray(completed)) {
                const [jobData, jobId, rateLimitDelay, delayUntil] = completed;
                this.updateDelays(rateLimitDelay, delayUntil);
                return this.nextJobFromJobData(jobData, jobId, token);
            }
        }
    }
async handleFailed(err, job, token, fetchNextCallback = () => true, span) {
if (!this.connection.closing) {
// Check if the job was manually rate-limited
if (err.message === errors_1.RATE_LIMIT_ERROR) {
const rateLimitTtl = await this.moveLimitedBackToWait(job, token);
this.limitUntil = rateLimitTtl > 0 ? Date.now() + rateLimitTtl : 0;
return;
}
if (err instanceof errors_1.DelayedError ||
err.name == 'DelayedError' ||
err instanceof errors_1.WaitingError ||
err.name == 'WaitingError' ||
err instanceof errors_1.WaitingChildrenError ||
err.name == 'WaitingChildrenError') {
const client = await this.client;
return this.moveToActive(client, token, this.opts.name);
}
const result = await job.moveToFailed(err, token, fetchNextCallback() && !(this.closing || this.paused));
this.emit('failed', job, err, 'active');
span === null || span === void 0 ? void 0 : span.addEvent('job failed', {
[enums_1.TelemetryAttributes.JobFailedReason]: err.message,
});
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.JobAttemptsMade]: job.attemptsMade,
});
// Note: result can be undefined if moveToFailed fails (e.g., lock was lost)
if (Array.isArray(result)) {
const [jobData, jobId, rateLimitDelay, delayUntil] = result;
this.updateDelays(rateLimitDelay, delayUntil);
return this.nextJobFromJobData(jobData, jobId, token);
}
}
}
/**
*
* Pauses the processing of this queue only for this worker.
*/
async pause(doNotWaitActive) {
await this.trace(enums_1.SpanKind.INTERNAL, 'pause', this.name, async (span) => {
var _a;
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.WorkerId]: this.id,
[enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
[enums_1.TelemetryAttributes.WorkerDoNotWaitActive]: doNotWaitActive,
});
if (!this.paused) {
this.paused = true;
if (!doNotWaitActive) {
await this.whenCurrentJobsFinished();
}
(_a = this.stalledCheckStopper) === null || _a === void 0 ? void 0 : _a.call(this);
this.emit('paused');
}
});
}
/**
*
* Resumes processing of this worker (if paused).
*/
resume() {
if (!this.running) {
this.trace(enums_1.SpanKind.INTERNAL, 'resume', this.name, span => {
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.WorkerId]: this.id,
[enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
});
this.paused = false;
if (this.processFn) {
this.run();
}
this.emit('resumed');
});
}
}
/**
*
* Checks if worker is paused.
*
* @returns true if worker is paused, false otherwise.
*/
isPaused() {
return !!this.paused;
}
/**
*
* Checks if worker is currently running.
*
* @returns true if worker is running, false otherwise.
*/
isRunning() {
return this.running;
}
    /**
     *
     * Closes the worker and related redis connections.
     *
     * This method waits for current jobs to finalize before returning.
     *
     * @param force - Use force boolean parameter if you do not want to wait for
     * current jobs to be processed. When using telemetry, be mindful that it can
     * interfere with the proper closure of spans, potentially preventing them from being exported.
     *
     * @returns Promise that resolves when the worker has been closed.
     */
    async close(force = false) {
        // Concurrent calls share the same in-flight closing promise.
        if (this.closing) {
            return this.closing;
        }
        this.closing = (async () => {
            await this.trace(enums_1.SpanKind.INTERNAL, 'close', this.name, async (span) => {
                var _a, _b;
                span === null || span === void 0 ? void 0 : span.setAttributes({
                    [enums_1.TelemetryAttributes.WorkerId]: this.id,
                    [enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
                    [enums_1.TelemetryAttributes.WorkerForceClose]: force,
                });
                this.emit('closing', 'closing queue');
                // Abort any pending retry delay so close is not blocked by it.
                (_a = this.abortDelayController) === null || _a === void 0 ? void 0 : _a.abort();
                // Define the async cleanup functions
                const asyncCleanups = [
                    () => {
                        // When not forcing, wait for in-flight jobs (without reconnecting).
                        return force || this.whenCurrentJobsFinished(false);
                    },
                    () => this.lockManager.close(),
                    () => { var _a; return (_a = this.childPool) === null || _a === void 0 ? void 0 : _a.clean(); },
                    () => this.blockingConnection.close(force),
                    () => this.connection.close(force),
                ];
                // Run cleanup functions sequentially and make sure all are run despite any errors
                for (const cleanup of asyncCleanups) {
                    try {
                        await cleanup();
                    }
                    catch (err) {
                        this.emit('error', err);
                    }
                }
                // Stop the stalled-jobs checker loop, if one is running.
                (_b = this.stalledCheckStopper) === null || _b === void 0 ? void 0 : _b.call(this);
                this.closed = true;
                this.emit('closed');
            });
        })();
        return await this.closing;
    }
/**
*
* Manually starts the stalled checker.
* The check will run once as soon as this method is called, and
* then every opts.stalledInterval milliseconds until the worker is closed.
* Note: Normally you do not need to call this method, since the stalled checker
* is automatically started when the worker starts processing jobs after
* calling run. However if you want to process the jobs manually you need
* to call this method to start the stalled checker.
*
* @see {@link https://docs.bullmq.io/patterns/manually-fetching-jobs}
*/
async startStalledCheckTimer() {
if (!this.opts.skipStalledCheck) {
if (!this.closing) {
await this.trace(enums_1.SpanKind.INTERNAL, 'startStalledCheckTimer', this.name, async (span) => {
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.WorkerId]: this.id,
[enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
});
this.stalledChecker().catch(err => {
this.emit('error', err);
});
});
}
}
}
async stalledChecker() {
while (!(this.closing || this.paused)) {
await this.checkConnectionError(() => this.moveStalledJobsToWait());
await new Promise(resolve => {
const timeout = setTimeout(resolve, this.opts.stalledInterval);
this.stalledCheckStopper = () => {
clearTimeout(timeout);
resolve();
};
});
}
}
/**
* Returns a promise that resolves when active jobs are cleared
*
* @returns
*/
async whenCurrentJobsFinished(reconnect = true) {
//
// Force reconnection of blocking connection to abort blocking redis call immediately.
//
if (this.waiting) {
// If we are not going to reconnect, we will not wait for the disconnection.
await this.blockingConnection.disconnect(reconnect);
}
else {
reconnect = false;
}
if (this.mainLoopRunning) {
await this.mainLoopRunning;
}
reconnect && (await this.blockingConnection.reconnect());
}
    /**
     * Runs `fn` and retries it when it throws a connection error.
     *
     * Non-connection errors stop the loop: they are emitted as 'error' (unless
     * the worker is paused or closing) and then rethrown, or swallowed when
     * opts.onlyEmitError is set. Connection errors are retried after an
     * optional delay, up to opts.maxRetries attempts (default: unbounded).
     *
     * @param fn - async operation to execute.
     * @param opts - { maxRetries, delayInMs, span, onlyEmitError }.
     * @returns the result of `fn`, or undefined when the error was only emitted.
     */
    async retryIfFailed(fn, opts) {
        var _a;
        let retry = 0;
        const maxRetries = opts.maxRetries || Infinity;
        do {
            try {
                return await fn();
            }
            catch (err) {
                (_a = opts.span) === null || _a === void 0 ? void 0 : _a.recordException(err.message);
                if ((0, utils_1.isNotConnectionError)(err)) {
                    // Emit error when not paused or closing; optionally swallow (no throw) when opts.onlyEmitError is set.
                    if (!this.paused && !this.closing) {
                        this.emit('error', err);
                    }
                    if (opts.onlyEmitError) {
                        return;
                    }
                    else {
                        throw err;
                    }
                }
                else {
                    // Connection error: optionally back off before the next attempt.
                    if (opts.delayInMs && !this.closing && !this.closed) {
                        await this.delay(opts.delayInMs, this.abortDelayController);
                    }
                    if (retry + 1 >= maxRetries) {
                        // If we've reached max retries, throw the last error
                        throw err;
                    }
                }
            }
        } while (++retry < maxRetries);
    }
async moveStalledJobsToWait() {
await this.trace(enums_1.SpanKind.INTERNAL, 'moveStalledJobsToWait', this.name, async (span) => {
const stalled = await this.scripts.moveStalledJobsToWait();
span === null || span === void 0 ? void 0 : span.setAttributes({
[enums_1.TelemetryAttributes.WorkerId]: this.id,
[enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
[enums_1.TelemetryAttributes.WorkerStalledJobs]: stalled,
});
stalled.forEach((jobId) => {
span === null || span === void 0 ? void 0 : span.addEvent('job stalled', {
[enums_1.TelemetryAttributes.JobId]: jobId,
});
this.emit('stalled', jobId, 'active');
});
});
}
    // Moves a manually rate-limited job back to the wait list; returns the
    // promise from job.moveToWait (used by handleFailed to read the limit TTL).
    moveLimitedBackToWait(job, token) {
        return job.moveToWait(token);
    }
}
exports.Worker = Worker;
//# sourceMappingURL=worker.js.map

File diff suppressed because one or more lines are too long