Projektstart

This commit is contained in:
2026-01-22 15:49:12 +01:00
parent 7212eb6f7a
commit 57e5f652f8
10637 changed files with 2598792 additions and 64 deletions

View File

@@ -0,0 +1,42 @@
/**
* (c) 2017-2025 BullForce Labs AB, MIT Licensed.
* @see LICENSE.md
*
*/
/**
* AsyncFifoQueue
*
* A minimal FIFO queue for asynchronous operations. Allows adding asynchronous operations
* and consume them in the order they are resolved.
*/
export declare class AsyncFifoQueue<T> {
    /** When true, a rejected promise enqueues `undefined` instead of surfacing the error. */
    private ignoreErrors;
    /**
     * A queue of completed promises. As the pending
     * promises are resolved, they are added to this queue.
     */
    private queue;
    /**
     * A set of pending promises.
     */
    private pending;
    /**
     * The next promise to be resolved. As soon as a pending promise
     * is resolved, this promise is resolved with the result of the
     * pending promise.
     */
    private nextPromise;
    /** Resolver of `nextPromise`; re-armed after every settlement. */
    private resolve;
    /** Rejecter of `nextPromise`; re-armed after every settlement. */
    private reject;
    constructor(ignoreErrors?: boolean);
    /** Registers a promise; its result becomes consumable via fetch() in resolution order. */
    add(promise: Promise<T>): void;
    /** Resolves when every currently pending promise has settled. */
    waitAll(): Promise<void>;
    /** Pending promises plus results not yet fetched. */
    numTotal(): number;
    /** Number of promises that have not settled yet. */
    numPending(): number;
    /** Number of settled results waiting to be fetched. */
    numQueued(): number;
    private resolvePromise;
    private rejectPromise;
    private newPromise;
    private wait;
    /** Next settled result in resolution order; undefined when the queue is fully empty. */
    fetch(): Promise<T | void>;
}

View File

@@ -0,0 +1,130 @@
/**
* (c) 2017-2025 BullForce Labs AB, MIT Licensed.
* @see LICENSE.md
*
*/
/**
 * A single linked-list node holding one value and a pointer to the
 * node pushed after it (`null` while it is the tail).
 */
class Node {
    constructor(value) {
        this.value = value;
        this.next = null;
    }
}
/**
 * Minimal singly linked list used as a FIFO buffer:
 * `push` appends at the tail, `shift` removes from the head.
 */
class LinkedList {
    constructor() {
        this.length = 0;
        this.head = null;
        this.tail = null;
    }
    /** Appends `value` at the tail and returns the new node. */
    push(value) {
        const node = new Node(value);
        if (this.length === 0) {
            // First element becomes the head.
            this.head = node;
        }
        else {
            // Link after the current tail.
            this.tail.next = node;
        }
        this.tail = node;
        this.length += 1;
        return node;
    }
    /** Removes and returns the head node, or null when the list is empty. */
    shift() {
        if (this.length === 0) {
            return null;
        }
        const removed = this.head;
        this.head = removed.next;
        this.length -= 1;
        return removed;
    }
}
/**
* AsyncFifoQueue
*
* A minimal FIFO queue for asynchronous operations. Allows adding asynchronous operations
* and consume them in the order they are resolved.
*/
export class AsyncFifoQueue {
    constructor(ignoreErrors = false) {
        this.ignoreErrors = ignoreErrors;
        /**
         * A queue of completed promises. As the pending
         * promises are resolved, they are added to this queue.
         */
        this.queue = new LinkedList();
        /**
         * A set of pending promises.
         */
        this.pending = new Set();
        this.newPromise();
    }
    /**
     * Registers a promise. Its settled value becomes consumable via fetch()
     * in resolution order (not insertion order).
     */
    add(promise) {
        this.pending.add(promise);
        promise
            .then(data => {
            this.pending.delete(promise);
            // Wake a consumer blocked in wait() before enqueueing, so fetch()'s
            // inner loop resumes; the value itself is delivered via the queue.
            if (this.queue.length === 0) {
                this.resolvePromise(data);
            }
            this.queue.push(data);
        })
            .catch(err => {
            // Ignore errors
            if (this.ignoreErrors) {
                this.queue.push(undefined);
            }
            this.pending.delete(promise);
            this.rejectPromise(err);
        });
    }
    /** Resolves when every currently pending promise has settled. */
    async waitAll() {
        await Promise.all(this.pending);
    }
    /** Promises still pending plus settled results not yet fetched. */
    numTotal() {
        return this.pending.size + this.queue.length;
    }
    numPending() {
        return this.pending.size;
    }
    numQueued() {
        return this.queue.length;
    }
    resolvePromise(data) {
        this.resolve(data);
        // Immediately arm a fresh promise for the next waiter.
        this.newPromise();
    }
    rejectPromise(err) {
        this.reject(err);
        this.newPromise();
    }
    newPromise() {
        this.nextPromise = new Promise((resolve, reject) => {
            this.resolve = resolve;
            this.reject = reject;
        });
    }
    async wait() {
        return this.nextPromise;
    }
    /**
     * Returns the next settled result in resolution order, waiting for one if
     * necessary. Returns undefined when nothing is pending and nothing queued.
     */
    async fetch() {
        var _a;
        if (this.pending.size === 0 && this.queue.length === 0) {
            return;
        }
        while (this.queue.length === 0) {
            try {
                await this.wait();
            }
            catch (err) {
                // Ignore errors
                if (!this.ignoreErrors) {
                    console.error('Unexpected Error in AsyncFifoQueue', err);
                }
            }
        }
        return (_a = this.queue.shift()) === null || _a === void 0 ? void 0 : _a.value;
    }
}
//# sourceMappingURL=async-fifo-queue.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"async-fifo-queue.js","sourceRoot":"","sources":["../../../src/classes/async-fifo-queue.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,MAAM,IAAI;IAIR,YAAY,KAAQ;QAHpB,UAAK,GAAkB,SAAS,CAAC;QACjC,SAAI,GAAmB,IAAI,CAAC;QAG1B,IAAI,CAAC,KAAK,GAAG,KAAK,CAAC;IACrB,CAAC;CACF;AAED,MAAM,UAAU;IAKd;QAJA,WAAM,GAAG,CAAC,CAAC;QAKT,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC;QACjB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC;IACnB,CAAC;IAED,IAAI,CAAC,KAAQ;QACX,MAAM,OAAO,GAAG,IAAI,IAAI,CAAC,KAAK,CAAC,CAAC;QAChC,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,IAAI,CAAC,IAAI,GAAG,OAAO,CAAC;QACtB,CAAC;aAAM,CAAC;YACN,IAAI,CAAC,IAAI,CAAC,IAAI,GAAG,OAAO,CAAC;QAC3B,CAAC;QAED,IAAI,CAAC,IAAI,GAAG,OAAO,CAAC;QACpB,IAAI,CAAC,MAAM,IAAI,CAAC,CAAC;QACjB,OAAO,OAAO,CAAC;IACjB,CAAC;IAED,KAAK;QACH,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,OAAO,IAAI,CAAC;QACd,CAAC;aAAM,CAAC;YACN,MAAM,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC;YACvB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC;YAC3B,IAAI,CAAC,MAAM,IAAI,CAAC,CAAC;YAEjB,OAAO,IAAI,CAAC;QACd,CAAC;IACH,CAAC;CACF;AAED;;;;;GAKG;AACH,MAAM,OAAO,cAAc;IAqBzB,YAAoB,eAAe,KAAK;QAApB,iBAAY,GAAZ,YAAY,CAAQ;QApBxC;;;WAGG;QACK,UAAK,GAAkB,IAAI,UAAU,EAAE,CAAC;QAEhD;;WAEG;QACK,YAAO,GAAG,IAAI,GAAG,EAAc,CAAC;QAYtC,IAAI,CAAC,UAAU,EAAE,CAAC;IACpB,CAAC;IAEM,GAAG,CAAC,OAAmB;QAC5B,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC;QAE1B,OAAO;aACJ,IAAI,CAAC,IAAI,CAAC,EAAE;YACX,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;YAE7B,IAAI,IAAI,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;gBAC5B,IAAI,CAAC,cAAc,CAAC,IAAI,CAAC,CAAC;YAC5B,CAAC;YACD,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;QACxB,CAAC,CAAC;aACD,KAAK,CAAC,GAAG,CAAC,EAAE;YACX,gBAAgB;YAChB,IAAI,IAAI,CAAC,YAAY,EAAE,CAAC;gBACtB,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;YAC7B,CAAC;YACD,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC;YAC7B,IAAI,CAAC,aAAa,CAAC,GAAG,CAAC,CAAC;QAC1B,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,KAAK,CAAC,OAAO;QAClB,MAAM,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;IAClC,CAAC;IAEM,QAAQ;QACb,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC;I
AC/C,CAAC;IAEM,UAAU;QACf,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC;IAC3B,CAAC;IAEM,SAAS;QACd,OAAO,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC;IAC3B,CAAC;IAEO,cAAc,CAAC,IAAO;QAC5B,IAAI,CAAC,OAAQ,CAAC,IAAI,CAAC,CAAC;QACpB,IAAI,CAAC,UAAU,EAAE,CAAC;IACpB,CAAC;IAEO,aAAa,CAAC,GAAQ;QAC5B,IAAI,CAAC,MAAO,CAAC,GAAG,CAAC,CAAC;QAClB,IAAI,CAAC,UAAU,EAAE,CAAC;IACpB,CAAC;IAEO,UAAU;QAChB,IAAI,CAAC,WAAW,GAAG,IAAI,OAAO,CAAgB,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE;YAChE,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;YACvB,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;QACvB,CAAC,CAAC,CAAC;IACL,CAAC;IAEO,KAAK,CAAC,IAAI;QAChB,OAAO,IAAI,CAAC,WAAW,CAAC;IAC1B,CAAC;IAEM,KAAK,CAAC,KAAK;;QAChB,IAAI,IAAI,CAAC,OAAO,CAAC,IAAI,KAAK,CAAC,IAAI,IAAI,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YACvD,OAAO;QACT,CAAC;QACD,OAAO,IAAI,CAAC,KAAK,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YAC/B,IAAI,CAAC;gBACH,MAAM,IAAI,CAAC,IAAI,EAAE,CAAC;YACpB,CAAC;YAAC,OAAO,GAAG,EAAE,CAAC;gBACb,gBAAgB;gBAChB,IAAI,CAAC,IAAI,CAAC,YAAY,EAAE,CAAC;oBACvB,OAAO,CAAC,KAAK,CAAC,oCAAoC,EAAE,GAAG,CAAC,CAAC;gBAC3D,CAAC;YACH,CAAC;QACH,CAAC;QACD,OAAO,MAAA,IAAI,CAAC,KAAK,CAAC,KAAK,EAAE,0CAAE,KAAK,CAAC;IACnC,CAAC;CACF"}

View File

@@ -0,0 +1,11 @@
import { BackoffOptions } from '../interfaces/backoff-options';
import { MinimalJob } from '../interfaces/minimal-job';
import { BackoffStrategy } from '../types/backoff-strategy';
/** Map of built-in backoff strategy factories ('fixed', 'exponential'), keyed by strategy name. */
export interface BuiltInStrategies {
    [index: string]: (delay: number, jitter?: number) => BackoffStrategy;
}
/** Static helpers to normalize backoff options and compute retry delays. */
export declare class Backoffs {
    static builtinStrategies: BuiltInStrategies;
    /** Coerces a finite number into `{ type: 'fixed', delay }`; objects pass through; falsy input yields undefined. */
    static normalize(backoff: number | BackoffOptions): BackoffOptions | undefined;
    /** Resolves the named strategy (built-in or custom) and invokes it to get the next delay. */
    static calculate(backoff: BackoffOptions, attemptsMade: number, err: Error, job: MinimalJob, customStrategy?: BackoffStrategy): Promise<number> | number | undefined;
}

View File

@@ -0,0 +1,57 @@
/**
 * Static helpers to normalize backoff options and compute retry delays.
 */
export class Backoffs {
    /**
     * Coerces a finite numeric backoff into `{ type: 'fixed', delay }`.
     * Object options pass through unchanged; falsy input yields undefined.
     */
    static normalize(backoff) {
        if (Number.isFinite(backoff)) {
            return { type: 'fixed', delay: backoff };
        }
        if (backoff) {
            return backoff;
        }
    }
    /**
     * Resolves the strategy named in `backoff` (built-in or custom) and
     * invokes it to obtain the next retry delay. Returns undefined when
     * no backoff options are given.
     */
    static calculate(backoff, attemptsMade, err, job, customStrategy) {
        if (!backoff) {
            return;
        }
        const strategy = lookupStrategy(backoff, customStrategy);
        return strategy(attemptsMade, backoff.type, err, job);
    }
}
// Built-in delay strategies. Each factory captures (delay, jitter) and
// returns the function actually invoked per retry attempt. With jitter j,
// the delay is drawn uniformly from [base * (1 - j), base).
Backoffs.builtinStrategies = {
    fixed(delay, jitter = 0) {
        return () => {
            if (jitter <= 0) {
                return delay;
            }
            const minDelay = delay * (1 - jitter);
            return Math.floor(Math.random() * delay * jitter + minDelay);
        };
    },
    exponential(delay, jitter = 0) {
        return attemptsMade => {
            // Base doubles with every attempt: delay * 2^(attemptsMade - 1).
            const base = Math.round(Math.pow(2, attemptsMade - 1) * delay);
            if (jitter <= 0) {
                return base;
            }
            const minDelay = base * (1 - jitter);
            return Math.floor(Math.random() * base * jitter + minDelay);
        };
    },
};
/**
 * Picks the strategy function for the given backoff options: a built-in
 * strategy when the type matches, otherwise the provided custom strategy.
 * Throws when neither is available.
 */
function lookupStrategy(backoff, customStrategy) {
    const { type, delay, jitter } = backoff;
    if (type in Backoffs.builtinStrategies) {
        return Backoffs.builtinStrategies[type](delay, jitter);
    }
    if (customStrategy) {
        return customStrategy;
    }
    throw new Error(`Unknown backoff strategy ${type}.
If a custom backoff strategy is used, specify it when the queue is created.`);
}
//# sourceMappingURL=backoffs.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"backoffs.js","sourceRoot":"","sources":["../../../src/classes/backoffs.ts"],"names":[],"mappings":"AAQA,MAAM,OAAO,QAAQ;IA4BnB,MAAM,CAAC,SAAS,CACd,OAAgC;QAEhC,IAAI,MAAM,CAAC,QAAQ,CAAS,OAAO,CAAC,EAAE,CAAC;YACrC,OAAO;gBACL,IAAI,EAAE,OAAO;gBACb,KAAK,EAAU,OAAO;aACvB,CAAC;QACJ,CAAC;aAAM,IAAI,OAAO,EAAE,CAAC;YACnB,OAAuB,OAAO,CAAC;QACjC,CAAC;IACH,CAAC;IAED,MAAM,CAAC,SAAS,CACd,OAAuB,EACvB,YAAoB,EACpB,GAAU,EACV,GAAe,EACf,cAAgC;QAEhC,IAAI,OAAO,EAAE,CAAC;YACZ,MAAM,QAAQ,GAAG,cAAc,CAAC,OAAO,EAAE,cAAc,CAAC,CAAC;YAEzD,OAAO,QAAQ,CAAC,YAAY,EAAE,OAAO,CAAC,IAAI,EAAE,GAAG,EAAE,GAAG,CAAC,CAAC;QACxD,CAAC;IACH,CAAC;;AApDM,0BAAiB,GAAsB;IAC5C,KAAK,EAAE,UAAU,KAAa,EAAE,MAAM,GAAG,CAAC;QACxC,OAAO;YACL,IAAI,MAAM,GAAG,CAAC,EAAE,CAAC;gBACf,MAAM,QAAQ,GAAG,KAAK,GAAG,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC;gBAEtC,OAAO,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,KAAK,GAAG,MAAM,GAAG,QAAQ,CAAC,CAAC;YAC/D,CAAC;iBAAM,CAAC;gBACN,OAAO,KAAK,CAAC;YACf,CAAC;QACH,CAAC,CAAC;IACJ,CAAC;IAED,WAAW,EAAE,UAAU,KAAa,EAAE,MAAM,GAAG,CAAC;QAC9C,OAAO,UAAU,YAAoB;YACnC,IAAI,MAAM,GAAG,CAAC,EAAE,CAAC;gBACf,MAAM,QAAQ,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,YAAY,GAAG,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC;gBACnE,MAAM,QAAQ,GAAG,QAAQ,GAAG,CAAC,CAAC,GAAG,MAAM,CAAC,CAAC;gBAEzC,OAAO,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,QAAQ,GAAG,MAAM,GAAG,QAAQ,CAAC,CAAC;YAClE,CAAC;iBAAM,CAAC;gBACN,OAAO,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,YAAY,GAAG,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC;YAC3D,CAAC;QACH,CAAC,CAAC;IACJ,CAAC;CACF,CAAC;AA8BJ,SAAS,cAAc,CACrB,OAAuB,EACvB,cAAgC;IAEhC,IAAI,OAAO,CAAC,IAAI,IAAI,QAAQ,CAAC,iBAAiB,EAAE,CAAC;QAC/C,OAAO,QAAQ,CAAC,iBAAiB,CAAC,OAAO,CAAC,IAAI,CAAC,CAC7C,OAAO,CAAC,KAAM,EACd,OAAO,CAAC,MAAM,CACf,CAAC;IACJ,CAAC;SAAM,IAAI,cAAc,EAAE,CAAC;QAC1B,OAAO,cAAc,CAAC;IACxB,CAAC;SAAM,CAAC;QACN,MAAM,IAAI,KAAK,CACb,4BAA4B,OAAO,CAAC,IAAI;kFACoC,CAC7E,CAAC;IACJ,CAAC;AACH,CAAC"}

View File

@@ -0,0 +1,23 @@
import { Child } from './child';
import { SandboxedOptions } from '../interfaces';
interface ChildPoolOpts extends SandboxedOptions {
    /** Entry file spawned in each child; defaults are resolved in the implementation. */
    mainFile?: string;
}
/**
 * Pool of sandboxed children: busy ones are retained by pid, idle ones are
 * kept in per-processor-file free lists for reuse.
 */
export declare class ChildPool {
    /** Children currently processing a job, indexed by pid. */
    retained: {
        [key: number]: Child;
    };
    /** Idle children available for reuse, keyed by processor file. */
    free: {
        [key: string]: Child[];
    };
    private opts;
    constructor({ mainFile, useWorkerThreads, workerForkOptions, workerThreadsOptions, }: ChildPoolOpts);
    /** Returns a pooled child for the processor file, spawning one when none is free. */
    retain(processFile: string): Promise<Child>;
    /** Moves a child from the retained map back into its free list. */
    release(child: Child): void;
    /** Removes a child from the pool entirely (retained map and free list). */
    remove(child: Child): void;
    /** Removes the child and kills it (SIGKILL by default). */
    kill(child: Child, signal?: 'SIGTERM' | 'SIGKILL'): Promise<void>;
    /** Terminates every pooled child and resets the pool. */
    clean(): Promise<void>;
    getFree(id: string): Child[];
    getAllFree(): Child[];
}
export {};

View File

@@ -0,0 +1,79 @@
import * as path from 'path';
import { Child } from './child';
const CHILD_KILL_TIMEOUT = 30000;
/**
 * Detects a CommonJS runtime, where `require` and `module.exports` exist.
 * Used to pick the cjs vs esm default main file for sandboxed children.
 */
const supportCJS = () => {
    const hasRequire = typeof require === 'function';
    const hasModuleExports = typeof module === 'object' && typeof module.exports === 'object';
    return hasRequire && hasModuleExports;
};
/**
 * Pool of sandboxed children: busy ones are retained by pid, idle ones are
 * kept in per-processor-file free lists for reuse.
 */
export class ChildPool {
    constructor({ mainFile = supportCJS()
        ? path.join(process.cwd(), 'dist/cjs/classes/main.js')
        : path.join(process.cwd(), 'dist/esm/classes/main.js'), useWorkerThreads, workerForkOptions, workerThreadsOptions, }) {
        this.retained = {};
        this.free = {};
        this.opts = {
            mainFile,
            useWorkerThreads,
            workerForkOptions,
            workerThreadsOptions,
        };
    }
    /** Returns a pooled child for the processor file, spawning one when none is free. */
    async retain(processFile) {
        // Reuse an idle child for this processor file when available.
        const reusable = this.getFree(processFile).pop();
        if (reusable) {
            this.retained[reusable.pid] = reusable;
            return reusable;
        }
        const spawned = new Child(this.opts.mainFile, processFile, {
            useWorkerThreads: this.opts.useWorkerThreads,
            workerForkOptions: this.opts.workerForkOptions,
            workerThreadsOptions: this.opts.workerThreadsOptions,
        });
        spawned.on('exit', this.remove.bind(this, spawned));
        try {
            await spawned.init();
            // Check status here as well, in case the child exited before we could
            // retain it.
            if (spawned.exitCode !== null || spawned.signalCode !== null) {
                throw new Error('Child exited before it could be retained');
            }
            this.retained[spawned.pid] = spawned;
            return spawned;
        }
        catch (err) {
            console.error(err);
            this.release(spawned);
            throw err;
        }
    }
    /** Moves the child from the retained map back into its free list. */
    release(child) {
        delete this.retained[child.pid];
        this.getFree(child.processFile).push(child);
    }
    /** Removes the child from the pool entirely. */
    remove(child) {
        delete this.retained[child.pid];
        const freeList = this.getFree(child.processFile);
        const idx = freeList.indexOf(child);
        if (idx !== -1) {
            freeList.splice(idx, 1);
        }
    }
    /** Removes the child and kills it (SIGKILL by default). */
    async kill(child, signal = 'SIGKILL') {
        this.remove(child);
        return child.kill(signal, CHILD_KILL_TIMEOUT);
    }
    /** Terminates every pooled child (retained and free) and resets the pool. */
    async clean() {
        const children = Object.values(this.retained).concat(this.getAllFree());
        this.retained = {};
        this.free = {};
        await Promise.all(children.map(child => this.kill(child, 'SIGTERM')));
    }
    /** Lazily creates and returns the free list for this processor file. */
    getFree(id) {
        if (!this.free[id]) {
            this.free[id] = [];
        }
        return this.free[id];
    }
    /** Flattens all per-file free lists into a single array. */
    getAllFree() {
        return Object.values(this.free).reduce((acc, list) => acc.concat(list), []);
    }
}
//# sourceMappingURL=child-pool.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"child-pool.js","sourceRoot":"","sources":["../../../src/classes/child-pool.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,IAAI,MAAM,MAAM,CAAC;AAC7B,OAAO,EAAE,KAAK,EAAE,MAAM,SAAS,CAAC;AAGhC,MAAM,kBAAkB,GAAG,KAAM,CAAC;AAMlC,MAAM,UAAU,GAAG,GAAG,EAAE;IACtB,OAAO,CACL,OAAO,OAAO,KAAK,UAAU;QAC7B,OAAO,MAAM,KAAK,QAAQ;QAC1B,OAAO,MAAM,CAAC,OAAO,KAAK,QAAQ,CACnC,CAAC;AACJ,CAAC,CAAC;AAEF,MAAM,OAAO,SAAS;IAKpB,YAAY,EACV,QAAQ,GAAG,UAAU,EAAE;QACrB,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,0BAA0B,CAAC;QACtD,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,0BAA0B,CAAC,EACxD,gBAAgB,EAChB,iBAAiB,EACjB,oBAAoB,GACN;QAXhB,aAAQ,GAA6B,EAAE,CAAC;QACxC,SAAI,GAA+B,EAAE,CAAC;QAWpC,IAAI,CAAC,IAAI,GAAG;YACV,QAAQ;YACR,gBAAgB;YAChB,iBAAiB;YACjB,oBAAoB;SACrB,CAAC;IACJ,CAAC;IAED,KAAK,CAAC,MAAM,CAAC,WAAmB;QAC9B,IAAI,KAAK,GAAG,IAAI,CAAC,OAAO,CAAC,WAAW,CAAC,CAAC,GAAG,EAAE,CAAC;QAE5C,IAAI,KAAK,EAAE,CAAC;YACV,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC;YACjC,OAAO,KAAK,CAAC;QACf,CAAC;QAED,KAAK,GAAG,IAAI,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,WAAW,EAAE;YACjD,gBAAgB,EAAE,IAAI,CAAC,IAAI,CAAC,gBAAgB;YAC5C,iBAAiB,EAAE,IAAI,CAAC,IAAI,CAAC,iBAAiB;YAC9C,oBAAoB,EAAE,IAAI,CAAC,IAAI,CAAC,oBAAoB;SACrD,CAAC,CAAC;QAEH,KAAK,CAAC,EAAE,CAAC,MAAM,EAAE,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,EAAE,KAAK,CAAC,CAAC,CAAC;QAEhD,IAAI,CAAC;YACH,MAAM,KAAK,CAAC,IAAI,EAAE,CAAC;YAEnB,sEAAsE;YACtE,aAAa;YACb,IAAI,KAAK,CAAC,QAAQ,KAAK,IAAI,IAAI,KAAK,CAAC,UAAU,KAAK,IAAI,EAAE,CAAC;gBACzD,MAAM,IAAI,KAAK,CAAC,0CAA0C,CAAC,CAAC;YAC9D,CAAC;YAED,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,KAAK,CAAC;YAEjC,OAAO,KAAK,CAAC;QACf,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACb,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;YACnB,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;YACpB,MAAM,GAAG,CAAC;QACZ,CAAC;IACH,CAAC;IAED,OAAO,CAAC,KAAY;QAClB,OAAO,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;QAChC,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;IAC9C,CAAC;IAED,MAAM,CAAC,KAAY;QACjB,OAAO,IAAI,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;
QAEhC,MAAM,IAAI,GAAG,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC;QAE7C,MAAM,UAAU,GAAG,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;QACvC,IAAI,UAAU,GAAG,CAAC,CAAC,EAAE,CAAC;YACpB,IAAI,CAAC,MAAM,CAAC,UAAU,EAAE,CAAC,CAAC,CAAC;QAC7B,CAAC;IACH,CAAC;IAED,KAAK,CAAC,IAAI,CACR,KAAY,EACZ,SAAgC,SAAS;QAEzC,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;QACnB,OAAO,KAAK,CAAC,IAAI,CAAC,MAAM,EAAE,kBAAkB,CAAC,CAAC;IAChD,CAAC;IAED,KAAK,CAAC,KAAK;QACT,MAAM,QAAQ,GAAG,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,IAAI,CAAC,UAAU,EAAE,CAAC,CAAC;QACxE,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAC;QACnB,IAAI,CAAC,IAAI,GAAG,EAAE,CAAC;QAEf,MAAM,OAAO,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,EAAE,SAAS,CAAC,CAAC,CAAC,CAAC;IAChE,CAAC;IAED,OAAO,CAAC,EAAU;QAChB,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,GAAG,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,EAAE,CAAC,CAAC;IAC/C,CAAC;IAED,UAAU;QACR,OAAO,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,MAAM,CACpC,CAAC,KAAK,EAAE,MAAM,EAAE,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,EACvC,EAAE,CACH,CAAC;IACJ,CAAC;CACF"}

View File

@@ -0,0 +1,38 @@
import { Receiver, SandboxedJob } from '../interfaces';
import { JobJsonSandbox } from '../types';
/** Lifecycle states of the sandboxed child processor. */
declare enum ChildStatus {
    Idle = 0,
    Started = 1,
    Terminating = 2,
    Errored = 3
}
/**
* ChildProcessor
*
* This class acts as the interface between a child process and it parent process
* so that jobs can be processed in different processes.
*
*/
export declare class ChildProcessor {
    private send;
    private receiver;
    status?: ChildStatus;
    processor: any;
    /** Promise for the job currently being processed, if any. */
    currentJobPromise: Promise<unknown> | undefined;
    constructor(send: (msg: any) => Promise<void>, receiver: Receiver);
    /** Loads the processor function from the file and reports init success/failure to the parent. */
    init(processorFile: string): Promise<void>;
    /** Processes one job, reporting Completed/Failed; only valid while Idle. */
    start(jobJson: JobJsonSandbox, token?: string): Promise<void>;
    stop(): Promise<void>;
    /** Waits for the in-flight job (if any) to finish, then exits the process. */
    waitForCurrentJobAndExit(): Promise<void>;
    /**
     * Enhance the given job argument with some functions
     * that can be called from the sandboxed job processor.
     *
     * Note, the `job` argument is a JSON deserialized message
     * from the main node process to this forked child process,
     * the functions on the original job object are not in tact.
     * The wrapped job adds back some of those original functions.
     */
    protected wrapJob(job: JobJsonSandbox, send: (msg: any) => Promise<void>): SandboxedJob;
}
export {};

View File

@@ -0,0 +1,216 @@
import { ParentCommand } from '../enums';
import { errorToJSON } from '../utils';
var ChildStatus;
(function (ChildStatus) {
    // Reproduce TypeScript's numeric-enum runtime object: a forward
    // (name -> value) and reverse (value -> name) entry per member.
    const members = ['Idle', 'Started', 'Terminating', 'Errored'];
    members.forEach((name, value) => {
        ChildStatus[name] = value;
        ChildStatus[value] = name;
    });
})(ChildStatus || (ChildStatus = {}));
const RESPONSE_TIMEOUT = process.env.NODE_ENV === 'test' ? 500 : 5000;
/**
* ChildProcessor
*
* This class acts as the interface between a child process and it parent process
* so that jobs can be processed in different processes.
*
*/
export class ChildProcessor {
    constructor(send, receiver) {
        this.send = send;
        this.receiver = receiver;
    }
    /**
     * Loads the processor function from the given file and reports
     * InitCompleted (or InitFailed) back to the parent process.
     */
    async init(processorFile) {
        let processor;
        try {
            const { default: processorFn } = await import(processorFile);
            processor = processorFn;
            if (processor.default) {
                // support es2015 module.
                processor = processor.default;
            }
            if (typeof processor !== 'function') {
                throw new Error('No function is exported in processor file');
            }
        }
        catch (err) {
            this.status = ChildStatus.Errored;
            return this.send({
                cmd: ParentCommand.InitFailed,
                err: errorToJSON(err),
            });
        }
        const origProcessor = processor;
        // Normalize the processor so that synchronous throws surface
        // as rejected promises.
        processor = function (job, token) {
            try {
                return Promise.resolve(origProcessor(job, token));
            }
            catch (err) {
                return Promise.reject(err);
            }
        };
        this.processor = processor;
        this.status = ChildStatus.Idle;
        await this.send({
            cmd: ParentCommand.InitCompleted,
        });
    }
    /**
     * Runs the processor for one job and reports Completed or Failed to the
     * parent. Only one job may run at a time: status must be Idle.
     */
    async start(jobJson, token) {
        if (this.status !== ChildStatus.Idle) {
            return this.send({
                cmd: ParentCommand.Error,
                err: errorToJSON(new Error('cannot start a not idling child process')),
            });
        }
        this.status = ChildStatus.Started;
        this.currentJobPromise = (async () => {
            try {
                const job = this.wrapJob(jobJson, this.send);
                const result = await this.processor(job, token);
                await this.send({
                    cmd: ParentCommand.Completed,
                    // undefined is not JSON-serializable; send null instead.
                    value: typeof result === 'undefined' ? null : result,
                });
            }
            catch (err) {
                await this.send({
                    cmd: ParentCommand.Failed,
                    value: errorToJSON(!err.message ? new Error(err) : err),
                });
            }
            finally {
                this.status = ChildStatus.Idle;
                this.currentJobPromise = undefined;
            }
        })();
    }
    async stop() { }
    /** Waits for the in-flight job (if any) to finish, then exits the process. */
    async waitForCurrentJobAndExit() {
        this.status = ChildStatus.Terminating;
        try {
            await this.currentJobPromise;
        }
        finally {
            process.exit(process.exitCode || 0);
        }
    }
    /**
     * Enhance the given job argument with some functions
     * that can be called from the sandboxed job processor.
     *
     * Note, the `job` argument is a JSON deserialized message
     * from the main node process to this forked child process,
     * the functions on the original job object are not in tact.
     * The wrapped job adds back some of those original functions.
     */
    wrapJob(job, send) {
        const wrappedJob = Object.assign(Object.assign({}, job), { queueQualifiedName: job.queueQualifiedName, data: JSON.parse(job.data || '{}'), opts: job.opts, returnValue: JSON.parse(job.returnvalue || '{}'),
            /*
             * Proxy `updateProgress` function, should works as `progress` function.
             */
            async updateProgress(progress) {
                // Locally store reference to new progress value
                // so that we can return it from this process synchronously.
                this.progress = progress;
                // Send message to update job progress.
                await send({
                    cmd: ParentCommand.Progress,
                    value: progress,
                });
            },
            /*
             * Proxy job `log` function.
             */
            log: async (row) => {
                await send({
                    cmd: ParentCommand.Log,
                    value: row,
                });
            },
            /*
             * Proxy `moveToDelayed` function.
             */
            moveToDelayed: async (timestamp, token) => {
                await send({
                    cmd: ParentCommand.MoveToDelayed,
                    value: { timestamp, token },
                });
            },
            /*
             * Proxy `moveToWait` function.
             */
            moveToWait: async (token) => {
                await send({
                    cmd: ParentCommand.MoveToWait,
                    value: { token },
                });
            },
            /*
             * Proxy `moveToWaitingChildren` function.
             */
            moveToWaitingChildren: async (token, opts) => {
                // Random id correlates the request with the parent's reply.
                const requestId = Math.random().toString(36).substring(2, 15);
                await send({
                    requestId,
                    cmd: ParentCommand.MoveToWaitingChildren,
                    value: { token, opts },
                });
                return waitResponse(requestId, this.receiver, RESPONSE_TIMEOUT, 'moveToWaitingChildren');
            },
            /*
             * Proxy `updateData` function.
             */
            updateData: async (data) => {
                await send({
                    cmd: ParentCommand.Update,
                    value: data,
                });
                wrappedJob.data = data;
            },
            /**
             * Proxy `getChildrenValues` function.
             */
            getChildrenValues: async () => {
                const requestId = Math.random().toString(36).substring(2, 15);
                await send({
                    requestId,
                    cmd: ParentCommand.GetChildrenValues,
                });
                return waitResponse(requestId, this.receiver, RESPONSE_TIMEOUT, 'getChildrenValues');
            },
            /**
             * Proxy `getIgnoredChildrenFailures` function.
             *
             * This method sends a request to retrieve the failures of ignored children
             * and waits for a response from the parent process.
             *
             * @returns - A promise that resolves with the ignored children failures.
             * The exact structure of the returned data depends on the parent process implementation.
             */
            getIgnoredChildrenFailures: async () => {
                const requestId = Math.random().toString(36).substring(2, 15);
                await send({
                    requestId,
                    cmd: ParentCommand.GetIgnoredChildrenFailures,
                });
                return waitResponse(requestId, this.receiver, RESPONSE_TIMEOUT, 'getIgnoredChildrenFailures');
            } });
        return wrappedJob;
    }
}
/**
 * Waits for a 'message' event from the parent whose `requestId` matches,
 * resolving with its `value`, or rejects with a TimeoutError after `timeout` ms.
 *
 * Fix: the timeout timer is now cleared once the matching response arrives.
 * Previously it was left running, which kept the event loop alive for up to
 * `timeout` ms after success and fired a spurious (ignored) rejection.
 *
 * @param requestId - Correlation id sent with the request.
 * @param receiver - Emitter delivering 'message' events from the parent.
 * @param timeout - Milliseconds to wait before rejecting.
 * @param cmd - Command name, used only in the timeout error message.
 */
const waitResponse = async (requestId, receiver, timeout, cmd) => {
    return new Promise((resolve, reject) => {
        let timer;
        const listener = (msg) => {
            if (msg.requestId === requestId) {
                clearTimeout(timer);
                receiver.off('message', listener);
                resolve(msg.value);
            }
        };
        receiver.on('message', listener);
        timer = setTimeout(() => {
            receiver.off('message', listener);
            reject(new Error(`TimeoutError: ${cmd} timed out in (${timeout}ms)`));
        }, timeout);
    });
};
//# sourceMappingURL=child-processor.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,32 @@
import { ChildProcess } from 'child_process';
import { Worker } from 'worker_threads';
import { SandboxedOptions } from '../interfaces';
import { EventEmitter } from 'events';
/**
* Child class
*
* This class is used to create a child process or worker thread, and allows using
* isolated processes or threads for processing jobs.
*
*/
export declare class Child extends EventEmitter {
    private mainFile;
    processFile: string;
    private opts;
    /** Set when running as a forked process (mutually exclusive with `worker`). */
    childProcess: ChildProcess;
    /** Set when running as a worker thread (mutually exclusive with `childProcess`). */
    worker: Worker;
    private _exitCode;
    private _signalCode;
    private _killed;
    constructor(mainFile: string, processFile: string, opts?: SandboxedOptions);
    /** Pid of the child process, or |threadId| when using worker threads. */
    get pid(): number;
    get exitCode(): number;
    get signalCode(): number;
    get killed(): boolean;
    /** Spawns the child process / worker thread and performs the init handshake. */
    init(): Promise<void>;
    /** Sends a message to the child; resolves once delivery is acknowledged. */
    send(msg: any): Promise<void>;
    private killProcess;
    /** Kills the child; escalates to SIGKILL after timeoutMs if it has not exited. */
    kill(signal?: 'SIGTERM' | 'SIGKILL', timeoutMs?: number): Promise<void>;
    private initChild;
    hasProcessExited(): boolean;
}

208
backend/node_modules/bullmq/dist/esm/classes/child.js generated vendored Normal file
View File

@@ -0,0 +1,208 @@
import { fork } from 'child_process';
import { createServer } from 'net';
import { Worker } from 'worker_threads';
import { ChildCommand, ParentCommand } from '../enums';
import { EventEmitter } from 'events';
/**
* @see https://nodejs.org/api/process.html#process_exit_codes
*/
// Descriptions keyed by Node.js exit code, used to build the diagnostic
// message when a child closes before completing the init handshake.
// Codes above 128 (signal exits) are mapped down by the close handler.
const exitCodesErrors = {
    1: 'Uncaught Fatal Exception',
    2: 'Unused',
    3: 'Internal JavaScript Parse Error',
    4: 'Internal JavaScript Evaluation Failure',
    5: 'Fatal Error',
    6: 'Non-function Internal Exception Handler',
    7: 'Internal Exception Handler Run-Time Failure',
    8: 'Unused',
    9: 'Invalid Argument',
    10: 'Internal JavaScript Run-Time Failure',
    12: 'Invalid Debug Argument',
    13: 'Unfinished Top-Level Await',
};
/**
* Child class
*
* This class is used to create a child process or worker thread, and allows using
* isolated processes or threads for processing jobs.
*
*/
export class Child extends EventEmitter {
    constructor(mainFile, processFile, opts = {
        useWorkerThreads: false,
    }) {
        super();
        this.mainFile = mainFile;
        this.processFile = processFile;
        this.opts = opts;
        this._exitCode = null;
        this._signalCode = null;
        this._killed = false;
    }
    get pid() {
        if (this.childProcess) {
            return this.childProcess.pid;
        }
        else if (this.worker) {
            // Worker threads pids can become negative when they are terminated
            // so we need to use the absolute value to index the retained object
            return Math.abs(this.worker.threadId);
        }
        else {
            throw new Error('No child process or worker thread');
        }
    }
    get exitCode() {
        return this._exitCode;
    }
    get signalCode() {
        return this._signalCode;
    }
    get killed() {
        if (this.childProcess) {
            return this.childProcess.killed;
        }
        return this._killed;
    }
    /**
     * Spawns the child (forked process or worker thread, per opts), forwards
     * its events onto this emitter, and runs the init handshake.
     */
    async init() {
        const execArgv = await convertExecArgv(process.execArgv);
        let parent;
        if (this.opts.useWorkerThreads) {
            this.worker = parent = new Worker(this.mainFile, Object.assign({ execArgv, stdin: true, stdout: true, stderr: true }, (this.opts.workerThreadsOptions
                ? this.opts.workerThreadsOptions
                : {})));
        }
        else {
            this.childProcess = parent = fork(this.mainFile, [], Object.assign({ execArgv, stdio: 'pipe' }, (this.opts.workerForkOptions ? this.opts.workerForkOptions : {})));
        }
        parent.on('exit', (exitCode, signalCode) => {
            this._exitCode = exitCode;
            // Coerce to null if undefined for backwards compatibility
            signalCode = typeof signalCode === 'undefined' ? null : signalCode;
            this._signalCode = signalCode;
            this._killed = true;
            this.emit('exit', exitCode, signalCode);
            // Clean all listeners, we do not expect any more events after "exit"
            parent.removeAllListeners();
            this.removeAllListeners();
        });
        parent.on('error', (...args) => this.emit('error', ...args));
        parent.on('message', (...args) => this.emit('message', ...args));
        parent.on('close', (...args) => this.emit('close', ...args));
        parent.stdout.pipe(process.stdout);
        parent.stderr.pipe(process.stderr);
        await this.initChild();
    }
    /** Sends a message to the child; resolves once delivery is acknowledged. */
    async send(msg) {
        return new Promise((resolve, reject) => {
            if (this.childProcess) {
                this.childProcess.send(msg, (err) => {
                    if (err) {
                        reject(err);
                    }
                    else {
                        resolve();
                    }
                });
            }
            else if (this.worker) {
                resolve(this.worker.postMessage(msg));
            }
            else {
                resolve();
            }
        });
    }
    killProcess(signal = 'SIGKILL') {
        if (this.childProcess) {
            this.childProcess.kill(signal);
        }
        else if (this.worker) {
            // Worker threads cannot receive signals; terminate is the only option.
            this.worker.terminate();
        }
    }
    /**
     * Kills the child with the given signal. When a finite timeoutMs is
     * provided, escalates to SIGKILL if the child has not exited in time.
     */
    async kill(signal = 'SIGKILL', timeoutMs) {
        if (this.hasProcessExited()) {
            return;
        }
        const onExit = onExitOnce(this.childProcess || this.worker);
        this.killProcess(signal);
        if (timeoutMs !== undefined && (timeoutMs === 0 || isFinite(timeoutMs))) {
            const timeoutHandle = setTimeout(() => {
                if (!this.hasProcessExited()) {
                    this.killProcess('SIGKILL');
                }
            }, timeoutMs);
            await onExit;
            clearTimeout(timeoutHandle);
        }
        await onExit;
    }
    /**
     * Init handshake: sends the processor file to the child and waits until it
     * replies InitCompleted, replies InitFailed, or closes prematurely.
     */
    async initChild() {
        const onComplete = new Promise((resolve, reject) => {
            const onMessageHandler = (msg) => {
                if (msg.cmd === ParentCommand.InitCompleted) {
                    resolve();
                }
                else if (msg.cmd === ParentCommand.InitFailed) {
                    const err = new Error();
                    err.stack = msg.err.stack;
                    err.message = msg.err.message;
                    reject(err);
                }
                this.off('message', onMessageHandler);
                this.off('close', onCloseHandler);
            };
            const onCloseHandler = (code, signal) => {
                // Exit codes above 128 encode "terminated by signal (code - 128)".
                if (code > 128) {
                    code -= 128;
                }
                const msg = exitCodesErrors[code] || `Unknown exit code ${code}`;
                reject(new Error(`Error initializing child: ${msg} and signal ${signal}`));
                this.off('message', onMessageHandler);
                this.off('close', onCloseHandler);
            };
            this.on('message', onMessageHandler);
            this.on('close', onCloseHandler);
        });
        await this.send({
            cmd: ChildCommand.Init,
            value: this.processFile,
        });
        await onComplete;
    }
    hasProcessExited() {
        return !!(this.exitCode !== null || this.signalCode);
    }
}
/** Resolves (with no value) the first time `child` emits an "exit" event. */
function onExitOnce(child) {
    return new Promise(done => child.once('exit', () => done()));
}
/**
 * Asks the OS for a currently free TCP port by binding to port 0,
 * then closes the probe server and reports the assigned port number.
 */
const getFreePort = async () => {
    return new Promise(resolve => {
        const probe = createServer();
        probe.listen(0, () => {
            const assignedPort = probe.address().port;
            probe.close(() => resolve(assignedPort));
        });
    });
};
/**
 * Copies the parent's execArgv, rebinding any `--inspect*` flag to a free
 * port so the child's debugger does not collide with the parent's.
 * Non-inspect flags keep their relative order, followed by rewritten ones.
 */
const convertExecArgv = async (execArgv) => {
    const passthrough = [];
    const rewritten = [];
    for (const arg of execArgv) {
        if (arg.includes('--inspect')) {
            const flagName = arg.split('=')[0];
            rewritten.push(`${flagName}=${await getFreePort()}`);
        }
        else {
            passthrough.push(arg);
        }
    }
    return passthrough.concat(rewritten);
};
//# sourceMappingURL=child.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,11 @@
export declare const DELAYED_ERROR = "bullmq:movedToDelayed";
/**
* DelayedError
*
* Error to be thrown when job is moved to delayed state
* from job in active state.
*
*/
export declare class DelayedError extends Error {
    /** @param message - Error message; defaults to the DELAYED_ERROR marker string. */
    constructor(message?: string);
}

View File

@@ -0,0 +1,16 @@
export const DELAYED_ERROR = 'bullmq:movedToDelayed';
/**
 * DelayedError
 *
 * Thrown by a processor to signal that an active job has been
 * moved back to the delayed state.
 */
export class DelayedError extends Error {
    constructor(message = DELAYED_ERROR) {
        super(message);
        // Repair the prototype chain (needed when targeting older runtimes)
        // and expose the subclass name on the instance.
        Object.setPrototypeOf(this, new.target.prototype);
        this.name = this.constructor.name;
    }
}
//# sourceMappingURL=delayed-error.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"delayed-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/delayed-error.ts"],"names":[],"mappings":"AAAA,MAAM,CAAC,MAAM,aAAa,GAAG,uBAAuB,CAAC;AAErD;;;;;;GAMG;AACH,MAAM,OAAO,YAAa,SAAQ,KAAK;IACrC,YAAY,UAAkB,aAAa;QACzC,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF"}

View File

@@ -0,0 +1,5 @@
export * from './delayed-error';
export * from './rate-limit-error';
export * from './unrecoverable-error';
export * from './waiting-children-error';
export * from './waiting-error';

View File

@@ -0,0 +1,6 @@
export * from './delayed-error';
export * from './rate-limit-error';
export * from './unrecoverable-error';
export * from './waiting-children-error';
export * from './waiting-error';
//# sourceMappingURL=index.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../../src/classes/errors/index.ts"],"names":[],"mappings":"AAAA,cAAc,iBAAiB,CAAC;AAChC,cAAc,oBAAoB,CAAC;AACnC,cAAc,uBAAuB,CAAC;AACtC,cAAc,0BAA0B,CAAC;AACzC,cAAc,iBAAiB,CAAC"}

View File

@@ -0,0 +1,10 @@
export declare const RATE_LIMIT_ERROR = "bullmq:rateLimitExceeded";
/**
* RateLimitError
*
* Error to be thrown when queue reaches a rate limit.
*
*/
export declare class RateLimitError extends Error {
    /** @param message - Error message; defaults to the RATE_LIMIT_ERROR marker string. */
    constructor(message?: string);
}

View File

@@ -0,0 +1,15 @@
export const RATE_LIMIT_ERROR = 'bullmq:rateLimitExceeded';
/**
 * RateLimitError
 *
 * Thrown to signal that the queue has hit its configured rate limit, so the
 * active job should be released back for later processing.
 */
export class RateLimitError extends Error {
    constructor(message = RATE_LIMIT_ERROR) {
        super(message);
        // Repair the prototype chain first (required for ES5 transpilation targets).
        Object.setPrototypeOf(this, new.target.prototype);
        // Report the concrete class name in stack traces and serialized errors.
        this.name = this.constructor.name;
    }
}
//# sourceMappingURL=rate-limit-error.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"rate-limit-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/rate-limit-error.ts"],"names":[],"mappings":"AAAA,MAAM,CAAC,MAAM,gBAAgB,GAAG,0BAA0B,CAAC;AAE3D;;;;;GAKG;AACH,MAAM,OAAO,cAAe,SAAQ,KAAK;IACvC,YAAY,UAAkB,gBAAgB;QAC5C,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF"}

View File

@@ -0,0 +1,11 @@
export declare const UNRECOVERABLE_ERROR = "bullmq:unrecoverable";
/**
 * UnrecoverableError
 *
 * Error to move a job to failed even if the attemptsMade
 * are lower than the expected limit.
 *
 */
export declare class UnrecoverableError extends Error {
    /** @param message - error message; defaults to the `bullmq:unrecoverable` marker string. */
    constructor(message?: string);
}

View File

@@ -0,0 +1,16 @@
export const UNRECOVERABLE_ERROR = 'bullmq:unrecoverable';
/**
 * UnrecoverableError
 *
 * Thrown from a processor to fail the job immediately, bypassing any
 * remaining retry attempts.
 */
export class UnrecoverableError extends Error {
    constructor(message = UNRECOVERABLE_ERROR) {
        super(message);
        // Repair the prototype chain first (required for ES5 transpilation targets).
        Object.setPrototypeOf(this, new.target.prototype);
        // Report the concrete class name in stack traces and serialized errors.
        this.name = this.constructor.name;
    }
}
//# sourceMappingURL=unrecoverable-error.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"unrecoverable-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/unrecoverable-error.ts"],"names":[],"mappings":"AAAA,MAAM,CAAC,MAAM,mBAAmB,GAAG,sBAAsB,CAAC;AAE1D;;;;;;GAMG;AACH,MAAM,OAAO,kBAAmB,SAAQ,KAAK;IAC3C,YAAY,UAAkB,mBAAmB;QAC/C,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF"}

View File

@@ -0,0 +1,11 @@
export declare const WAITING_CHILDREN_ERROR = "bullmq:movedToWaitingChildren";
/**
 * WaitingChildrenError
 *
 * Error to be thrown when job is moved to waiting-children state
 * from job in active state.
 *
 */
export declare class WaitingChildrenError extends Error {
    /** @param message - error message; defaults to the `bullmq:movedToWaitingChildren` marker string. */
    constructor(message?: string);
}

View File

@@ -0,0 +1,16 @@
export const WAITING_CHILDREN_ERROR = 'bullmq:movedToWaitingChildren';
/**
 * WaitingChildrenError
 *
 * Thrown from a processor when the active job has been moved to the
 * waiting-children state, signalling that it must not be completed yet.
 */
export class WaitingChildrenError extends Error {
    constructor(message = WAITING_CHILDREN_ERROR) {
        super(message);
        // Repair the prototype chain first (required for ES5 transpilation targets).
        Object.setPrototypeOf(this, new.target.prototype);
        // Report the concrete class name in stack traces and serialized errors.
        this.name = this.constructor.name;
    }
}
//# sourceMappingURL=waiting-children-error.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"waiting-children-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/waiting-children-error.ts"],"names":[],"mappings":"AAAA,MAAM,CAAC,MAAM,sBAAsB,GAAG,+BAA+B,CAAC;AAEtE;;;;;;GAMG;AACH,MAAM,OAAO,oBAAqB,SAAQ,KAAK;IAC7C,YAAY,UAAkB,sBAAsB;QAClD,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF"}

View File

@@ -0,0 +1,10 @@
export declare const WAITING_ERROR = "bullmq:movedToWait";
/**
 * WaitingError
 *
 * Error to be thrown when job is moved to wait or prioritized state
 * from job in active state.
 */
export declare class WaitingError extends Error {
    /** @param message - error message; defaults to the `bullmq:movedToWait` marker string. */
    constructor(message?: string);
}

View File

@@ -0,0 +1,15 @@
export const WAITING_ERROR = 'bullmq:movedToWait';
/**
 * WaitingError
 *
 * Thrown from a processor when the active job has been moved back to the
 * wait (or prioritized) state, signalling that it must not be completed.
 */
export class WaitingError extends Error {
    constructor(message = WAITING_ERROR) {
        super(message);
        // Repair the prototype chain first (required for ES5 transpilation targets).
        Object.setPrototypeOf(this, new.target.prototype);
        // Report the concrete class name in stack traces and serialized errors.
        this.name = this.constructor.name;
    }
}
//# sourceMappingURL=waiting-error.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"waiting-error.js","sourceRoot":"","sources":["../../../../src/classes/errors/waiting-error.ts"],"names":[],"mappings":"AAAA,MAAM,CAAC,MAAM,aAAa,GAAG,oBAAoB,CAAC;AAElD;;;;;GAKG;AACH,MAAM,OAAO,YAAa,SAAQ,KAAK;IACrC,YAAY,UAAkB,aAAa;QACzC,KAAK,CAAC,OAAO,CAAC,CAAC;QACf,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;QAClC,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,GAAG,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;IACpD,CAAC;CACF"}

View File

@@ -0,0 +1,172 @@
import { EventEmitter } from 'events';
import { ChainableCommander } from 'ioredis';
import { FlowJob, FlowQueuesOpts, FlowOpts, IoredisListener, ParentOptions, QueueBaseOptions, RedisClient, Tracer, ContextManager } from '../interfaces';
import { Job } from './job';
import { KeysMap, QueueKeys } from './queue-keys';
import { RedisConnection } from './redis-connection';
export interface AddNodeOpts {
    /** Redis multi/pipeline shared by the whole flow so insertion is atomic. */
    multi: ChainableCommander;
    /** The node (job plus optional children) to insert. */
    node: FlowJob;
    /** Link data for the node's parent; absent when the node is a flow root. */
    parent?: {
        parentOpts: ParentOptions;
        parentDependenciesKey: string;
    };
    /**
     * Queues options that will be applied in each node depending on queue name presence.
     */
    queuesOpts?: FlowQueuesOpts;
}
export interface AddChildrenOpts {
    /** Redis multi/pipeline shared by the whole flow insertion. */
    multi: ChainableCommander;
    /** Child nodes to add under the given parent. */
    nodes: FlowJob[];
    /** Link data pointing back to the parent job (children always have one). */
    parent: {
        parentOpts: ParentOptions;
        parentDependenciesKey: string;
    };
    /** Per-queue options, looked up by each node's queue name. */
    queuesOpts?: FlowQueuesOpts;
}
export interface NodeOpts {
    /**
     * Root job queue name.
     */
    queueName: string;
    /**
     * Prefix included in job key.
     */
    prefix?: string;
    /**
     * Root job id.
     */
    id: string;
    /**
     * Maximum depth or levels to visit in the tree.
     * Defaults to 10 when fetched through FlowProducer.getFlow().
     */
    depth?: number;
    /**
     * Maximum quantity of children per type (processed, unprocessed).
     * Defaults to 20 when fetched through FlowProducer.getFlow().
     */
    maxChildren?: number;
}
export interface JobNode {
    /** The job corresponding to this node of the flow tree. */
    job: Job;
    /** Child nodes; present only when the node has (visited) children. */
    children?: JobNode[];
}
export interface FlowProducerListener extends IoredisListener {
    /**
     * Listen to 'error' event.
     *
     * This event is triggered when an error is thrown.
     */
    error: (failedReason: Error) => void;
}
/**
 * This class allows to add jobs with dependencies between them in such
 * a way that it is possible to build complex flows.
 * Note: A flow is a tree-like structure of jobs that depend on each other.
 * Whenever the children of a given parent are completed, the parent
 * will be processed, being able to access the children's result data.
 * All Jobs can be in different queues, either children or parents,
 */
export declare class FlowProducer extends EventEmitter {
    /** Options this producer was created with (prefix defaults to 'bull'). */
    opts: QueueBaseOptions;
    /** Builds a fully-qualified Redis key from a queue name and a key type. */
    toKey: (name: string, type: string) => string;
    keys: KeysMap;
    /** Set once close() has been called; resolves when the connection closes. */
    closing: Promise<void> | undefined;
    queueKeys: QueueKeys;
    protected connection: RedisConnection;
    /** Optional telemetry hooks used to create spans around flow operations. */
    protected telemetry: {
        tracer: Tracer | undefined;
        contextManager: ContextManager | undefined;
    };
    constructor(opts?: QueueBaseOptions, Connection?: typeof RedisConnection);
    // Typed EventEmitter overloads: they only narrow event names/listeners.
    emit<U extends keyof FlowProducerListener>(event: U, ...args: Parameters<FlowProducerListener[U]>): boolean;
    off<U extends keyof FlowProducerListener>(eventName: U, listener: FlowProducerListener[U]): this;
    on<U extends keyof FlowProducerListener>(event: U, listener: FlowProducerListener[U]): this;
    once<U extends keyof FlowProducerListener>(event: U, listener: FlowProducerListener[U]): this;
    /**
     * Returns a promise that resolves to a redis client. Normally used only by subclasses.
     */
    get client(): Promise<RedisClient>;
    /**
     * Helper to easily extend Job class calls.
     */
    protected get Job(): typeof Job;
    waitUntilReady(): Promise<RedisClient>;
    /**
     * Adds a flow.
     *
     * This call would be atomic, either it fails and no jobs will
     * be added to the queues, or it succeeds and all jobs will be added.
     *
     * @param flow - an object with a tree-like structure where children jobs
     * will be processed before their parents.
     * @param opts - options that will be applied to the flow object.
     */
    add(flow: FlowJob, opts?: FlowOpts): Promise<JobNode>;
    /**
     * Get a flow.
     *
     * @param opts - an object with options for getting a JobNode.
     */
    getFlow(opts: NodeOpts): Promise<JobNode>;
    /**
     * Adds multiple flows.
     *
     * A flow is a tree-like structure of jobs that depend on each other.
     * Whenever the children of a given parent are completed, the parent
     * will be processed, being able to access the children's result data.
     *
     * All Jobs can be in different queues, either children or parents,
     * however this call would be atomic, either it fails and no jobs will
     * be added to the queues, or it succeeds and all jobs will be added.
     *
     * @param flows - an array of objects with a tree-like structure where children jobs
     * will be processed before their parents.
     */
    addBulk(flows: FlowJob[]): Promise<JobNode[]>;
    /**
     * Add a node (job) of a flow to the queue. This method will recursively
     * add all its children as well. Note that a given job can potentially be
     * a parent and a child job at the same time depending on where it is located
     * in the tree hierarchy.
     *
     * @param multi - ioredis ChainableCommander
     * @param node - the node representing a job to be added to some queue
     * @param parent - parent data sent to children to create the "links" to their parent
     * @returns
     */
    protected addNode({ multi, node, parent, queuesOpts, }: AddNodeOpts): Promise<JobNode>;
    /**
     * Adds nodes (jobs) of multiple flows to the queue. This method will recursively
     * add all its children as well. Note that a given job can potentially be
     * a parent and a child job at the same time depending on where it is located
     * in the tree hierarchy.
     *
     * @param multi - ioredis ChainableCommander
     * @param nodes - the nodes representing jobs to be added to some queue
     * @returns
     */
    protected addNodes(multi: ChainableCommander, nodes: FlowJob[]): Promise<JobNode[]>;
    // Recursively reads one node (and its children) of an existing flow.
    private getNode;
    // Adds all child nodes under a common parent (used by addNode).
    private addChildren;
    // Resolves children job keys into JobNode subtrees, decrementing depth.
    private getChildren;
    /**
     * Helper factory method that creates a queue-like object
     * required to create jobs in any queue.
     *
     * @param node -
     * @param queueKeys -
     * @returns
     */
    private queueFromNode;
    /**
     *
     * Closes the connection and returns a promise that resolves when the connection is closed.
     */
    close(): Promise<void>;
    /**
     *
     * Force disconnects a connection.
     */
    disconnect(): Promise<void>;
}

View File

@@ -0,0 +1,350 @@
import { EventEmitter } from 'events';
import { v4 } from 'uuid';
import { getParentKey, isRedisInstance, trace } from '../utils';
import { Job } from './job';
import { QueueKeys } from './queue-keys';
import { RedisConnection } from './redis-connection';
import { SpanKind, TelemetryAttributes } from '../enums';
/**
 * This class allows to add jobs with dependencies between them in such
 * a way that it is possible to build complex flows.
 * Note: A flow is a tree-like structure of jobs that depend on each other.
 * Whenever the children of a given parent are completed, the parent
 * will be processed, being able to access the children's result data.
 * All Jobs can be in different queues, either children or parents,
 */
export class FlowProducer extends EventEmitter {
    /**
     * @param opts - base queue options (connection, prefix, telemetry, ...).
     * @param Connection - injectable RedisConnection class (e.g. for testing).
     */
    constructor(opts = { connection: {} }, Connection = RedisConnection) {
        super();
        this.opts = opts;
        // NOTE(review): `this.opts` is assigned twice (compiled parameter
        // property + explicit merge); the second assignment, which applies the
        // default 'bull' prefix, is the effective one.
        this.opts = Object.assign({ prefix: 'bull' }, opts);
        // Non-blocking connection: a producer only issues regular commands.
        this.connection = new Connection(opts.connection, {
            shared: isRedisInstance(opts.connection),
            blocking: false,
            skipVersionCheck: opts.skipVersionCheck,
            skipWaitingForReady: opts.skipWaitingForReady,
        });
        this.connection.on('error', (error) => this.emit('error', error));
        this.connection.on('close', () => {
            // Only surface unexpected closes; an explicit close() is expected.
            if (!this.closing) {
                this.emit('ioredis:close');
            }
        });
        this.queueKeys = new QueueKeys(opts.prefix);
        if (opts === null || opts === void 0 ? void 0 : opts.telemetry) {
            this.telemetry = opts.telemetry;
        }
    }
    // Typed EventEmitter wrappers: behavior is identical to the superclass;
    // they exist only for the narrowed TypeScript signatures.
    emit(event, ...args) {
        return super.emit(event, ...args);
    }
    off(eventName, listener) {
        super.off(eventName, listener);
        return this;
    }
    on(event, listener) {
        super.on(event, listener);
        return this;
    }
    once(event, listener) {
        super.once(event, listener);
        return this;
    }
    /**
     * Returns a promise that resolves to a redis client. Normally used only by subclasses.
     */
    get client() {
        return this.connection.client;
    }
    /**
     * Helper to easily extend Job class calls.
     */
    get Job() {
        return Job;
    }
    waitUntilReady() {
        return this.client;
    }
    /**
     * Adds a flow.
     *
     * This call would be atomic, either it fails and no jobs will
     * be added to the queues, or it succeeds and all jobs will be added.
     *
     * @param flow - an object with a tree-like structure where children jobs
     * will be processed before their parents.
     * @param opts - options that will be applied to the flow object.
     */
    async add(flow, opts) {
        var _a;
        if (this.closing) {
            return;
        }
        const client = await this.connection.client;
        // All node insertions are queued on one multi and executed atomically.
        const multi = client.multi();
        const parentOpts = (_a = flow === null || flow === void 0 ? void 0 : flow.opts) === null || _a === void 0 ? void 0 : _a.parent;
        const parentKey = getParentKey(parentOpts);
        const parentDependenciesKey = parentKey
            ? `${parentKey}:dependencies`
            : undefined;
        return trace(this.telemetry, SpanKind.PRODUCER, flow.queueName, 'addFlow', flow.queueName, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.FlowName]: flow.name,
            });
            const jobsTree = await this.addNode({
                multi,
                node: flow,
                queuesOpts: opts === null || opts === void 0 ? void 0 : opts.queuesOptions,
                parent: {
                    parentOpts,
                    parentDependenciesKey,
                },
            });
            await multi.exec();
            return jobsTree;
        });
    }
    /**
     * Get a flow.
     *
     * @param opts - an object with options for getting a JobNode.
     */
    async getFlow(opts) {
        if (this.closing) {
            return;
        }
        const client = await this.connection.client;
        // Defaults: visit up to 10 levels and 20 children per type.
        const updatedOpts = Object.assign({
            depth: 10,
            maxChildren: 20,
            prefix: this.opts.prefix,
        }, opts);
        const jobsTree = this.getNode(client, updatedOpts);
        return jobsTree;
    }
    /**
     * Adds multiple flows.
     *
     * A flow is a tree-like structure of jobs that depend on each other.
     * Whenever the children of a given parent are completed, the parent
     * will be processed, being able to access the children's result data.
     *
     * All Jobs can be in different queues, either children or parents,
     * however this call would be atomic, either it fails and no jobs will
     * be added to the queues, or it succeeds and all jobs will be added.
     *
     * @param flows - an array of objects with a tree-like structure where children jobs
     * will be processed before their parents.
     */
    async addBulk(flows) {
        if (this.closing) {
            return;
        }
        const client = await this.connection.client;
        const multi = client.multi();
        return trace(this.telemetry, SpanKind.PRODUCER, '', 'addBulkFlows', '', async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.BulkCount]: flows.length,
                [TelemetryAttributes.BulkNames]: flows
                    .map(flow => flow.name)
                    .join(','),
            });
            const jobsTrees = await this.addNodes(multi, flows);
            await multi.exec();
            return jobsTrees;
        });
    }
    /**
     * Add a node (job) of a flow to the queue. This method will recursively
     * add all its children as well. Note that a given job can potentially be
     * a parent and a child job at the same time depending on where it is located
     * in the tree hierarchy.
     *
     * @param multi - ioredis ChainableCommander
     * @param node - the node representing a job to be added to some queue
     * @param parent - parent data sent to children to create the "links" to their parent
     * @returns
     */
    async addNode({ multi, node, parent, queuesOpts, }) {
        var _a, _b;
        const prefix = node.prefix || this.opts.prefix;
        const queue = this.queueFromNode(node, new QueueKeys(prefix), prefix);
        // Per-queue default job options, selected by the node's queue name.
        const queueOpts = queuesOpts && queuesOpts[node.queueName];
        const jobsOpts = (_a = queueOpts === null || queueOpts === void 0 ? void 0 : queueOpts.defaultJobOptions) !== null && _a !== void 0 ? _a : {};
        const jobId = ((_b = node.opts) === null || _b === void 0 ? void 0 : _b.jobId) || v4();
        return trace(this.telemetry, SpanKind.PRODUCER, node.queueName, 'addNode', node.queueName, async (span, srcPropagationMedatada) => {
            var _a, _b;
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.JobName]: node.name,
                [TelemetryAttributes.JobId]: jobId,
            });
            const opts = node.opts;
            let telemetry = opts === null || opts === void 0 ? void 0 : opts.telemetry;
            // Propagate the tracing context into the job unless explicitly omitted.
            if (srcPropagationMedatada && opts) {
                const omitContext = (_a = opts.telemetry) === null || _a === void 0 ? void 0 : _a.omitContext;
                const telemetryMetadata = ((_b = opts.telemetry) === null || _b === void 0 ? void 0 : _b.metadata) ||
                    (!omitContext && srcPropagationMedatada);
                if (telemetryMetadata || omitContext) {
                    telemetry = {
                        metadata: telemetryMetadata,
                        omitContext,
                    };
                }
            }
            const job = new this.Job(queue, node.name, node.data, Object.assign(Object.assign(Object.assign({}, jobsOpts), opts), { parent: parent === null || parent === void 0 ? void 0 : parent.parentOpts, telemetry }), jobId);
            const parentKey = getParentKey(parent === null || parent === void 0 ? void 0 : parent.parentOpts);
            if (node.children && node.children.length > 0) {
                // Create the parent job, it will be a job in status "waiting-children".
                const parentId = jobId;
                const queueKeysParent = new QueueKeys(node.prefix || this.opts.prefix);
                await job.addJob(multi, {
                    parentDependenciesKey: parent === null || parent === void 0 ? void 0 : parent.parentDependenciesKey,
                    addToWaitingChildren: true,
                    parentKey,
                });
                const parentDependenciesKey = `${queueKeysParent.toKey(node.queueName, parentId)}:dependencies`;
                const children = await this.addChildren({
                    multi,
                    nodes: node.children,
                    parent: {
                        parentOpts: {
                            id: parentId,
                            queue: queueKeysParent.getQueueQualifiedName(node.queueName),
                        },
                        parentDependenciesKey,
                    },
                    queuesOpts,
                });
                return { job, children };
            }
            else {
                // Leaf node: add as a regular job, only linking to its parent.
                await job.addJob(multi, {
                    parentDependenciesKey: parent === null || parent === void 0 ? void 0 : parent.parentDependenciesKey,
                    parentKey,
                });
                return { job };
            }
        });
    }
    /**
     * Adds nodes (jobs) of multiple flows to the queue. This method will recursively
     * add all its children as well. Note that a given job can potentially be
     * a parent and a child job at the same time depending on where it is located
     * in the tree hierarchy.
     *
     * @param multi - ioredis ChainableCommander
     * @param nodes - the nodes representing jobs to be added to some queue
     * @returns
     */
    addNodes(multi, nodes) {
        return Promise.all(nodes.map(node => {
            var _a;
            const parentOpts = (_a = node === null || node === void 0 ? void 0 : node.opts) === null || _a === void 0 ? void 0 : _a.parent;
            const parentKey = getParentKey(parentOpts);
            const parentDependenciesKey = parentKey
                ? `${parentKey}:dependencies`
                : undefined;
            return this.addNode({
                multi,
                node,
                parent: {
                    parentOpts,
                    parentDependenciesKey,
                },
            });
        }));
    }
    // Fetches one job and, within the depth/maxChildren limits, its children.
    // Resolves to undefined when the job does not exist.
    async getNode(client, node) {
        const queue = this.queueFromNode(node, new QueueKeys(node.prefix), node.prefix);
        const job = await this.Job.fromId(queue, node.id);
        if (job) {
            const { processed = {}, unprocessed = [], failed = [], ignored = {}, } = await job.getDependencies({
                failed: {
                    count: node.maxChildren,
                },
                processed: {
                    count: node.maxChildren,
                },
                unprocessed: {
                    count: node.maxChildren,
                },
                ignored: {
                    count: node.maxChildren,
                },
            });
            const processedKeys = Object.keys(processed);
            const ignoredKeys = Object.keys(ignored);
            const childrenCount = processedKeys.length +
                unprocessed.length +
                ignoredKeys.length +
                failed.length;
            const newDepth = node.depth - 1;
            // Stop recursing when there are no children or the depth is exhausted.
            if (childrenCount > 0 && newDepth) {
                const children = await this.getChildren(client, [...processedKeys, ...unprocessed, ...failed, ...ignoredKeys], newDepth, node.maxChildren);
                return { job, children };
            }
            else {
                return { job };
            }
        }
    }
    addChildren({ multi, nodes, parent, queuesOpts }) {
        return Promise.all(nodes.map(node => this.addNode({ multi, node, parent, queuesOpts })));
    }
    getChildren(client, childrenKeys, depth, maxChildren) {
        const getChild = (key) => {
            // Child keys have the form `prefix:queueName:id`.
            const [prefix, queueName, id] = key.split(':');
            return this.getNode(client, {
                id,
                queueName,
                prefix,
                depth,
                maxChildren,
            });
        };
        return Promise.all([...childrenKeys.map(getChild)]);
    }
    /**
     * Helper factory method that creates a queue-like object
     * required to create jobs in any queue.
     *
     * @param node -
     * @param queueKeys -
     * @returns
     */
    queueFromNode(node, queueKeys, prefix) {
        return {
            client: this.connection.client,
            name: node.queueName,
            keys: queueKeys.getKeys(node.queueName),
            toKey: (type) => queueKeys.toKey(node.queueName, type),
            opts: { prefix, connection: {} },
            qualifiedName: queueKeys.getQueueQualifiedName(node.queueName),
            closing: this.closing,
            waitUntilReady: async () => this.connection.client,
            removeListener: this.removeListener.bind(this),
            emit: this.emit.bind(this),
            on: this.on.bind(this),
            redisVersion: this.connection.redisVersion,
            trace: async () => { },
        };
    }
    /**
     *
     * Closes the connection and returns a promise that resolves when the connection is closed.
     */
    async close() {
        if (!this.closing) {
            this.closing = this.connection.close();
        }
        await this.closing;
    }
    /**
     *
     * Force disconnects a connection.
     */
    disconnect() {
        return this.connection.disconnect();
    }
}
//# sourceMappingURL=flow-producer.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,21 @@
export * from './async-fifo-queue';
export * from './backoffs';
export * from './child';
export * from './child-pool';
export * from './child-processor';
export * from './errors';
export * from './flow-producer';
export * from './job';
export * from './job-scheduler';
export * from './lock-manager';
export * from './queue-base';
export * from './queue-events';
export * from './queue-events-producer';
export * from './queue-getters';
export * from './queue-keys';
export * from './queue';
export * from './redis-connection';
export * from './repeat';
export * from './sandbox';
export * from './scripts';
export * from './worker';

24
backend/node_modules/bullmq/dist/esm/classes/index.js generated vendored Normal file
View File

@@ -0,0 +1,24 @@
export * from './async-fifo-queue';
export * from './backoffs';
export * from './child';
export * from './child-pool';
export * from './child-processor';
export * from './errors';
export * from './flow-producer';
export * from './job';
export * from './job-scheduler';
// export * from './main'; this file must not be exported
// export * from './main-worker'; this file must not be exported
export * from './lock-manager';
export * from './queue-base';
export * from './queue-events';
export * from './queue-events-producer';
export * from './queue-getters';
export * from './queue-keys';
export * from './queue';
export * from './redis-connection';
export * from './repeat';
export * from './sandbox';
export * from './scripts';
export * from './worker';
//# sourceMappingURL=index.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../src/classes/index.ts"],"names":[],"mappings":"AAAA,cAAc,oBAAoB,CAAC;AACnC,cAAc,YAAY,CAAC;AAC3B,cAAc,SAAS,CAAC;AACxB,cAAc,cAAc,CAAC;AAC7B,cAAc,mBAAmB,CAAC;AAClC,cAAc,UAAU,CAAC;AACzB,cAAc,iBAAiB,CAAC;AAChC,cAAc,OAAO,CAAC;AACtB,cAAc,iBAAiB,CAAC;AAChC,yDAAyD;AACzD,gEAAgE;AAChE,cAAc,gBAAgB,CAAC;AAC/B,cAAc,cAAc,CAAC;AAC7B,cAAc,gBAAgB,CAAC;AAC/B,cAAc,yBAAyB,CAAC;AACxC,cAAc,iBAAiB,CAAC;AAChC,cAAc,cAAc,CAAC;AAC7B,cAAc,SAAS,CAAC;AACxB,cAAc,oBAAoB,CAAC;AACnC,cAAc,UAAU,CAAC;AACzB,cAAc,WAAW,CAAC;AAC1B,cAAc,WAAW,CAAC;AAC1B,cAAc,UAAU,CAAC"}

View File

@@ -0,0 +1,24 @@
import { JobSchedulerJson, RepeatBaseOptions, RepeatOptions } from '../interfaces';
import { JobSchedulerTemplateOptions } from '../types';
import { Job } from './job';
import { QueueBase } from './queue-base';
import { RedisConnection } from './redis-connection';
export declare class JobScheduler extends QueueBase {
    /** Strategy used to compute the next run time from a cron pattern. */
    private repeatStrategy;
    constructor(name: string, opts: RepeatBaseOptions, Connection?: typeof RedisConnection);
    /**
     * Creates or updates a job scheduler and enqueues its next delayed job.
     * Resolves to undefined when the iteration limit or end date has been
     * reached, or (non-override) when the update script returns no job id.
     */
    upsertJobScheduler<T = any, R = any, N extends string = string>(jobSchedulerId: string, repeatOpts: Omit<RepeatOptions, 'key' | 'prevMillis'>, jobName: N, jobData: T, opts: JobSchedulerTemplateOptions, { override, producerId }: {
        override: boolean;
        producerId?: string;
    }): Promise<Job<T, R, N> | undefined>;
    private getNextJobOpts;
    /** Removes the job scheduler with the given id. */
    removeJobScheduler(jobSchedulerId: string): Promise<number>;
    private getSchedulerData;
    private transformSchedulerData;
    // Legacy-only: parses the old colon-separated repeat key format.
    private keyToData;
    getScheduler<D = any>(id: string): Promise<JobSchedulerJson<D> | undefined>;
    private getTemplateFromJSON;
    /** Pages schedulers ordered by next run time (descending unless asc). */
    getJobSchedulers<D = any>(start?: number, end?: number, asc?: boolean): Promise<JobSchedulerJson<D>[]>;
    getSchedulersCount(): Promise<number>;
    private getSchedulerNextJobId;
}
/** Default cron strategy: next occurrence of `opts.pattern` on/after `millis`. */
export declare const defaultRepeatStrategy: (millis: number, opts: RepeatOptions) => number | undefined;

View File

@@ -0,0 +1,242 @@
import { __rest } from "tslib";
import { parseExpression } from 'cron-parser';
import { Job } from './job';
import { QueueBase } from './queue-base';
import { SpanKind, TelemetryAttributes } from '../enums';
import { array2obj } from '../utils';
export class JobScheduler extends QueueBase {
    constructor(name, opts, Connection) {
        super(name, opts, Connection);
        // Custom repeat strategies can be injected via settings; otherwise the
        // cron-parser based default is used.
        this.repeatStrategy =
            (opts.settings && opts.settings.repeatStrategy) || defaultRepeatStrategy;
    }
    async upsertJobScheduler(jobSchedulerId, repeatOpts, jobName, jobData, opts, { override, producerId }) {
        const { every, limit, pattern, offset } = repeatOpts;
        // `pattern` and `every` are mutually exclusive, and exactly one is required.
        if (pattern && every) {
            throw new Error('Both .pattern and .every options are defined for this repeatable job');
        }
        if (!pattern && !every) {
            throw new Error('Either .pattern or .every options must be defined for this repeatable job');
        }
        if (repeatOpts.immediately && repeatOpts.startDate) {
            throw new Error('Both .immediately and .startDate options are defined for this repeatable job');
        }
        if (repeatOpts.immediately && repeatOpts.every) {
            console.warn("Using option immediately with every does not affect the job's schedule. Job will run immediately anyway.");
        }
        // Check if we reached the limit of the repeatable job's iterations
        const iterationCount = repeatOpts.count ? repeatOpts.count + 1 : 1;
        if (typeof repeatOpts.limit !== 'undefined' &&
            iterationCount > repeatOpts.limit) {
            return;
        }
        // Check if we reached the end date of the repeatable job
        let now = Date.now();
        const { endDate } = repeatOpts;
        if (endDate && now > new Date(endDate).getTime()) {
            return;
        }
        // Never schedule before the previous iteration's timestamp.
        const prevMillis = opts.prevMillis || 0;
        now = prevMillis < now ? now : prevMillis;
        // Check if we have a start date for the repeatable job
        const { immediately } = repeatOpts, filteredRepeatOpts = __rest(repeatOpts, ["immediately"]);
        let nextMillis;
        const newOffset = null;
        // NOTE(review): nextMillis is only computed for cron patterns; for
        // 'every' schedulers it stays undefined and the Redis script is the one
        // expected to derive the delay — confirm against scripts.addJobScheduler.
        if (pattern) {
            nextMillis = await this.repeatStrategy(now, repeatOpts, jobName);
            if (nextMillis < now) {
                nextMillis = now;
            }
        }
        if (nextMillis || every) {
            return this.trace(SpanKind.PRODUCER, 'add', `${this.name}.${jobName}`, async (span, srcPropagationMedatada) => {
                var _a, _b;
                // Propagate tracing context into the job unless explicitly omitted.
                let telemetry = opts.telemetry;
                if (srcPropagationMedatada) {
                    const omitContext = (_a = opts.telemetry) === null || _a === void 0 ? void 0 : _a.omitContext;
                    const telemetryMetadata = ((_b = opts.telemetry) === null || _b === void 0 ? void 0 : _b.metadata) ||
                        (!omitContext && srcPropagationMedatada);
                    if (telemetryMetadata || omitContext) {
                        telemetry = {
                            metadata: telemetryMetadata,
                            omitContext,
                        };
                    }
                }
                const mergedOpts = this.getNextJobOpts(nextMillis, jobSchedulerId, Object.assign(Object.assign({}, opts), { repeat: filteredRepeatOpts, telemetry }), iterationCount, newOffset);
                if (override) {
                    // Clamp nextMillis to now if it's in the past
                    if (nextMillis < now) {
                        nextMillis = now;
                    }
                    const [jobId, delay] = await this.scripts.addJobScheduler(jobSchedulerId, nextMillis, JSON.stringify(typeof jobData === 'undefined' ? {} : jobData), Job.optsAsJSON(opts), {
                        name: jobName,
                        startDate: repeatOpts.startDate
                            ? new Date(repeatOpts.startDate).getTime()
                            : undefined,
                        endDate: endDate ? new Date(endDate).getTime() : undefined,
                        tz: repeatOpts.tz,
                        pattern,
                        every,
                        limit,
                        offset: newOffset,
                    }, Job.optsAsJSON(mergedOpts), producerId);
                    // Ensure delay is a number (Dragonflydb may return it as a string)
                    const numericDelay = typeof delay === 'string' ? parseInt(delay, 10) : delay;
                    const job = new this.Job(this, jobName, jobData, Object.assign(Object.assign({}, mergedOpts), { delay: numericDelay }), jobId);
                    job.id = jobId;
                    span === null || span === void 0 ? void 0 : span.setAttributes({
                        [TelemetryAttributes.JobSchedulerId]: jobSchedulerId,
                        [TelemetryAttributes.JobId]: job.id,
                    });
                    return job;
                }
                else {
                    const jobId = await this.scripts.updateJobSchedulerNextMillis(jobSchedulerId, nextMillis, JSON.stringify(typeof jobData === 'undefined' ? {} : jobData), Job.optsAsJSON(mergedOpts), producerId);
                    // The script may return no id (e.g. nothing to update); resolve
                    // to undefined in that case.
                    if (jobId) {
                        const job = new this.Job(this, jobName, jobData, mergedOpts, jobId);
                        job.id = jobId;
                        span === null || span === void 0 ? void 0 : span.setAttributes({
                            [TelemetryAttributes.JobSchedulerId]: jobSchedulerId,
                            [TelemetryAttributes.JobId]: job.id,
                        });
                        return job;
                    }
                }
            });
        }
    }
    getNextJobOpts(nextMillis, jobSchedulerId, opts, currentCount, offset) {
        var _a, _b;
        //
        // Generate unique job id for this iteration.
        //
        const jobId = this.getSchedulerNextJobId({
            jobSchedulerId,
            nextMillis,
        });
        const now = Date.now();
        // NOTE(review): callers currently pass `offset` as null, which coerces
        // to 0 here; confirm before relying on a non-null offset in this math.
        const delay = nextMillis + offset - now;
        const mergedOpts = Object.assign(Object.assign({}, opts), { jobId, delay: delay < 0 ? 0 : delay, timestamp: now, prevMillis: nextMillis, repeatJobKey: jobSchedulerId });
        mergedOpts.repeat = Object.assign(Object.assign({}, opts.repeat), { offset, count: currentCount, startDate: ((_a = opts.repeat) === null || _a === void 0 ? void 0 : _a.startDate)
                ? new Date(opts.repeat.startDate).getTime()
                : undefined, endDate: ((_b = opts.repeat) === null || _b === void 0 ? void 0 : _b.endDate)
                ? new Date(opts.repeat.endDate).getTime()
                : undefined });
        return mergedOpts;
    }
    async removeJobScheduler(jobSchedulerId) {
        return this.scripts.removeJobScheduler(jobSchedulerId);
    }
    // Reads the scheduler hash from Redis and normalizes it to a plain object.
    async getSchedulerData(client, key, next) {
        const jobData = await client.hgetall(this.toKey('repeat:' + key));
        return this.transformSchedulerData(key, jobData, next);
    }
    // Converts the raw Redis hash fields (all strings) into typed values.
    transformSchedulerData(key, jobData, next) {
        if (jobData) {
            const jobSchedulerData = {
                key,
                name: jobData.name,
                next,
            };
            if (jobData.ic) {
                jobSchedulerData.iterationCount = parseInt(jobData.ic);
            }
            if (jobData.limit) {
                jobSchedulerData.limit = parseInt(jobData.limit);
            }
            if (jobData.startDate) {
                jobSchedulerData.startDate = parseInt(jobData.startDate);
            }
            if (jobData.endDate) {
                jobSchedulerData.endDate = parseInt(jobData.endDate);
            }
            if (jobData.tz) {
                jobSchedulerData.tz = jobData.tz;
            }
            if (jobData.pattern) {
                jobSchedulerData.pattern = jobData.pattern;
            }
            if (jobData.every) {
                jobSchedulerData.every = parseInt(jobData.every);
            }
            if (jobData.offset) {
                jobSchedulerData.offset = parseInt(jobData.offset);
            }
            if (jobData.data || jobData.opts) {
                jobSchedulerData.template = this.getTemplateFromJSON(jobData.data, jobData.opts);
            }
            return jobSchedulerData;
        }
        // TODO: remove this check and keyToData as it is here only to support legacy code
        if (key.includes(':')) {
            return this.keyToData(key, next);
        }
    }
    // Parses the legacy colon-separated repeat key: name:id:endDate:tz:pattern.
    keyToData(key, next) {
        const data = key.split(':');
        const pattern = data.slice(4).join(':') || null;
        return {
            key,
            name: data[0],
            id: data[1] || null,
            endDate: parseInt(data[2]) || null,
            tz: data[3] || null,
            pattern,
            next,
        };
    }
    async getScheduler(id) {
        const [rawJobData, next] = await this.scripts.getJobScheduler(id);
        return this.transformSchedulerData(id, rawJobData ? array2obj(rawJobData) : null, next ? parseInt(next) : null);
    }
    // Deserializes the stored job template (data JSON + job options).
    getTemplateFromJSON(rawData, rawOpts) {
        const template = {};
        if (rawData) {
            template.data = JSON.parse(rawData);
        }
        if (rawOpts) {
            template.opts = Job.optsFromJSON(rawOpts);
        }
        return template;
    }
    async getJobSchedulers(start = 0, end = -1, asc = false) {
        const client = await this.client;
        const jobSchedulersKey = this.keys.repeat;
        // WITHSCORES yields a flat [member, score, ...] array; walk it in pairs,
        // where the score is the scheduler's next run timestamp.
        const result = asc
            ? await client.zrange(jobSchedulersKey, start, end, 'WITHSCORES')
            : await client.zrevrange(jobSchedulersKey, start, end, 'WITHSCORES');
        const jobs = [];
        for (let i = 0; i < result.length; i += 2) {
            jobs.push(this.getSchedulerData(client, result[i], parseInt(result[i + 1])));
        }
        return Promise.all(jobs);
    }
    async getSchedulersCount() {
        const jobSchedulersKey = this.keys.repeat;
        const client = await this.client;
        return client.zcard(jobSchedulersKey);
    }
    getSchedulerNextJobId({ nextMillis, jobSchedulerId, }) {
        return `repeat:${jobSchedulerId}:${nextMillis}`;
    }
}
/**
 * Default repeat strategy: computes the timestamp (ms) of the next iteration
 * of a repeatable job from its cron pattern.
 *
 * @param millis - reference timestamp in milliseconds to iterate from.
 * @param opts - repeat options; `pattern` is the cron expression, `startDate`
 * and `immediately` are honored here, the rest is forwarded to the parser.
 * @returns the next timestamp in milliseconds, or undefined when
 * `interval.next()` throws (its error is deliberately swallowed below).
 */
export const defaultRepeatStrategy = (millis, opts) => {
    const { pattern } = opts;
    const dateFromMillis = new Date(millis);
    const startDate = opts.startDate && new Date(opts.startDate);
    // If a startDate lies in the future of millis, iterate from it instead.
    const currentDate = startDate > dateFromMillis ? startDate : dateFromMillis;
    // NOTE(review): parseExpression sits OUTSIDE the try block, so an invalid
    // pattern propagates to the caller, while a failing next() below is
    // silently ignored — presumably intentional (exhausted iterator); confirm.
    const interval = parseExpression(pattern, Object.assign(Object.assign({}, opts), { currentDate }));
    try {
        if (opts.immediately) {
            // Immediate execution requested: schedule for "now".
            return new Date().getTime();
        }
        else {
            return interval.next().getTime();
        }
    }
    catch (e) {
        // Ignore error
    }
};
//# sourceMappingURL=job-scheduler.js.map

File diff suppressed because one or more lines are too long

489
backend/node_modules/bullmq/dist/esm/classes/job.d.ts generated vendored Normal file
View File

@@ -0,0 +1,489 @@
import { BulkJobOptions, DependenciesOpts, JobJson, JobJsonRaw, MinimalJob, MinimalQueue, MoveToWaitingChildrenOpts, ParentKeys, ParentKeyOpts, RedisClient, RetryOptions } from '../interfaces';
import { FinishedStatus, JobsOptions, JobState, JobJsonSandbox, RedisJobOptions, JobProgress } from '../types';
import { Scripts } from './scripts';
import type { QueueEvents } from './queue-events';
export declare const PRIORITY_LIMIT: number;
/**
* Job
*
* This class represents a Job in the queue. Normally jobs are implicitly created when
* you add a job to the queue with methods such as Queue.addJob( ... )
*
* A Job instance is also passed to the Worker's process function.
*
*/
export declare class Job<DataType = any, ReturnType = any, NameType extends string = string> implements MinimalJob<DataType, ReturnType, NameType> {
protected queue: MinimalQueue;
/**
* The name of the Job
*/
name: NameType;
/**
* The payload for this job.
*/
data: DataType;
/**
* The options object for this job.
*/
opts: JobsOptions;
id?: string;
/**
* It includes the prefix, the namespace separator :, and queue name.
* @see {@link https://www.gnu.org/software/gawk/manual/html_node/Qualified-Names.html}
*/
readonly queueQualifiedName: string;
/**
* The progress a job has performed so far.
* @defaultValue 0
*/
progress: JobProgress;
/**
* The value returned by the processor when processing this job.
* @defaultValue null
*/
returnvalue: ReturnType;
/**
* Stacktrace for the error (for failed jobs).
* @defaultValue null
*/
stacktrace: string[];
/**
* An amount of milliseconds to wait until this job can be processed.
* @defaultValue 0
*/
delay: number;
/**
* Ranges from 0 (highest priority) to 2 097 152 (lowest priority). Note that
* using priorities has a slight impact on performance,
* so do not use it if not required.
* @defaultValue 0
*/
priority: number;
/**
* Timestamp when the job was created (unless overridden with job options).
*/
timestamp: number;
/**
* Number of attempts when job is moved to active.
* @defaultValue 0
*/
attemptsStarted: number;
/**
* Number of attempts after the job has failed.
* @defaultValue 0
*/
attemptsMade: number;
/**
* Number of times where job has stalled.
* @defaultValue 0
*/
stalledCounter: number;
/**
* Reason for failing.
*/
failedReason: string;
/**
* Deferred failure. Stores a failed message and marks this job to be failed directly
* as soon as the job is picked up by a worker, and using this string as the failed reason.
*/
deferredFailure: string;
/**
* Timestamp for when the job finished (completed or failed).
*/
finishedOn?: number;
/**
* Timestamp for when the job was processed.
*/
processedOn?: number;
/**
* Fully qualified key (including the queue prefix) pointing to the parent of this job.
*/
parentKey?: string;
/**
* Object that contains parentId (id) and parent queueKey.
*/
parent?: ParentKeys;
/**
* Debounce identifier.
* @deprecated use deduplicationId
*/
debounceId?: string;
/**
* Deduplication identifier.
*/
deduplicationId?: string;
/**
* Base repeat job key.
*/
repeatJobKey?: string;
/**
* Produced next repeatable job Id.
*
*/
nextRepeatableJobId?: string;
/**
* The token used for locking this job.
*/
token?: string;
/**
* The worker name that is processing or processed this job.
*/
processedBy?: string;
protected toKey: (type: string) => string;
/**
* @deprecated use UnrecoverableError
*/
protected discarded: boolean;
protected scripts: Scripts;
constructor(queue: MinimalQueue,
/**
* The name of the Job
*/
name: NameType,
/**
* The payload for this job.
*/
data: DataType,
/**
* The options object for this job.
*/
opts?: JobsOptions, id?: string);
/**
* Creates a new job and adds it to the queue.
*
* @param queue - the queue where to add the job.
* @param name - the name of the job.
* @param data - the payload of the job.
* @param opts - the options bag for this job.
* @returns
*/
static create<T = any, R = any, N extends string = string>(queue: MinimalQueue, name: N, data: T, opts?: JobsOptions): Promise<Job<T, R, N>>;
/**
* Creates a bulk of jobs and adds them atomically to the given queue.
*
* @param queue - the queue where to add the jobs.
* @param jobs - an array of jobs to be added to the queue.
* @returns
*/
static createBulk<T = any, R = any, N extends string = string>(queue: MinimalQueue, jobs: {
name: N;
data: T;
opts?: BulkJobOptions;
}[]): Promise<Job<T, R, N>[]>;
/**
* Instantiates a Job from a JobJsonRaw object (coming from a deserialized JSON object)
*
* @param queue - the queue where the job belongs to.
* @param json - the plain object containing the job.
* @param jobId - an optional job id (overrides the id coming from the JSON object)
* @returns
*/
static fromJSON<T = any, R = any, N extends string = string>(queue: MinimalQueue, json: JobJsonRaw, jobId?: string): Job<T, R, N>;
protected createScripts(): void;
static optsFromJSON(rawOpts?: string, optsDecode?: Record<string, string>): JobsOptions;
/**
* Fetches a Job from the queue given the passed job id.
*
* @param queue - the queue where the job belongs to.
* @param jobId - the job id.
* @returns
*/
static fromId<T = any, R = any, N extends string = string>(queue: MinimalQueue, jobId: string): Promise<Job<T, R, N> | undefined>;
/**
* addJobLog
*
* @param queue - A minimal queue instance
* @param jobId - Job id
* @param logRow - String with a row of log data to be logged
* @param keepLogs - The optional amount of log entries to preserve
*
* @returns The total number of log entries for this job so far.
*/
static addJobLog(queue: MinimalQueue, jobId: string, logRow: string, keepLogs?: number): Promise<number>;
toJSON(): Omit<this, "queue" | "scripts" | "toJSON" | "asJSON" | "asJSONSandbox" | "updateData" | "updateProgress" | "log" | "removeChildDependency" | "clearLogs" | "remove" | "removeUnprocessedChildren" | "extendLock" | "moveToCompleted" | "moveToWait" | "moveToFailed" | "isCompleted" | "isFailed" | "isDelayed" | "isWaitingChildren" | "isActive" | "isWaiting" | "queueName" | "prefix" | "getState" | "changeDelay" | "changePriority" | "getChildrenValues" | "getIgnoredChildrenFailures" | "getFailedChildrenValues" | "getDependencies" | "getDependenciesCount" | "waitUntilFinished" | "moveToDelayed" | "moveToWaitingChildren" | "promote" | "retry" | "discard" | "addJob" | "removeDeduplicationKey">;
/**
* Prepares a job to be serialized for storage in Redis.
* @returns
*/
asJSON(): JobJson;
static optsAsJSON(opts?: JobsOptions, optsEncode?: Record<string, string>): RedisJobOptions;
/**
* Prepares a job to be passed to Sandbox.
* @returns
*/
asJSONSandbox(): JobJsonSandbox;
/**
* Updates a job's data
*
* @param data - the data that will replace the current jobs data.
*/
updateData(data: DataType): Promise<void>;
/**
* Updates a job's progress
*
* @param progress - number or object to be saved as progress.
*/
updateProgress(progress: JobProgress): Promise<void>;
/**
* Logs one row of log data.
*
* @param logRow - string with log data to be logged.
* @returns The total number of log entries for this job so far.
*/
log(logRow: string): Promise<number>;
/**
* Removes child dependency from parent when child is not yet finished
*
* @returns True if the relationship existed and if it was removed.
*/
removeChildDependency(): Promise<boolean>;
/**
* Clears job's logs
*
* @param keepLogs - the amount of log entries to preserve
*/
clearLogs(keepLogs?: number): Promise<void>;
/**
* Completely remove the job from the queue.
* Note, this call will throw an exception if the job
* is being processed when the call is performed.
*
* @param opts - Options to remove a job
*/
remove({ removeChildren }?: {
removeChildren?: boolean;
}): Promise<void>;
/**
* Remove all children from this job that are not yet processed,
* in other words that are in any other state than completed, failed or active.
*
* @remarks
* - Jobs with locks (most likely active) are ignored.
* - This method can be slow if the number of children is large (\> 1000).
*/
removeUnprocessedChildren(): Promise<void>;
/**
* Extend the lock for this job.
*
* @param token - unique token for the lock
* @param duration - lock duration in milliseconds
*/
extendLock(token: string, duration: number): Promise<number>;
/**
* Moves a job to the completed queue.
* Returned job to be used with Queue.prototype.nextJobFromJobData.
*
* @param returnValue - The jobs success message.
* @param token - Worker token used to acquire completed job.
* @param fetchNext - True when wanting to fetch the next job.
* @returns Returns the jobData of the next job in the waiting queue or void.
*/
moveToCompleted(returnValue: ReturnType, token: string, fetchNext?: boolean): Promise<void | any[]>;
/**
* Moves a job to the wait or prioritized state.
*
* @param token - Worker token used to acquire completed job.
* @returns Returns pttl.
*/
moveToWait(token?: string): Promise<number>;
private shouldRetryJob;
/**
* Moves a job to the failed queue.
*
* @param err - the jobs error message.
* @param token - token to check job is locked by current worker
* @param fetchNext - true when wanting to fetch the next job
* @returns Returns the jobData of the next job in the waiting queue or void.
*/
moveToFailed<E extends Error>(err: E, token: string, fetchNext?: boolean): Promise<void | any[]>;
private getSpanOperation;
/**
* @returns true if the job has completed.
*/
isCompleted(): Promise<boolean>;
/**
* @returns true if the job has failed.
*/
isFailed(): Promise<boolean>;
/**
* @returns true if the job is delayed.
*/
isDelayed(): Promise<boolean>;
/**
* @returns true if the job is waiting for children.
*/
isWaitingChildren(): Promise<boolean>;
/**
* @returns true if the job is active.
*/
isActive(): Promise<boolean>;
/**
* @returns true if the job is waiting.
*/
isWaiting(): Promise<boolean>;
/**
* @returns the queue name this job belongs to.
*/
get queueName(): string;
/**
* @returns the prefix that is used.
*/
get prefix(): string;
/**
* Get current state.
*
* @returns Returns one of these values:
* 'completed', 'failed', 'delayed', 'active', 'waiting', 'waiting-children', 'unknown'.
*/
getState(): Promise<JobState | 'unknown'>;
/**
* Change delay of a delayed job.
*
* Reschedules a delayed job by setting a new delay from the current time.
* For example, calling changeDelay(5000) will reschedule the job to execute
* 5000 milliseconds (5 seconds) from now, regardless of the original delay.
*
* @param delay - milliseconds from now when the job should be processed.
* @returns void
* @throws JobNotExist
* This exception is thrown if jobId is missing.
* @throws JobNotInState
* This exception is thrown if job is not in delayed state.
*/
changeDelay(delay: number): Promise<void>;
/**
* Change job priority.
*
* @param opts - options containing priority and lifo values.
* @returns void
*/
changePriority(opts: {
priority?: number;
lifo?: boolean;
}): Promise<void>;
/**
* Get this jobs children result values if any.
*
* @returns Object mapping children job keys with their values.
*/
getChildrenValues<CT = any>(): Promise<{
[jobKey: string]: CT;
}>;
/**
* Retrieves the failures of child jobs that were explicitly ignored while using ignoreDependencyOnFailure option.
* This method is useful for inspecting which child jobs were intentionally ignored when an error occurred.
* @see {@link https://docs.bullmq.io/guide/flows/ignore-dependency}
*
* @returns Object mapping children job keys with their failure values.
*/
getIgnoredChildrenFailures(): Promise<{
[jobKey: string]: string;
}>;
/**
* Get job's children failure values that were ignored if any.
*
* @deprecated This method is deprecated and will be removed in v6. Use getIgnoredChildrenFailures instead.
*
* @returns Object mapping children job keys with their failure values.
*/
getFailedChildrenValues(): Promise<{
[jobKey: string]: string;
}>;
/**
* Get children job keys if this job is a parent and has children.
* @remarks
* Count options before Redis v7.2 works as expected with any quantity of entries
* on processed/unprocessed dependencies, since v7.2 you must consider that count
* won't have any effect until processed/unprocessed dependencies have a length
* greater than 127
* @see {@link https://redis.io/docs/management/optimization/memory-optimization/#redis--72}
* @see {@link https://docs.bullmq.io/guide/flows#getters}
* @returns dependencies separated by processed, unprocessed, ignored and failed.
*/
getDependencies(opts?: DependenciesOpts): Promise<{
nextFailedCursor?: number;
failed?: string[];
nextIgnoredCursor?: number;
ignored?: Record<string, any>;
nextProcessedCursor?: number;
processed?: Record<string, any>;
nextUnprocessedCursor?: number;
unprocessed?: string[];
}>;
/**
* Get children job counts if this job is a parent and has children.
*
* @returns dependencies count separated by processed, unprocessed, ignored and failed.
*/
getDependenciesCount(opts?: {
failed?: boolean;
ignored?: boolean;
processed?: boolean;
unprocessed?: boolean;
}): Promise<{
failed?: number;
ignored?: number;
processed?: number;
unprocessed?: number;
}>;
/**
* Returns a promise the resolves when the job has completed (containing the return value of the job),
* or rejects when the job has failed (containing the failedReason).
*
* @param queueEvents - Instance of QueueEvents.
* @param ttl - Time in milliseconds to wait for job to finish before timing out.
*/
waitUntilFinished(queueEvents: QueueEvents, ttl?: number): Promise<ReturnType>;
/**
* Moves the job to the delay set.
*
* @param timestamp - timestamp when the job should be moved back to "wait"
* @param token - token to check job is locked by current worker
* @returns
*/
moveToDelayed(timestamp: number, token?: string): Promise<void>;
/**
* Moves the job to the waiting-children set.
*
* @param token - Token to check job is locked by current worker
* @param opts - The options bag for moving a job to waiting-children.
* @returns true if the job was moved
*/
moveToWaitingChildren(token: string, opts?: MoveToWaitingChildrenOpts): Promise<boolean>;
/**
* Promotes a delayed job so that it starts to be processed as soon as possible.
*/
promote(): Promise<void>;
/**
* Attempts to retry the job. Only a job that has failed or completed can be retried.
*
* @param state - completed / failed
* @param opts - options to retry a job
* @returns A promise that resolves when the job has been successfully moved to the wait queue.
* The queue emits a waiting event when the job is successfully moved.
* @throws Will throw an error if the job does not exist, is locked, or is not in the expected state.
*/
retry(state?: FinishedStatus, opts?: RetryOptions): Promise<void>;
/**
* Marks a job to not be retried if it fails (even if attempts has been configured)
* @deprecated use UnrecoverableError
*/
discard(): void;
private isInZSet;
private isInList;
/**
* Adds the job to Redis.
*
* @param client -
* @param parentOpts -
* @returns
*/
addJob(client: RedisClient, parentOpts?: ParentKeyOpts): Promise<string>;
/**
* Removes a deduplication key if job is still the cause of deduplication.
* @returns true if the deduplication key was removed.
*/
removeDeduplicationKey(): Promise<boolean>;
protected validateOptions(jobData: JobJson): void;
protected updateStacktrace(err: Error): void;
}

1049
backend/node_modules/bullmq/dist/esm/classes/job.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,69 @@
import { AbortController } from 'node-abort-controller';
import { LockManagerWorkerContext } from '../interfaces';
export interface LockManagerOptions {
lockRenewTime: number;
lockDuration: number;
workerId: string;
workerName?: string;
}
/**
* Manages lock renewal for BullMQ workers.
* It periodically extends locks for active jobs to prevent them from being
* considered stalled by other workers.
*/
export declare class LockManager {
protected worker: LockManagerWorkerContext;
protected opts: LockManagerOptions;
protected lockRenewalTimer?: NodeJS.Timeout;
protected trackedJobs: Map<string, {
token: string;
ts: number;
abortController?: AbortController;
}>;
protected closed: boolean;
constructor(worker: LockManagerWorkerContext, opts: LockManagerOptions);
/**
* Starts the lock manager timers for lock renewal.
*/
start(): void;
protected extendLocks(jobIds: string[]): Promise<void>;
private startLockExtenderTimer;
/**
* Stops the lock manager and clears all timers.
*/
close(): Promise<void>;
/**
* Adds a job to be tracked for lock renewal.
* Returns an AbortController if shouldCreateController is true, undefined otherwise.
*/
trackJob(jobId: string, token: string, ts: number, shouldCreateController?: boolean): AbortController | undefined;
/**
* Removes a job from lock renewal tracking.
*/
untrackJob(jobId: string): void;
/**
* Gets the number of jobs currently being tracked.
*/
getActiveJobCount(): number;
/**
* Checks if the lock manager is running.
*/
isRunning(): boolean;
/**
* Cancels a specific job by aborting its signal.
* @param jobId - The ID of the job to cancel
* @param reason - Optional reason for the cancellation
* @returns true if the job was found and cancelled, false otherwise
*/
cancelJob(jobId: string, reason?: string): boolean;
/**
* Cancels all tracked jobs by aborting their signals.
* @param reason - Optional reason for the cancellation
*/
cancelAllJobs(reason?: string): void;
/**
* Gets a list of all tracked job IDs.
* @returns Array of job IDs currently being tracked
*/
getTrackedJobIds(): string[];
}

View File

@@ -0,0 +1,161 @@
import { AbortController } from 'node-abort-controller';
import { SpanKind, TelemetryAttributes } from '../enums';
/**
* Manages lock renewal for BullMQ workers.
* It periodically extends locks for active jobs to prevent them from being
* considered stalled by other workers.
*/
export class LockManager {
    /**
     * @param worker - worker context used to extend locks, emit events and
     * wrap operations in telemetry spans.
     * @param opts - renewal configuration: lockRenewTime, lockDuration,
     * workerId and optional workerName.
     */
    constructor(worker, opts) {
        this.worker = worker;
        this.opts = opts;
        // Maps job ids with their tokens, timestamps, and abort controllers
        this.trackedJobs = new Map();
        this.closed = false;
    }
    /**
     * Starts the lock manager timers for lock renewal.
     */
    start() {
        if (this.closed) {
            return;
        }
        // Start lock renewal timer if not disabled
        if (this.opts.lockRenewTime > 0) {
            this.startLockExtenderTimer();
        }
    }
    /**
     * Extends the locks for the given jobs in one batch.
     * Emits 'lockRenewalFailed' plus one 'error' per failed job, and
     * 'locksRenewed' with the jobs that succeeded. Failures of the batch
     * call itself are reported via 'error' rather than thrown.
     */
    async extendLocks(jobIds) {
        await this.worker.trace(SpanKind.INTERNAL, 'extendLocks', this.worker.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.WorkerId]: this.opts.workerId,
                [TelemetryAttributes.WorkerName]: this.opts.workerName,
                [TelemetryAttributes.WorkerJobsToExtendLocks]: jobIds,
            });
            try {
                // Look up each job's lock token; '' for jobs no longer tracked.
                const jobTokens = jobIds.map(id => { var _a; return ((_a = this.trackedJobs.get(id)) === null || _a === void 0 ? void 0 : _a.token) || ''; });
                const erroredJobIds = await this.worker.extendJobLocks(jobIds, jobTokens, this.opts.lockDuration);
                if (erroredJobIds.length > 0) {
                    this.worker.emit('lockRenewalFailed', erroredJobIds);
                    for (const jobId of erroredJobIds) {
                        this.worker.emit('error', new Error(`could not renew lock for job ${jobId}`));
                    }
                }
                const succeededJobIds = jobIds.filter(id => !erroredJobIds.includes(id));
                if (succeededJobIds.length > 0) {
                    this.worker.emit('locksRenewed', {
                        count: succeededJobIds.length,
                        jobIds: succeededJobIds,
                    });
                }
            }
            catch (err) {
                this.worker.emit('error', err);
            }
        });
    }
    // Self-rescheduling timer: every lockRenewTime/2 ms, renew locks of all
    // tracked jobs whose last renewal is older than lockRenewTime/2.
    startLockExtenderTimer() {
        clearTimeout(this.lockRenewalTimer);
        if (!this.closed) {
            this.lockRenewalTimer = setTimeout(async () => {
                // Get all the jobs whose locks expire in less than 1/2 of the lockRenewTime
                const now = Date.now();
                const jobsToExtend = [];
                for (const jobId of this.trackedJobs.keys()) {
                    const tracked = this.trackedJobs.get(jobId);
                    const { ts, token, abortController } = tracked;
                    if (!ts) {
                        // No timestamp yet: stamp it now and skip this round.
                        this.trackedJobs.set(jobId, { token, ts: now, abortController });
                        continue;
                    }
                    if (ts + this.opts.lockRenewTime / 2 < now) {
                        this.trackedJobs.set(jobId, { token, ts: now, abortController });
                        jobsToExtend.push(jobId);
                    }
                }
                if (jobsToExtend.length) {
                    await this.extendLocks(jobsToExtend);
                }
                // Re-arm the timer for the next renewal round.
                this.startLockExtenderTimer();
            }, this.opts.lockRenewTime / 2);
        }
    }
    /**
     * Stops the lock manager and clears all timers.
     */
    async close() {
        if (this.closed) {
            return;
        }
        this.closed = true;
        if (this.lockRenewalTimer) {
            clearTimeout(this.lockRenewalTimer);
            this.lockRenewalTimer = undefined;
        }
        // NOTE(review): tracked jobs are dropped without aborting their
        // controllers — presumably callers cancel jobs separately; verify.
        this.trackedJobs.clear();
    }
    /**
     * Adds a job to be tracked for lock renewal.
     * Returns an AbortController if shouldCreateController is true, undefined otherwise.
     */
    trackJob(jobId, token, ts, shouldCreateController = false) {
        const abortController = shouldCreateController
            ? new AbortController()
            : undefined;
        // Once closed, jobs are no longer tracked (but a controller is still
        // returned to the caller when requested).
        if (!this.closed && jobId) {
            this.trackedJobs.set(jobId, { token, ts, abortController });
        }
        return abortController;
    }
    /**
     * Removes a job from lock renewal tracking.
     */
    untrackJob(jobId) {
        this.trackedJobs.delete(jobId);
    }
    /**
     * Gets the number of jobs currently being tracked.
     */
    getActiveJobCount() {
        return this.trackedJobs.size;
    }
    /**
     * Checks if the lock manager is running.
     */
    isRunning() {
        // Running == not closed AND the renewal timer has been started.
        return !this.closed && this.lockRenewalTimer !== undefined;
    }
    /**
     * Cancels a specific job by aborting its signal.
     * @param jobId - The ID of the job to cancel
     * @param reason - Optional reason for the cancellation
     * @returns true if the job was found and cancelled, false otherwise
     */
    cancelJob(jobId, reason) {
        const tracked = this.trackedJobs.get(jobId);
        if (tracked === null || tracked === void 0 ? void 0 : tracked.abortController) {
            tracked.abortController.abort(reason);
            return true;
        }
        return false;
    }
    /**
     * Cancels all tracked jobs by aborting their signals.
     * @param reason - Optional reason for the cancellation
     */
    cancelAllJobs(reason) {
        for (const tracked of this.trackedJobs.values()) {
            if (tracked.abortController) {
                tracked.abortController.abort(reason);
            }
        }
    }
    /**
     * Gets a list of all tracked job IDs.
     * @returns Array of job IDs currently being tracked
     */
    getTrackedJobIds() {
        return Array.from(this.trackedJobs.keys());
    }
}
//# sourceMappingURL=lock-manager.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"lock-manager.js","sourceRoot":"","sources":["../../../src/classes/lock-manager.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,eAAe,EAAE,MAAM,uBAAuB,CAAC;AACxD,OAAO,EAAE,QAAQ,EAAE,mBAAmB,EAAE,MAAM,UAAU,CAAC;AAUzD;;;;GAIG;AACH,MAAM,OAAO,WAAW;IAUtB,YACY,MAAgC,EAChC,IAAwB;QADxB,WAAM,GAAN,MAAM,CAA0B;QAChC,SAAI,GAAJ,IAAI,CAAoB;QATpC,oEAAoE;QAC1D,gBAAW,GAAG,IAAI,GAAG,EAG5B,CAAC;QACM,WAAM,GAAG,KAAK,CAAC;IAKtB,CAAC;IAEJ;;OAEG;IACH,KAAK;QACH,IAAI,IAAI,CAAC,MAAM,EAAE,CAAC;YAChB,OAAO;QACT,CAAC;QAED,2CAA2C;QAC3C,IAAI,IAAI,CAAC,IAAI,CAAC,aAAa,GAAG,CAAC,EAAE,CAAC;YAChC,IAAI,CAAC,sBAAsB,EAAE,CAAC;QAChC,CAAC;IACH,CAAC;IAES,KAAK,CAAC,WAAW,CAAC,MAAgB;QAC1C,MAAM,IAAI,CAAC,MAAM,CAAC,KAAK,CACrB,QAAQ,CAAC,QAAQ,EACjB,aAAa,EACb,IAAI,CAAC,MAAM,CAAC,IAAI,EAChB,KAAK,EAAE,IAAW,EAAE,EAAE;YACpB,IAAI,aAAJ,IAAI,uBAAJ,IAAI,CAAE,aAAa,CAAC;gBAClB,CAAC,mBAAmB,CAAC,QAAQ,CAAC,EAAE,IAAI,CAAC,IAAI,CAAC,QAAQ;gBAClD,CAAC,mBAAmB,CAAC,UAAU,CAAC,EAAE,IAAI,CAAC,IAAI,CAAC,UAAU;gBACtD,CAAC,mBAAmB,CAAC,uBAAuB,CAAC,EAAE,MAAM;aACtD,CAAC,CAAC;YAEH,IAAI,CAAC;gBACH,MAAM,SAAS,GAAG,MAAM,CAAC,GAAG,CAC1B,EAAE,CAAC,EAAE,WAAC,OAAA,CAAA,MAAA,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,EAAE,CAAC,0CAAE,KAAK,KAAI,EAAE,CAAA,EAAA,CAC5C,CAAC;gBAEF,MAAM,aAAa,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC,cAAc,CACpD,MAAM,EACN,SAAS,EACT,IAAI,CAAC,IAAI,CAAC,YAAY,CACvB,CAAC;gBAEF,IAAI,aAAa,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;oBAC7B,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,mBAAmB,EAAE,aAAa,CAAC,CAAC;oBAErD,KAAK,MAAM,KAAK,IAAI,aAAa,EAAE,CAAC;wBAClC,IAAI,CAAC,MAAM,CAAC,IAAI,CACd,OAAO,EACP,IAAI,KAAK,CAAC,gCAAgC,KAAK,EAAE,CAAC,CACnD,CAAC;oBACJ,CAAC;gBACH,CAAC;gBAED,MAAM,eAAe,GAAG,MAAM,CAAC,MAAM,CACnC,EAAE,CAAC,EAAE,CAAC,CAAC,aAAa,CAAC,QAAQ,CAAC,EAAE,CAAC,CAClC,CAAC;gBAEF,IAAI,eAAe,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;oBAC/B,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,cAAc,EAAE;wBAC/B,KAAK,EAAE,eAAe,CAAC,MAAM;wBAC7B,MAAM,EAAE,eAAe;qBACxB,CAAC,CAAC;gBACL,CAAC;YACH,CAAC;YAAC,OAAO,GAAG,EAAE,CAAC;gBACb,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,OAAO,EAAE,GAAY,CAAC,CAAC;YAC1C,CAAC;QACH,CAAC,CACF,CAAC;IACJ,CAAC;I
AEO,sBAAsB;QAC5B,YAAY,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC;QAEpC,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC;YACjB,IAAI,CAAC,gBAAgB,GAAG,UAAU,CAAC,KAAK,IAAI,EAAE;gBAC5C,4EAA4E;gBAC5E,MAAM,GAAG,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;gBACvB,MAAM,YAAY,GAAa,EAAE,CAAC;gBAElC,KAAK,MAAM,KAAK,IAAI,IAAI,CAAC,WAAW,CAAC,IAAI,EAAE,EAAE,CAAC;oBAC5C,MAAM,OAAO,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,CAAE,CAAC;oBAC7C,MAAM,EAAE,EAAE,EAAE,KAAK,EAAE,eAAe,EAAE,GAAG,OAAO,CAAC;oBAC/C,IAAI,CAAC,EAAE,EAAE,CAAC;wBACR,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,EAAE,KAAK,EAAE,EAAE,EAAE,GAAG,EAAE,eAAe,EAAE,CAAC,CAAC;wBACjE,SAAS;oBACX,CAAC;oBAED,IAAI,EAAE,GAAG,IAAI,CAAC,IAAI,CAAC,aAAa,GAAG,CAAC,GAAG,GAAG,EAAE,CAAC;wBAC3C,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,EAAE,KAAK,EAAE,EAAE,EAAE,GAAG,EAAE,eAAe,EAAE,CAAC,CAAC;wBACjE,YAAY,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;oBAC3B,CAAC;gBACH,CAAC;gBAED,IAAI,YAAY,CAAC,MAAM,EAAE,CAAC;oBACxB,MAAM,IAAI,CAAC,WAAW,CAAC,YAAY,CAAC,CAAC;gBACvC,CAAC;gBAED,IAAI,CAAC,sBAAsB,EAAE,CAAC;YAChC,CAAC,EAAE,IAAI,CAAC,IAAI,CAAC,aAAa,GAAG,CAAC,CAAC,CAAC;QAClC,CAAC;IACH,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,IAAI,CAAC,MAAM,EAAE,CAAC;YAChB,OAAO;QACT,CAAC;QAED,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;QAEnB,IAAI,IAAI,CAAC,gBAAgB,EAAE,CAAC;YAC1B,YAAY,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC;YACpC,IAAI,CAAC,gBAAgB,GAAG,SAAS,CAAC;QACpC,CAAC;QAED,IAAI,CAAC,WAAW,CAAC,KAAK,EAAE,CAAC;IAC3B,CAAC;IAED;;;OAGG;IACH,QAAQ,CACN,KAAa,EACb,KAAa,EACb,EAAU,EACV,sBAAsB,GAAG,KAAK;QAE9B,MAAM,eAAe,GAAG,sBAAsB;YAC5C,CAAC,CAAC,IAAI,eAAe,EAAE;YACvB,CAAC,CAAC,SAAS,CAAC;QACd,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,KAAK,EAAE,CAAC;YAC1B,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,EAAE,KAAK,EAAE,EAAE,EAAE,eAAe,EAAE,CAAC,CAAC;QAC9D,CAAC;QACD,OAAO,eAAe,CAAC;IACzB,CAAC;IAED;;OAEG;IACH,UAAU,CAAC,KAAa;QACtB,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;IACjC,CAAC;IAED;;OAEG;IACH,iBAAiB;QACf,OAAO,IAAI,CAAC,WAAW,CAAC,IAAI,CAAC;IAC/B,CAAC;IAED;;OAEG;IACH,SAAS;QACP,OAAO,CAAC,IAAI,CAAC,MAAM,IAAI,IAAI,CAAC,gBAAgB,KAAK,SAAS,CAAC;IAC7D,CAAC;IAED;;;;;OAKG;IACH,SAAS,CAAC,KAAa,EAAE,MAAe
;QACtC,MAAM,OAAO,GAAG,IAAI,CAAC,WAAW,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;QAC5C,IAAI,OAAO,aAAP,OAAO,uBAAP,OAAO,CAAE,eAAe,EAAE,CAAC;YAC7B,OAAO,CAAC,eAAe,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;YACtC,OAAO,IAAI,CAAC;QACd,CAAC;QACD,OAAO,KAAK,CAAC;IACf,CAAC;IAED;;;OAGG;IACH,aAAa,CAAC,MAAe;QAC3B,KAAK,MAAM,OAAO,IAAI,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,EAAE,CAAC;YAChD,IAAI,OAAO,CAAC,eAAe,EAAE,CAAC;gBAC5B,OAAO,CAAC,eAAe,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;YACxC,CAAC;QACH,CAAC;IACH,CAAC;IAED;;;OAGG;IACH,gBAAgB;QACd,OAAO,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,WAAW,CAAC,IAAI,EAAE,CAAC,CAAC;IAC7C,CAAC;CACF"}

View File

@@ -0,0 +1,3 @@
import { Receiver } from '../interfaces';
declare const _default: (send: (msg: any) => Promise<void>, receiver: Receiver) => void;
export default _default;

View File

@@ -0,0 +1,42 @@
/**
* Wrapper for sandboxing.
*
*/
import { ChildProcessor } from './child-processor';
import { ParentCommand, ChildCommand } from '../enums';
import { errorToJSON, toString } from '../utils';
/**
 * Entry point for sandboxed processors: wires the ChildCommand protocol
 * (Init/Start/Stop) coming from the parent process/thread into a
 * ChildProcessor instance, and installs process-level shutdown and
 * crash handlers.
 *
 * @param send - async function used to post messages back to the parent.
 * @param receiver - message source (process or parentPort) to listen on.
 */
export default (send, receiver) => {
    const childProcessor = new ChildProcessor(send, receiver);
    receiver === null || receiver === void 0 ? void 0 : receiver.on('message', async (msg) => {
        try {
            switch (msg.cmd) {
                case ChildCommand.Init:
                    await childProcessor.init(msg.value);
                    break;
                case ChildCommand.Start:
                    await childProcessor.start(msg.job, msg === null || msg === void 0 ? void 0 : msg.token);
                    break;
                case ChildCommand.Stop:
                    // Stop is a no-op here — presumably handled elsewhere; confirm.
                    break;
            }
        }
        catch (err) {
            // NOTE(review): the original error is discarded; only a generic
            // message reaches the console.
            console.error('Error handling child message');
        }
    });
    // On termination signals, let the in-flight job finish before exiting.
    process.on('SIGTERM', () => childProcessor.waitForCurrentJobAndExit());
    process.on('SIGINT', () => childProcessor.waitForCurrentJobAndExit());
    process.on('uncaughtException', async (err) => {
        if (typeof err !== 'object') {
            // Non-object throw values are wrapped so they serialize as errors.
            err = new Error(toString(err));
        }
        // Report the failure to the parent before dying.
        await send({
            cmd: ParentCommand.Failed,
            value: errorToJSON(err),
        });
        // An uncaughtException leaves this process in a potentially undetermined state so
        // we must exit
        process.exit();
    });
};
//# sourceMappingURL=main-base.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"main-base.js","sourceRoot":"","sources":["../../../src/classes/main-base.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,OAAO,EAAE,cAAc,EAAE,MAAM,mBAAmB,CAAC;AACnD,OAAO,EAAE,aAAa,EAAE,YAAY,EAAE,MAAM,UAAU,CAAC;AACvD,OAAO,EAAE,WAAW,EAAE,QAAQ,EAAE,MAAM,UAAU,CAAC;AAGjD,eAAe,CAAC,IAAiC,EAAE,QAAkB,EAAE,EAAE;IACvE,MAAM,cAAc,GAAG,IAAI,cAAc,CAAC,IAAI,EAAE,QAAQ,CAAC,CAAC;IAE1D,QAAQ,aAAR,QAAQ,uBAAR,QAAQ,CAAE,EAAE,CAAC,SAAS,EAAE,KAAK,EAAC,GAAG,EAAC,EAAE;QAClC,IAAI,CAAC;YACH,QAAQ,GAAG,CAAC,GAAmB,EAAE,CAAC;gBAChC,KAAK,YAAY,CAAC,IAAI;oBACpB,MAAM,cAAc,CAAC,IAAI,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;oBACrC,MAAM;gBACR,KAAK,YAAY,CAAC,KAAK;oBACrB,MAAM,cAAc,CAAC,KAAK,CAAC,GAAG,CAAC,GAAG,EAAE,GAAG,aAAH,GAAG,uBAAH,GAAG,CAAE,KAAK,CAAC,CAAC;oBAChD,MAAM;gBACR,KAAK,YAAY,CAAC,IAAI;oBACpB,MAAM;YACV,CAAC;QACH,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACb,OAAO,CAAC,KAAK,CAAC,8BAA8B,CAAC,CAAC;QAChD,CAAC;IACH,CAAC,CAAC,CAAC;IAEH,OAAO,CAAC,EAAE,CAAC,SAAS,EAAE,GAAG,EAAE,CAAC,cAAc,CAAC,wBAAwB,EAAE,CAAC,CAAC;IACvE,OAAO,CAAC,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,cAAc,CAAC,wBAAwB,EAAE,CAAC,CAAC;IAEtE,OAAO,CAAC,EAAE,CAAC,mBAAmB,EAAE,KAAK,EAAE,GAAQ,EAAE,EAAE;QACjD,IAAI,OAAO,GAAG,KAAK,QAAQ,EAAE,CAAC;YAC5B,GAAG,GAAG,IAAI,KAAK,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAC;QACjC,CAAC;QAED,MAAM,IAAI,CAAC;YACT,GAAG,EAAE,aAAa,CAAC,MAAM;YACzB,KAAK,EAAE,WAAW,CAAC,GAAG,CAAC;SACxB,CAAC,CAAC;QAEH,iFAAiF;QACjF,eAAe;QACf,OAAO,CAAC,IAAI,EAAE,CAAC;IACjB,CAAC,CAAC,CAAC;AACL,CAAC,CAAC"}

View File

@@ -0,0 +1 @@
export {};

View File

@@ -0,0 +1,8 @@
/**
* Worker Thread wrapper for sandboxing
*
*/
import { parentPort } from 'worker_threads';
import mainBase from './main-base';
// Bridge the sandbox protocol over the worker_threads message port.
const postToParent = async (msg) => parentPort.postMessage(msg);
mainBase(postToParent, parentPort);
//# sourceMappingURL=main-worker.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"main-worker.js","sourceRoot":"","sources":["../../../src/classes/main-worker.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,OAAO,EAAE,UAAU,EAAE,MAAM,gBAAgB,CAAC;AAC5C,OAAO,QAAQ,MAAM,aAAa,CAAC;AAEnC,QAAQ,CAAC,KAAK,EAAE,GAAQ,EAAE,EAAE,CAAC,UAAU,CAAC,WAAW,CAAC,GAAG,CAAC,EAAE,UAAU,CAAC,CAAC"}

View File

@@ -0,0 +1 @@
export {};

8
backend/node_modules/bullmq/dist/esm/classes/main.js generated vendored Normal file
View File

@@ -0,0 +1,8 @@
/**
* Child process wrapper for sandboxing.
*
*/
import { childSend } from '../utils';
import mainBase from './main-base';
// Bridge the sandbox protocol over the child-process IPC channel.
const sendToParent = (msg) => childSend(process, msg);
mainBase(sendToParent, process);
//# sourceMappingURL=main.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"main.js","sourceRoot":"","sources":["../../../src/classes/main.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,OAAO,EAAE,SAAS,EAAE,MAAM,UAAU,CAAC;AACrC,OAAO,QAAQ,MAAM,aAAa,CAAC;AAEnC,QAAQ,CAAC,CAAC,GAAQ,EAAE,EAAE,CAAC,SAAS,CAAC,OAAO,EAAE,GAAG,CAAC,EAAE,OAAO,CAAC,CAAC"}

View File

@@ -0,0 +1,78 @@
import { EventEmitter } from 'events';
import { MinimalQueue, QueueBaseOptions, RedisClient, Span } from '../interfaces';
import { RedisConnection } from './redis-connection';
import { Job } from './job';
import { KeysMap } from './queue-keys';
import { Scripts } from './scripts';
import { SpanKind } from '../enums';
/**
* Base class for all classes that need to interact with queues.
* This class is normally not used directly, but extended by the other classes.
*
*/
export declare class QueueBase extends EventEmitter implements MinimalQueue {
    /** The name of the queue. */
    readonly name: string;
    /** Effective queue options (caller options merged with defaults). */
    opts: QueueBaseOptions;
    /** Maps a key type (e.g. 'events') to its fully qualified Redis key. */
    toKey: (type: string) => string;
    /** Precomputed Redis keys used by this queue. */
    keys: KeysMap;
    /** Set once close() has been initiated; resolves when the connection is closed. */
    closing: Promise<void> | undefined;
    /** True after close() has completed. */
    protected closed: boolean;
    /** Whether this instance was constructed with a blocking-capable connection. */
    protected hasBlockingConnection: boolean;
    /** Lua script helpers bound to this queue. */
    protected scripts: Scripts;
    /** The underlying Redis connection wrapper. */
    protected connection: RedisConnection;
    /** Fully qualified queue name, including the key prefix. */
    readonly qualifiedName: string;
    /**
     *
     * @param name - The name of the queue.
     * @param opts - Options for the queue.
     * @param Connection - An optional "Connection" class used to instantiate a Connection. This is useful for
     * testing with mockups and/or extending the Connection class and passing an alternate implementation.
     * @param hasBlockingConnection - Whether a blocking-capable (dedicated) connection is required.
     */
    constructor(name: string, opts?: QueueBaseOptions, Connection?: typeof RedisConnection, hasBlockingConnection?: boolean);
    /**
     * Returns a promise that resolves to a redis client. Normally used only by subclasses.
     */
    get client(): Promise<RedisClient>;
    protected createScripts(): void;
    /**
     * Returns the version of the Redis instance the client is connected to.
     */
    get redisVersion(): string;
    /**
     * Helper to easily extend Job class calls.
     */
    protected get Job(): typeof Job;
    /**
     * Emits an event. Normally used by subclasses to emit events.
     *
     * @param event - The emitted event.
     * @param args - Arguments forwarded to the listeners.
     * @returns Whatever EventEmitter#emit returns.
     */
    emit(event: string | symbol, ...args: any[]): boolean;
    waitUntilReady(): Promise<RedisClient>;
    protected base64Name(): string;
    protected clientName(suffix?: string): string;
    /**
     *
     * Closes the connection and returns a promise that resolves when the connection is closed.
     */
    close(): Promise<void>;
    /**
     *
     * Force disconnects a connection.
     */
    disconnect(): Promise<void>;
    protected checkConnectionError<T>(fn: () => Promise<T>, delayInMs?: number): Promise<T | undefined>;
    /**
     * Wraps the code with telemetry and provides a span for configuration.
     *
     * @param spanKind - kind of the span: Producer, Consumer, Internal
     * @param operation - operation name (such as add, process, etc)
     * @param destination - destination name (normally the queue name)
     * @param callback - code to wrap with telemetry
     * @param srcPropagationMetadata - serialized propagation metadata from the source context
     * @returns
     */
    trace<T>(spanKind: SpanKind, operation: string, destination: string, callback: (span?: Span, dstPropagationMetadata?: string) => Promise<T> | T, srcPropagationMetadata?: string): Promise<T | Promise<T>>;
}

View File

@@ -0,0 +1,153 @@
import { EventEmitter } from 'events';
import { delay, DELAY_TIME_5, isNotConnectionError, isRedisInstance, trace, } from '../utils';
import { createScripts } from '../utils/create-scripts';
import { RedisConnection } from './redis-connection';
import { Job } from './job';
import { QueueKeys } from './queue-keys';
/**
* Base class for all classes that need to interact with queues.
* This class is normally not used directly, but extended by the other classes.
*
*/
export class QueueBase extends EventEmitter {
    /**
     * Validates the queue name, merges default options, opens the Redis
     * connection and precomputes the queue key helpers.
     *
     * @param name - The name of the queue.
     * @param opts - Options for the queue.
     * @param Connection - An optional "Connection" class used to instantiate a Connection. This is useful for
     * testing with mockups and/or extending the Connection class and passing an alternate implementation.
     * @param hasBlockingConnection - Whether this instance needs a blocking-capable connection.
     */
    constructor(name, opts = { connection: {} }, Connection = RedisConnection, hasBlockingConnection = false) {
        super();
        this.name = name;
        this.opts = opts;
        // The next two pairs of assignments mirror compiled TS class-field
        // initializers followed by the real constructor values.
        this.closed = false;
        this.hasBlockingConnection = false;
        this.hasBlockingConnection = hasBlockingConnection;
        // Apply the default key prefix while preserving caller-provided options.
        this.opts = Object.assign({ prefix: 'bull' }, opts);
        if (!name) {
            throw new Error('Queue name must be provided');
        }
        // ':' is the Redis key separator used by QueueKeys, so it cannot appear
        // inside a queue name.
        if (name.includes(':')) {
            throw new Error('Queue name cannot contain :');
        }
        this.connection = new Connection(opts.connection, {
            // An externally supplied ioredis instance is treated as shared and is
            // not owned (nor torn down) by this queue.
            shared: isRedisInstance(opts.connection),
            blocking: hasBlockingConnection,
            skipVersionCheck: opts.skipVersionCheck,
            skipWaitingForReady: opts.skipWaitingForReady,
        });
        // Re-emit connection errors on this instance so consumers only need a
        // single 'error' listener.
        this.connection.on('error', (error) => this.emit('error', error));
        this.connection.on('close', () => {
            // Only surface unexpected closes; an in-progress close() is expected.
            if (!this.closing) {
                this.emit('ioredis:close');
            }
        });
        const queueKeys = new QueueKeys(opts.prefix);
        this.qualifiedName = queueKeys.getQueueQualifiedName(name);
        this.keys = queueKeys.getKeys(name);
        this.toKey = (type) => queueKeys.toKey(name, type);
        this.createScripts();
    }
    /**
     * Returns a promise that resolves to a redis client. Normally used only by subclasses.
     */
    get client() {
        return this.connection.client;
    }
    // Instantiates the Lua script helpers bound to this queue instance.
    createScripts() {
        this.scripts = createScripts(this);
    }
    /**
     * Returns the version of the Redis instance the client is connected to.
     */
    get redisVersion() {
        return this.connection.redisVersion;
    }
    /**
     * Helper to easily extend Job class calls.
     */
    get Job() {
        return Job;
    }
    /**
     * Emits an event. Normally used by subclasses to emit events.
     *
     * Listener exceptions are re-routed to the 'error' event so a faulty
     * listener cannot crash the process.
     *
     * @param event - The emitted event.
     * @param args - Arguments forwarded to the listeners.
     * @returns Whatever EventEmitter#emit returns, or false if even the
     * 'error' listeners threw.
     */
    emit(event, ...args) {
        try {
            return super.emit(event, ...args);
        }
        catch (err) {
            try {
                return super.emit('error', err);
            }
            catch (err) {
                // We give up if the error event also throws an exception.
                console.error(err);
                return false;
            }
        }
    }
    // Resolves once the underlying connection has a ready client.
    waitUntilReady() {
        return this.client;
    }
    // Base64-encoded queue name, used to build a Redis client name that is
    // safe regardless of the characters in the queue name.
    base64Name() {
        return Buffer.from(this.name).toString('base64');
    }
    // Client name in the form "<prefix>:<base64(name)><suffix>".
    clientName(suffix = '') {
        const queueNameBase64 = this.base64Name();
        return `${this.opts.prefix}:${queueNameBase64}${suffix}`;
    }
    /**
     *
     * Closes the connection and returns a promise that resolves when the connection is closed.
     */
    async close() {
        // Idempotent: reuse the in-flight close promise when already closing.
        if (!this.closing) {
            this.closing = this.connection.close();
        }
        await this.closing;
        this.closed = true;
    }
    /**
     *
     * Force disconnects a connection.
     */
    disconnect() {
        return this.connection.disconnect();
    }
    /**
     * Runs `fn`, emitting non-connection errors and pausing briefly on
     * connection errors while the queue is not closing. Returns undefined when
     * `fn` failed; callers are expected to retry in their own loop.
     */
    async checkConnectionError(fn, delayInMs = DELAY_TIME_5) {
        try {
            return await fn();
        }
        catch (error) {
            if (isNotConnectionError(error)) {
                this.emit('error', error);
            }
            if (!this.closing && delayInMs) {
                await delay(delayInMs);
            }
            else {
                return;
            }
        }
    }
    /**
     * Wraps the code with telemetry and provides a span for configuration.
     *
     * @param spanKind - kind of the span: Producer, Consumer, Internal
     * @param operation - operation name (such as add, process, etc)
     * @param destination - destination name (normally the queue name)
     * @param callback - code to wrap with telemetry
     * @param srcPropagationMetadata - serialized propagation metadata from the source context
     * @returns
     */
    trace(spanKind, operation, destination, callback, srcPropagationMetadata) {
        return trace(this.opts.telemetry, spanKind, this.name, operation, destination, callback, srcPropagationMetadata);
    }
}
//# sourceMappingURL=queue-base.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"queue-base.js","sourceRoot":"","sources":["../../../src/classes/queue-base.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,QAAQ,CAAC;AAQtC,OAAO,EACL,KAAK,EACL,YAAY,EACZ,oBAAoB,EACpB,eAAe,EACf,KAAK,GACN,MAAM,UAAU,CAAC;AAClB,OAAO,EAAE,aAAa,EAAE,MAAM,yBAAyB,CAAC;AACxD,OAAO,EAAE,eAAe,EAAE,MAAM,oBAAoB,CAAC;AACrD,OAAO,EAAE,GAAG,EAAE,MAAM,OAAO,CAAC;AAC5B,OAAO,EAAW,SAAS,EAAE,MAAM,cAAc,CAAC;AAIlD;;;;GAIG;AACH,MAAM,OAAO,SAAU,SAAQ,YAAY;IAWzC;;;;;;OAMG;IACH,YACkB,IAAY,EACrB,OAAyB,EAAE,UAAU,EAAE,EAAE,EAAE,EAClD,aAAqC,eAAe,EACpD,qBAAqB,GAAG,KAAK;QAE7B,KAAK,EAAE,CAAC;QALQ,SAAI,GAAJ,IAAI,CAAQ;QACrB,SAAI,GAAJ,IAAI,CAAuC;QAf1C,WAAM,GAAG,KAAK,CAAC;QACf,0BAAqB,GAAG,KAAK,CAAC;QAoBtC,IAAI,CAAC,qBAAqB,GAAG,qBAAqB,CAAC;QACnD,IAAI,CAAC,IAAI,mBACP,MAAM,EAAE,MAAM,IACX,IAAI,CACR,CAAC;QAEF,IAAI,CAAC,IAAI,EAAE,CAAC;YACV,MAAM,IAAI,KAAK,CAAC,6BAA6B,CAAC,CAAC;QACjD,CAAC;QAED,IAAI,IAAI,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE,CAAC;YACvB,MAAM,IAAI,KAAK,CAAC,6BAA6B,CAAC,CAAC;QACjD,CAAC;QAED,IAAI,CAAC,UAAU,GAAG,IAAI,UAAU,CAAC,IAAI,CAAC,UAAU,EAAE;YAChD,MAAM,EAAE,eAAe,CAAC,IAAI,CAAC,UAAU,CAAC;YACxC,QAAQ,EAAE,qBAAqB;YAC/B,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,mBAAmB,EAAE,IAAI,CAAC,mBAAmB;SAC9C,CAAC,CAAC;QAEH,IAAI,CAAC,UAAU,CAAC,EAAE,CAAC,OAAO,EAAE,CAAC,KAAY,EAAE,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,KAAK,CAAC,CAAC,CAAC;QACzE,IAAI,CAAC,UAAU,CAAC,EAAE,CAAC,OAAO,EAAE,GAAG,EAAE;YAC/B,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;gBAClB,IAAI,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;YAC7B,CAAC;QACH,CAAC,CAAC,CAAC;QAEH,MAAM,SAAS,GAAG,IAAI,SAAS,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;QAC7C,IAAI,CAAC,aAAa,GAAG,SAAS,CAAC,qBAAqB,CAAC,IAAI,CAAC,CAAC;QAC3D,IAAI,CAAC,IAAI,GAAG,SAAS,CAAC,OAAO,CAAC,IAAI,CAAC,CAAC;QACpC,IAAI,CAAC,KAAK,GAAG,CAAC,IAAY,EAAE,EAAE,CAAC,SAAS,CAAC,KAAK,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC;QAC3D,IAAI,CAAC,aAAa,EAAE,CAAC;IACvB,CAAC;IAED;;OAEG;IACH,IAAI,MAAM;QACR,OAAO,IAAI,CAAC,UAAU,CAAC,MAAM,CAAC;IAChC,CAAC;IAES,aAAa;QACrB,IAAI,CAAC,OAAO,GAAG,aAAa,CAAC,IAAI,CAAC,CAAC;IACrC,CAAC;IAED;;OAEG;IACH,IAAI,YAAY;QACd,OAAO,IAA
I,CAAC,UAAU,CAAC,YAAY,CAAC;IACtC,CAAC;IAED;;OAEG;IACH,IAAc,GAAG;QACf,OAAO,GAAG,CAAC;IACb,CAAC;IAED;;;;;;OAMG;IACH,IAAI,CAAC,KAAsB,EAAE,GAAG,IAAW;QACzC,IAAI,CAAC;YACH,OAAO,KAAK,CAAC,IAAI,CAAC,KAAK,EAAE,GAAG,IAAI,CAAC,CAAC;QACpC,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACb,IAAI,CAAC;gBACH,OAAO,KAAK,CAAC,IAAI,CAAC,OAAO,EAAE,GAAG,CAAC,CAAC;YAClC,CAAC;YAAC,OAAO,GAAG,EAAE,CAAC;gBACb,0DAA0D;gBAC1D,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;gBACnB,OAAO,KAAK,CAAC;YACf,CAAC;QACH,CAAC;IACH,CAAC;IAED,cAAc;QACZ,OAAO,IAAI,CAAC,MAAM,CAAC;IACrB,CAAC;IAES,UAAU;QAClB,OAAO,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;IACnD,CAAC;IAES,UAAU,CAAC,MAAM,GAAG,EAAE;QAC9B,MAAM,eAAe,GAAG,IAAI,CAAC,UAAU,EAAE,CAAC;QAC1C,OAAO,GAAG,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,eAAe,GAAG,MAAM,EAAE,CAAC;IAC3D,CAAC;IAED;;;OAGG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,UAAU,CAAC,KAAK,EAAE,CAAC;QACzC,CAAC;QACD,MAAM,IAAI,CAAC,OAAO,CAAC;QACnB,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;IACrB,CAAC;IAED;;;OAGG;IACH,UAAU;QACR,OAAO,IAAI,CAAC,UAAU,CAAC,UAAU,EAAE,CAAC;IACtC,CAAC;IAES,KAAK,CAAC,oBAAoB,CAClC,EAAoB,EACpB,SAAS,GAAG,YAAY;QAExB,IAAI,CAAC;YACH,OAAO,MAAM,EAAE,EAAE,CAAC;QACpB,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YACf,IAAI,oBAAoB,CAAC,KAAc,CAAC,EAAE,CAAC;gBACzC,IAAI,CAAC,IAAI,CAAC,OAAO,EAAS,KAAK,CAAC,CAAC;YACnC,CAAC;YAED,IAAI,CAAC,IAAI,CAAC,OAAO,IAAI,SAAS,EAAE,CAAC;gBAC/B,MAAM,KAAK,CAAC,SAAS,CAAC,CAAC;YACzB,CAAC;iBAAM,CAAC;gBACN,OAAO;YACT,CAAC;QACH,CAAC;IACH,CAAC;IAED;;;;;;;;;OASG;IACH,KAAK,CACH,QAAkB,EAClB,SAAiB,EACjB,WAAmB,EACnB,QAA0E,EAC1E,sBAA+B;QAE/B,OAAO,KAAK,CACV,IAAI,CAAC,IAAI,CAAC,SAAS,EACnB,QAAQ,EACR,IAAI,CAAC,IAAI,EACT,SAAS,EACT,WAAW,EACX,QAAQ,EACR,sBAAsB,CACvB,CAAC;IACJ,CAAC;CACF"}

View File

@@ -0,0 +1,21 @@
import { QueueEventsProducerOptions } from '../interfaces';
import { QueueBase } from './queue-base';
import { RedisConnection } from './redis-connection';
/**
* The QueueEventsProducer class is used for publishing custom events.
*/
export declare class QueueEventsProducer extends QueueBase {
    constructor(name: string, opts?: QueueEventsProducerOptions, Connection?: typeof RedisConnection);
    /**
     * Publish custom event to be processed in QueueEvents.
     * @param argsObj - Event payload; must contain an `eventName` field, the
     * remaining fields are stored as stream entry fields.
     * @param maxEvents - Max quantity of events to be saved
     */
    publishEvent<T extends {
        eventName: string;
    }>(argsObj: T, maxEvents?: number): Promise<void>;
    /**
     * Closes the connection and returns a promise that resolves when the connection is closed.
     */
    close(): Promise<void>;
}

View File

@@ -0,0 +1,38 @@
import { __rest } from "tslib";
import { QueueBase } from './queue-base';
/**
* The QueueEventsProducer class is used for publishing custom events.
*/
export class QueueEventsProducer extends QueueBase {
    /**
     * @param name - Queue name the events are published for.
     * @param opts - Producer options; a non-blocking connection is enforced.
     * @param Connection - Optional Connection class, useful for testing.
     */
    constructor(name, opts = {
        connection: {},
    }, Connection) {
        super(name, Object.assign({ blockingConnection: false }, opts), Connection);
        this.opts = opts;
    }
    /**
     * Publish custom event to be processed in QueueEvents.
     *
     * Appends an entry to the queue's events stream via XADD with approximate
     * MAXLEN trimming.
     *
     * @param argsObj - Event payload; must contain an `eventName` field, the
     * remaining fields are stored as stream entry fields.
     * @param maxEvents - Max quantity of events to be saved
     */
    async publishEvent(argsObj, maxEvents = 1000) {
        const client = await this.client;
        // Fix: previously this variable was named `key` and was shadowed by the
        // loop variable below, making the final xadd() call needlessly confusing.
        const streamKey = this.keys.events;
        const { eventName } = argsObj, restArgs = __rest(argsObj, ["eventName"]);
        const args = ['MAXLEN', '~', maxEvents, '*', 'event', eventName];
        for (const [field, value] of Object.entries(restArgs)) {
            args.push(field, value);
        }
        await client.xadd(streamKey, ...args);
    }
    /**
     * Closes the connection and returns a promise that resolves when the connection is closed.
     *
     * NOTE(review): unlike QueueBase.close(), this override does not set
     * `this.closed` — presumably intentional upstream; confirm before relying on it.
     */
    async close() {
        if (!this.closing) {
            this.closing = this.connection.close();
        }
        await this.closing;
    }
}
//# sourceMappingURL=queue-events-producer.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"queue-events-producer.js","sourceRoot":"","sources":["../../../src/classes/queue-events-producer.ts"],"names":[],"mappings":";AACA,OAAO,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAGzC;;GAEG;AACH,MAAM,OAAO,mBAAoB,SAAQ,SAAS;IAChD,YACE,IAAY,EACZ,OAAmC;QACjC,UAAU,EAAE,EAAE;KACf,EACD,UAAmC;QAEnC,KAAK,CACH,IAAI,kBAEF,kBAAkB,EAAE,KAAK,IACtB,IAAI,GAET,UAAU,CACX,CAAC;QAEF,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC;IACnB,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,YAAY,CAChB,OAAU,EACV,SAAS,GAAG,IAAI;QAEhB,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC;QACjC,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC;QAC7B,MAAM,EAAE,SAAS,KAAkB,OAAO,EAApB,QAAQ,UAAK,OAAO,EAApC,aAA0B,CAAU,CAAC;QAC3C,MAAM,IAAI,GAAU,CAAC,QAAQ,EAAE,GAAG,EAAE,SAAS,EAAE,GAAG,EAAE,OAAO,EAAE,SAAS,CAAC,CAAC;QAExE,KAAK,MAAM,CAAC,GAAG,EAAE,KAAK,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC;YACpD,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE,KAAK,CAAC,CAAC;QACxB,CAAC;QAED,MAAM,MAAM,CAAC,IAAI,CAAC,GAAG,EAAE,GAAG,IAAI,CAAC,CAAC;IAClC,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC,UAAU,CAAC,KAAK,EAAE,CAAC;QACzC,CAAC;QACD,MAAM,IAAI,CAAC,OAAO,CAAC;IACrB,CAAC;CACF"}

View File

@@ -0,0 +1,291 @@
import { JobProgress } from '../types';
import { IoredisListener, QueueEventsOptions } from '../interfaces';
import { QueueBase } from './queue-base';
import { RedisConnection } from './redis-connection';
export interface QueueEventsListener extends IoredisListener {
/**
* Listen to 'active' event.
*
* This event is triggered when a job enters the 'active' state, meaning it is being processed.
*
* @param args - An object containing details about the job that became active.
* - `jobId`: The unique identifier of the job that entered the active state.
* - `prev`: The previous state of the job before it became active (e.g., 'waiting'), if applicable.
*
* @param id - The identifier of the event.
*/
active: (args: {
jobId: string;
prev?: string;
}, id: string) => void;
/**
* Listen to 'added' event.
*
* This event is triggered when a job is created and added to the queue.
*
* @param args - An object containing details about the newly added job.
* - `jobId` - The unique identifier of the job that was added.
* - `name` - The name of the job, typically indicating its type or purpose.
* @param id - The identifier of the event.
*/
added: (args: {
jobId: string;
name: string;
}, id: string) => void;
/**
* Listen to 'cleaned' event.
*
* This event is triggered when jobs are cleaned (e.g., removed) from the queue, typically via a cleanup method.
*
* @param args - An object containing the count of cleaned jobs.
* - `count` - The number of jobs that were cleaned, represented as a string due to Redis serialization.
* @param id - The identifier of the event.
*/
cleaned: (args: {
count: string;
}, id: string) => void;
/**
* Listen to 'completed' event.
*
* This event is triggered when a job has successfully completed its execution.
*
* @param args - An object containing details about the completed job.
* - `jobId` - The unique identifier of the job that completed.
* - `returnvalue` - The return value of the job, serialized as a string.
* - `prev` - The previous state of the job before completion (e.g., 'active'), if applicable.
* @param id - The identifier of the event.
*/
completed: (args: {
jobId: string;
returnvalue: string;
prev?: string;
}, id: string) => void;
/**
* Listen to 'debounced' event.
*
* @deprecated Use the 'deduplicated' event instead.
*
* This event is triggered when a job is debounced because a job with the same debounceId still exists.
*
* @param args - An object containing details about the debounced job.
* - `jobId` - The unique identifier of the job that was debounced.
* - `debounceId` - The identifier used to debounce the job, preventing duplicate processing.
* @param id - The identifier of the event.
*/
debounced: (args: {
jobId: string;
debounceId: string;
}, id: string) => void;
/**
* Listen to 'deduplicated' event.
*
* This event is triggered when a job is not added to the queue because a job with the same deduplicationId
* already exists.
*
* @param args - An object containing details about the deduplicated job.
* - `jobId` - The unique identifier of the job that was attempted to be added.
* - `deduplicationId` - The deduplication identifier that caused the job to be deduplicated.
* - `deduplicatedJobId` - The unique identifier of the existing job that caused the deduplication.
* @param id - The identifier of the event.
*/
deduplicated: (args: {
jobId: string;
deduplicationId: string;
deduplicatedJobId: string;
}, id: string) => void;
/**
* Listen to 'delayed' event.
*
* This event is triggered when a job is scheduled with a delay before it becomes active.
*
* @param args - An object containing details about the delayed job.
* - `jobId` - The unique identifier of the job that was delayed.
* - `delay` - The delay duration in milliseconds before the job becomes active.
* @param id - The identifier of the event.
*/
delayed: (args: {
jobId: string;
delay: number;
}, id: string) => void;
/**
* Listen to 'drained' event.
*
* This event is triggered when the queue has drained its waiting list, meaning there are no jobs
* in the 'waiting' state.
     * Note that there could still be delayed jobs waiting for their timers to expire
* and this event will still be triggered as long as the waiting list has emptied.
*
* @param id - The identifier of the event.
*/
drained: (id: string) => void;
/**
* Listen to 'duplicated' event.
*
* This event is triggered when a job is not created because a job with the same identifier already exists.
*
* @param args - An object containing the job identifier.
* - `jobId` - The unique identifier of the job that was attempted to be added.
* @param id - The identifier of the event.
*/
duplicated: (args: {
jobId: string;
}, id: string) => void;
/**
* Listen to 'error' event.
*
* This event is triggered when an error in the Redis backend is thrown.
*/
error: (args: Error) => void;
/**
* Listen to 'failed' event.
*
* This event is triggered when a job fails by throwing an exception during execution.
*
* @param args - An object containing details about the failed job.
* - `jobId` - The unique identifier of the job that failed.
* - `failedReason` - The reason or message describing why the job failed.
* - `prev` - The previous state of the job before failure (e.g., 'active'), if applicable.
* @param id - The identifier of the event.
*/
failed: (args: {
jobId: string;
failedReason: string;
prev?: string;
}, id: string) => void;
/**
* Listen to 'paused' event.
*
* This event is triggered when the queue is paused, halting the processing of new jobs.
*
* @param args - An empty object (no additional data provided).
* @param id - The identifier of the event.
*/
paused: (args: object, id: string) => void;
/**
* Listen to 'progress' event.
*
* This event is triggered when a job updates its progress via the `Job#updateProgress()` method, allowing
* progress or custom data to be communicated externally.
*
* @param args - An object containing the job identifier and progress data.
* - `jobId` - The unique identifier of the job reporting progress.
* - `data` - The progress data, which can be a number (e.g., percentage) or an object with custom data.
* @param id - The identifier of the event.
*/
progress: (args: {
jobId: string;
data: JobProgress;
}, id: string) => void;
/**
* Listen to 'removed' event.
*
* This event is triggered when a job is manually removed from the queue.
*
* @param args - An object containing details about the removed job.
* - `jobId` - The unique identifier of the job that was removed.
* - `prev` - The previous state of the job before removal (e.g., 'active' or 'waiting').
* @param id - The identifier of the event.
*/
removed: (args: {
jobId: string;
prev: string;
}, id: string) => void;
/**
* Listen to 'resumed' event.
*
* This event is triggered when the queue is resumed, allowing job processing to continue.
*
* @param args - An empty object (no additional data provided).
* @param id - The identifier of the event.
*/
resumed: (args: object, id: string) => void;
/**
* Listen to 'retries-exhausted' event.
*
* This event is triggered when a job has exhausted its maximum retry attempts after repeated failures.
*
* @param args - An object containing details about the job that exhausted retries.
* - `jobId` - The unique identifier of the job that exhausted its retries.
* - `attemptsMade` - The number of retry attempts made, represented as a string
* (due to Redis serialization).
* @param id - The identifier of the event.
*/
'retries-exhausted': (args: {
jobId: string;
attemptsMade: string;
}, id: string) => void;
/**
* Listen to 'stalled' event.
*
* This event is triggered when a job moves from 'active' back to 'waiting' or
* 'failed' because the processor could not renew its lock, indicating a
* potential processing issue.
*
* @param args - An object containing the job identifier.
* - `jobId` - The unique identifier of the job that stalled.
* @param id - The identifier of the event.
*/
stalled: (args: {
jobId: string;
}, id: string) => void;
/**
* Listen to 'waiting' event.
*
* This event is triggered when a job enters the 'waiting' state, indicating it is queued and
* awaiting processing.
*
* @param args - An object containing details about the job in the waiting state.
* - `jobId` - The unique identifier of the job that is waiting.
* - `prev` - The previous state of the job before entering 'waiting' (e.g., 'stalled'),
* if applicable.
* @param id - The identifier of the event.
*/
waiting: (args: {
jobId: string;
prev?: string;
}, id: string) => void;
/**
* Listen to 'waiting-children' event.
*
* This event is triggered when a job enters the 'waiting-children' state, indicating it is
* waiting for its child jobs to complete.
*
* @param args - An object containing the job identifier.
* - `jobId` - The unique identifier of the job waiting for its children.
* @param id - The identifier of the event.
*/
'waiting-children': (args: {
jobId: string;
}, id: string) => void;
}
type CustomParameters<T> = T extends (...args: infer Args) => void ? Args : never;
type KeyOf<T extends object> = Extract<keyof T, string>;
/**
* The QueueEvents class is used for listening to the global events
* emitted by a given queue.
*
* This class requires a dedicated redis connection.
*
*/
export declare class QueueEvents extends QueueBase {
    /** True while the event-consuming loop is active. */
    private running;
    /** True while the loop is blocked on an XREAD call. */
    private blocking;
    constructor(name: string, { connection, autorun, ...opts }?: QueueEventsOptions, Connection?: typeof RedisConnection);
    emit<QEL extends QueueEventsListener = QueueEventsListener, U extends KeyOf<QEL> = KeyOf<QEL>>(event: U, ...args: CustomParameters<QEL[U]>): boolean;
    off<QEL extends QueueEventsListener = QueueEventsListener, U extends KeyOf<QEL> = KeyOf<QEL>>(eventName: U, listener: QEL[U]): this;
    on<QEL extends QueueEventsListener = QueueEventsListener, U extends KeyOf<QEL> = KeyOf<QEL>>(event: U, listener: QEL[U]): this;
    once<QEL extends QueueEventsListener = QueueEventsListener, U extends KeyOf<QEL> = KeyOf<QEL>>(event: U, listener: QEL[U]): this;
    /**
     * Manually starts running the event consuming loop. This shall be used if you do not
     * use the default "autorun" option on the constructor.
     */
    run(): Promise<void>;
    private consumeEvents;
    /**
     * Stops consuming events and close the underlying Redis connection if necessary.
     *
     * @returns
     */
    close(): Promise<void>;
}
export {};

View File

@@ -0,0 +1,135 @@
import { __rest } from "tslib";
import { array2obj, clientCommandMessageReg, isRedisInstance, QUEUE_EVENT_SUFFIX, } from '../utils';
import { QueueBase } from './queue-base';
/**
* The QueueEvents class is used for listening to the global events
* emitted by a given queue.
*
* This class requires a dedicated redis connection.
*
*/
export class QueueEvents extends QueueBase {
    constructor(name, _a = {
        connection: {},
    }, Connection) {
        var { connection, autorun = true } = _a, opts = __rest(_a, ["connection", "autorun"]);
        // An externally supplied ioredis instance is duplicated: this class needs
        // a dedicated blocking connection for XREAD (hence `true` as last arg).
        super(name, Object.assign(Object.assign({}, opts), { connection: isRedisInstance(connection)
            ? connection.duplicate()
            : connection }), Connection, true);
        this.running = false;
        this.blocking = false;
        this.opts = Object.assign({
            blockingTimeout: 10000,
        }, this.opts);
        if (autorun) {
            this.run().catch(error => this.emit('error', error));
        }
    }
    emit(event, ...args) {
        return super.emit(event, ...args);
    }
    off(eventName, listener) {
        super.off(eventName, listener);
        return this;
    }
    on(event, listener) {
        super.on(event, listener);
        return this;
    }
    once(event, listener) {
        super.once(event, listener);
        return this;
    }
    /**
     * Manually starts running the event consuming loop. This shall be used if you do not
     * use the default "autorun" option on the constructor.
     */
    async run() {
        if (!this.running) {
            try {
                this.running = true;
                const client = await this.client;
                // TODO: Planned for deprecation as it does not really have a use case
                try {
                    await client.client('SETNAME', this.clientName(QUEUE_EVENT_SUFFIX));
                }
                catch (err) {
                    // Ignore only the "CLIENT command not allowed" class of errors
                    // (some hosted Redis providers disable CLIENT); rethrow anything else.
                    if (!clientCommandMessageReg.test(err.message)) {
                        throw err;
                    }
                }
                await this.consumeEvents(client);
            }
            catch (error) {
                // Allow run() to be retried after a failure.
                this.running = false;
                throw error;
            }
        }
        else {
            throw new Error('Queue Events is already running.');
        }
    }
    // Blocking loop reading the queue's events stream and re-emitting each
    // entry as a typed event. Runs until close() sets `this.closing`.
    async consumeEvents(client) {
        const opts = this.opts;
        const key = this.keys.events;
        let id = opts.lastEventId || '$';
        while (!this.closing) {
            this.blocking = true;
            // Cast to actual return type, see: https://github.com/DefinitelyTyped/DefinitelyTyped/issues/44301
            const data = await this.checkConnectionError(() => client.xread('BLOCK', opts.blockingTimeout, 'STREAMS', key, id));
            this.blocking = false;
            if (data) {
                const stream = data[0];
                const events = stream[1];
                for (let i = 0; i < events.length; i++) {
                    id = events[i][0];
                    const args = array2obj(events[i][1]);
                    //
                    // TODO: we may need to have a separate stream for progress data
                    // to avoid this hack.
                    switch (args.event) {
                        case 'progress':
                            args.data = JSON.parse(args.data);
                            break;
                        case 'completed':
                            args.returnvalue = JSON.parse(args.returnvalue);
                            break;
                    }
                    const { event } = args, restArgs = __rest(args, ["event"]);
                    if (event === 'drained') {
                        this.emit(event, id);
                    }
                    else {
                        this.emit(event, restArgs, id);
                        if (restArgs.jobId) {
                            // Also emit a job-scoped event, e.g. "completed:<jobId>".
                            this.emit(`${event}:${restArgs.jobId}`, restArgs, id);
                        }
                    }
                }
            }
        }
    }
    /**
     * Stops consuming events and close the underlying Redis connection if necessary.
     *
     * @returns
     */
    async close() {
        if (!this.closing) {
            this.closing = (async () => {
                try {
                    // As the connection has been wrongly marked as "shared" by QueueBase,
                    // we need to forcibly close it here. We should fix QueueBase to avoid this in the future.
                    const client = await this.client;
                    client.disconnect();
                    await this.connection.close(this.blocking);
                }
                finally {
                    this.closed = true;
                }
            })();
        }
        return this.closing;
    }
}
//# sourceMappingURL=queue-events.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"queue-events.js","sourceRoot":"","sources":["../../../src/classes/queue-events.ts"],"names":[],"mappings":";AAOA,OAAO,EACL,SAAS,EACT,uBAAuB,EACvB,eAAe,EACf,kBAAkB,GACnB,MAAM,UAAU,CAAC;AAClB,OAAO,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AA8PzC;;;;;;GAMG;AACH,MAAM,OAAO,WAAY,SAAQ,SAAS;IAIxC,YACE,IAAY,EACZ,KAA8D;QAC5D,UAAU,EAAE,EAAE;KACf,EACD,UAAmC;YAHnC,EAAE,UAAU,EAAE,OAAO,GAAG,IAAI,OAE3B,EAFgC,IAAI,cAArC,yBAAuC,CAAF;QAKrC,KAAK,CACH,IAAI,kCAEC,IAAI,KACP,UAAU,EAAE,eAAe,CAAC,UAAU,CAAC;gBACrC,CAAC,CAAe,UAAW,CAAC,SAAS,EAAE;gBACvC,CAAC,CAAC,UAAU,KAEhB,UAAU,EACV,IAAI,CACL,CAAC;QApBI,YAAO,GAAG,KAAK,CAAC;QAChB,aAAQ,GAAG,KAAK,CAAC;QAqBvB,IAAI,CAAC,IAAI,GAAG,MAAM,CAAC,MAAM,CACvB;YACE,eAAe,EAAE,KAAK;SACvB,EACD,IAAI,CAAC,IAAI,CACV,CAAC;QAEF,IAAI,OAAO,EAAE,CAAC;YACZ,IAAI,CAAC,GAAG,EAAE,CAAC,KAAK,CAAC,KAAK,CAAC,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,KAAK,CAAC,CAAC,CAAC;QACvD,CAAC;IACH,CAAC;IAED,IAAI,CAGF,KAAQ,EAAE,GAAG,IAA8B;QAC3C,OAAO,KAAK,CAAC,IAAI,CAAC,KAAK,EAAE,GAAG,IAAI,CAAC,CAAC;IACpC,CAAC;IAED,GAAG,CAGD,SAAY,EAAE,QAAgB;QAC9B,KAAK,CAAC,GAAG,CAAC,SAAS,EAAE,QAAoC,CAAC,CAAC;QAC3D,OAAO,IAAI,CAAC;IACd,CAAC;IAED,EAAE,CAGA,KAAQ,EAAE,QAAgB;QAC1B,KAAK,CAAC,EAAE,CAAC,KAAK,EAAE,QAAoC,CAAC,CAAC;QACtD,OAAO,IAAI,CAAC;IACd,CAAC;IAED,IAAI,CAGF,KAAQ,EAAE,QAAgB;QAC1B,KAAK,CAAC,IAAI,CAAC,KAAK,EAAE,QAAoC,CAAC,CAAC;QACxD,OAAO,IAAI,CAAC;IACd,CAAC;IAED;;;OAGG;IACH,KAAK,CAAC,GAAG;QACP,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC;gBACH,IAAI,CAAC,OAAO,GAAG,IAAI,CAAC;gBACpB,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC;gBAEjC,8DAA8D;gBAC9D,IAAI,CAAC;oBACH,MAAM,MAAM,CAAC,MAAM,CAAC,SAAS,EAAE,IAAI,CAAC,UAAU,CAAC,kBAAkB,CAAC,CAAC,CAAC;gBACtE,CAAC;gBAAC,OAAO,GAAG,EAAE,CAAC;oBACb,IAAI,CAAC,uBAAuB,CAAC,IAAI,CAAS,GAAI,CAAC,OAAO,CAAC,EAAE,CAAC;wBACxD,MAAM,GAAG,CAAC;oBACZ,CAAC;gBACH,CAAC;gBAED,MAAM,IAAI,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC;YACnC,CAAC;YAAC,OAAO,KAAK,EAAE,CAAC;gBACf,IAAI,CAAC,OAAO,GAAG,KAAK,CAAC;gBACrB,MAAM,KAAK,CAAC;YACd,CAAC;QACH,CAAC;aAAM,CAAC;YACN,MAAM,IAAI,KAAK,CAAC,kCAAkC,CAAC,CAAC;QACtD,
CAAC;IACH,CAAC;IAEO,KAAK,CAAC,aAAa,CAAC,MAAmB;QAC7C,MAAM,IAAI,GAAuB,IAAI,CAAC,IAAI,CAAC;QAE3C,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC;QAC7B,IAAI,EAAE,GAAG,IAAI,CAAC,WAAW,IAAI,GAAG,CAAC;QAEjC,OAAO,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YACrB,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC;YACrB,mGAAmG;YACnG,MAAM,IAAI,GAAkB,MAAM,IAAI,CAAC,oBAAoB,CAAC,GAAG,EAAE,CAC/D,MAAM,CAAC,KAAK,CAAC,OAAO,EAAE,IAAI,CAAC,eAAgB,EAAE,SAAS,EAAE,GAAG,EAAE,EAAE,CAAC,CACjE,CAAC;YACF,IAAI,CAAC,QAAQ,GAAG,KAAK,CAAC;YACtB,IAAI,IAAI,EAAE,CAAC;gBACT,MAAM,MAAM,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;gBACvB,MAAM,MAAM,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC;gBAEzB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;oBACvC,EAAE,GAAG,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;oBAClB,MAAM,IAAI,GAAG,SAAS,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;oBAErC,EAAE;oBACF,gEAAgE;oBAChE,sBAAsB;oBACtB,QAAQ,IAAI,CAAC,KAAK,EAAE,CAAC;wBACnB,KAAK,UAAU;4BACb,IAAI,CAAC,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;4BAClC,MAAM;wBACR,KAAK,WAAW;4BACd,IAAI,CAAC,WAAW,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;4BAChD,MAAM;oBACV,CAAC;oBAED,MAAM,EAAE,KAAK,KAAkB,IAAI,EAAjB,QAAQ,UAAK,IAAI,EAA7B,SAAsB,CAAO,CAAC;oBAEpC,IAAI,KAAK,KAAK,SAAS,EAAE,CAAC;wBACxB,IAAI,CAAC,IAAI,CAAC,KAAK,EAAE,EAAE,CAAC,CAAC;oBACvB,CAAC;yBAAM,CAAC;wBACN,IAAI,CAAC,IAAI,CAAC,KAAY,EAAE,QAAQ,EAAE,EAAE,CAAC,CAAC;wBACtC,IAAI,QAAQ,CAAC,KAAK,EAAE,CAAC;4BACnB,IAAI,CAAC,IAAI,CAAC,GAAG,KAAK,IAAI,QAAQ,CAAC,KAAK,EAAS,EAAE,QAAQ,EAAE,EAAE,CAAC,CAAC;wBAC/D,CAAC;oBACH,CAAC;gBACH,CAAC;YACH,CAAC;QACH,CAAC;IACH,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,KAAK;QACT,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;YAClB,IAAI,CAAC,OAAO,GAAG,CAAC,KAAK,IAAI,EAAE;gBACzB,IAAI,CAAC;oBACH,sEAAsE;oBACtE,0FAA0F;oBAC1F,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,MAAM,CAAC;oBACjC,MAAM,CAAC,UAAU,EAAE,CAAC;oBACpB,MAAM,IAAI,CAAC,UAAU,CAAC,KAAK,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;gBAC7C,CAAC;wBAAS,CAAC;oBACT,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;gBACrB,CAAC;YACH,CAAC,CAAC,EAAE,CAAC;QACP,CAAC;QACD,OAAO,IAAI,CAAC,OAAO,CAAC;IACtB,CAAC;CACF"
}

View File

@@ -0,0 +1,260 @@
import { QueueBase } from './queue-base';
import { Job } from './job';
import { JobState, JobType } from '../types';
import { JobJsonRaw, Metrics, QueueMeta } from '../interfaces';
/**
* Provides different getters for different aspects of a queue.
*/
export declare class QueueGetters<JobBase extends Job = Job> extends QueueBase {
    /** Fetches a job by its id; resolves to undefined when no such job exists. */
    getJob(jobId: string): Promise<JobBase | undefined>;
    /** Maps each job type to the redis key/command pair used to count or range over it. */
    private commandByType;
    /** Normalizes the requested job types: expands 'waiting' to include 'paused', de-duplicates, and falls back to all types when none are given. */
    private sanitizeJobTypes;
    /**
      Returns the number of jobs waiting to be processed. This includes jobs that are
      "waiting" or "delayed" or "prioritized" or "waiting-children".
    */
    count(): Promise<number>;
    /**
     * Returns the time to live for a rate limited key in milliseconds.
     * @param maxJobs - max jobs to be considered in rate limit state. If not passed
     * it will return the remaining ttl without considering if max jobs is exceeded.
     * @returns -2 if the key does not exist.
     * -1 if the key exists but has no associated expire.
     * @see {@link https://redis.io/commands/pttl/}
     */
    getRateLimitTtl(maxJobs?: number): Promise<number>;
    /**
     * Get jobId that starts debounced state.
     * @deprecated use getDeduplicationJobId method
     *
     * @param id - debounce identifier
     */
    getDebounceJobId(id: string): Promise<string | null>;
    /**
     * Get jobId from deduplicated state.
     *
     * @param id - deduplication identifier
     */
    getDeduplicationJobId(id: string): Promise<string | null>;
    /**
     * Get global concurrency value.
     * Returns null in case no value is set.
     */
    getGlobalConcurrency(): Promise<number | null>;
    /**
     * Get global rate limit values.
     * Returns null in case no value is set.
     */
    getGlobalRateLimit(): Promise<{
        max: number;
        duration: number;
    } | null>;
    /**
     * Job counts by type
     *
     * Queue#getJobCountByTypes('completed') =\> completed count
     * Queue#getJobCountByTypes('completed', 'failed') =\> completed + failed count
     * Queue#getJobCountByTypes('completed', 'waiting', 'failed') =\> completed + waiting + failed count
     */
    getJobCountByTypes(...types: JobType[]): Promise<number>;
    /**
     * Returns the job counts for each type specified or every list/set in the queue by default.
     *
     * @returns An object, key (type) and value (count)
     */
    getJobCounts(...types: JobType[]): Promise<{
        [index: string]: number;
    }>;
    /**
     * Get current job state.
     *
     * @param jobId - job identifier.
     * @returns Returns one of these values:
     * 'completed', 'failed', 'delayed', 'active', 'waiting', 'waiting-children', 'unknown'.
     */
    getJobState(jobId: string): Promise<JobState | 'unknown'>;
    /**
     * Get global queue configuration.
     *
     * @returns Returns the global queue configuration.
     */
    getMeta(): Promise<QueueMeta>;
    /**
     * @returns Returns the number of jobs in completed status.
     */
    getCompletedCount(): Promise<number>;
    /**
     * Returns the number of jobs in failed status.
     */
    getFailedCount(): Promise<number>;
    /**
     * Returns the number of jobs in delayed status.
     */
    getDelayedCount(): Promise<number>;
    /**
     * Returns the number of jobs in active status.
     */
    getActiveCount(): Promise<number>;
    /**
     * Returns the number of jobs in prioritized status.
     */
    getPrioritizedCount(): Promise<number>;
    /**
     * Returns the number of jobs per priority.
     */
    getCountsPerPriority(priorities: number[]): Promise<{
        [index: string]: number;
    }>;
    /**
     * Returns the number of jobs in waiting or paused statuses.
     */
    getWaitingCount(): Promise<number>;
    /**
     * Returns the number of jobs in waiting-children status.
     */
    getWaitingChildrenCount(): Promise<number>;
    /**
     * Returns the jobs that are in the "waiting" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getWaiting(start?: number, end?: number): Promise<JobBase[]>;
    /**
     * Returns the jobs that are in the "waiting-children" status.
     * I.E. parent jobs that have at least one child that has not completed yet.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getWaitingChildren(start?: number, end?: number): Promise<JobBase[]>;
    /**
     * Returns the jobs that are in the "active" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getActive(start?: number, end?: number): Promise<JobBase[]>;
    /**
     * Returns the jobs that are in the "delayed" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getDelayed(start?: number, end?: number): Promise<JobBase[]>;
    /**
     * Returns the jobs that are in the "prioritized" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getPrioritized(start?: number, end?: number): Promise<JobBase[]>;
    /**
     * Returns the jobs that are in the "completed" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getCompleted(start?: number, end?: number): Promise<JobBase[]>;
    /**
     * Returns the jobs that are in the "failed" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getFailed(start?: number, end?: number): Promise<JobBase[]>;
    /**
     * Returns the qualified job ids and the raw job data (if available) of the
     * children jobs of the given parent job.
     * It is possible to get either the already processed children, in this case
     * an array of qualified job ids and their result values will be returned,
     * or the pending children, in this case an array of qualified job ids will
     * be returned.
     * A qualified job id is a string representing the job id in a given queue,
     * for example: "bull:myqueue:jobid".
     *
     * @param parentId - The id of the parent job
     * @param type - "processed" | "pending"
     * @param opts - Options for the query.
     *
     * @returns an object with the following shape:
     * `{ items: { id: string, v?: any, err?: string } [], jobs: JobJsonRaw[], total: number}`
     */
    getDependencies(parentId: string, type: 'processed' | 'pending', start: number, end: number): Promise<{
        items: {
            id: string;
            v?: any;
            err?: string;
        }[];
        jobs: JobJsonRaw[];
        total: number;
    }>;
    /** Returns the de-duplicated job ids found in the given statuses' ranges. */
    getRanges(types: JobType[], start?: number, end?: number, asc?: boolean): Promise<string[]>;
    /**
     * Returns the jobs that are on the given statuses (note that JobType is synonym for job status)
     * @param types - the statuses of the jobs to return.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @param asc - if true, the jobs will be returned in ascending order.
     */
    getJobs(types?: JobType[] | JobType, start?: number, end?: number, asc?: boolean): Promise<JobBase[]>;
    /**
     * Returns the logs for a given Job.
     * @param jobId - the id of the job to get the logs for.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @param asc - if true, the jobs will be returned in ascending order.
     */
    getJobLogs(jobId: string, start?: number, end?: number, asc?: boolean): Promise<{
        logs: string[];
        count: number;
    }>;
    /** Runs CLIENT LIST and returns the redis client entries accepted by the matcher. */
    private baseGetClients;
    /**
     * Get the worker list related to the queue. i.e. all the known
     * workers that are available to process jobs for this queue.
     * Note: GCP does not support SETNAME, so this call will not work
     *
     * @returns - Returns an array with workers info.
     */
    getWorkers(): Promise<{
        [index: string]: string;
    }[]>;
    /**
     * Returns the current count of workers for the queue.
     *
     * getWorkersCount(): Promise<number>
     *
     */
    getWorkersCount(): Promise<number>;
    /**
     * Get queue events list related to the queue.
     * Note: GCP does not support SETNAME, so this call will not work
     *
     * @deprecated do not use this method, it will be removed in the future.
     *
     * @returns - Returns an array with queue events info.
     */
    getQueueEvents(): Promise<{
        [index: string]: string;
    }[]>;
    /**
     * Get queue metrics related to the queue.
     *
     * This method returns the gathered metrics for the queue.
     * The metrics are represented as an array of job counts
     * per unit of time (1 minute).
     *
     * @param start - Start point of the metrics, where 0
     * is the newest point to be returned.
     * @param end - End point of the metrics, where -1 is the
     * oldest point to be returned.
     *
     * @returns - Returns an object with queue metrics.
     */
    getMetrics(type: 'completed' | 'failed', start?: number, end?: number): Promise<Metrics>;
    /** Parses the raw CLIENT LIST output into objects, keeping entries accepted by the matcher. */
    private parseClientList;
    /**
     * Export the metrics for the queue in the Prometheus format.
     * Automatically exports all the counts returned by getJobCounts().
     *
     * @returns - Returns a string with the metrics in the Prometheus format.
     *
     * @see {@link https://prometheus.io/docs/instrumenting/exposition_formats/}
     *
     **/
    exportPrometheusMetrics(globalVariables?: Record<string, string>): Promise<string>;
}

View File

@@ -0,0 +1,506 @@
/*eslint-env node */
'use strict';
import { __rest } from "tslib";
import { QueueBase } from './queue-base';
import { clientCommandMessageReg, QUEUE_EVENT_SUFFIX } from '../utils';
/**
* Provides different getters for different aspects of a queue.
*/
export class QueueGetters extends QueueBase {
    /**
     * Fetches a job instance by its id.
     */
    getJob(jobId) {
        return this.Job.fromId(this, jobId);
    }
    /**
     * For each given job type, invokes the callback with the redis key holding
     * that type and the redis command to use on it: sorted-set commands
     * (zcard/zrange) for set-backed states, list commands (llen/lrange) for
     * list-backed states. 'waiting' is aliased to the 'wait' key.
     */
    commandByType(types, count, callback) {
        return types.map((type) => {
            type = type === 'waiting' ? 'wait' : type; // alias
            const key = this.toKey(type);
            switch (type) {
                case 'completed':
                case 'failed':
                case 'delayed':
                case 'prioritized':
                case 'repeat':
                case 'waiting-children':
                    return callback(key, count ? 'zcard' : 'zrange');
                case 'active':
                case 'wait':
                case 'paused':
                    return callback(key, count ? 'llen' : 'lrange');
            }
        });
    }
    /**
     * Normalizes the requested job types: a single string becomes an array,
     * 'waiting' is expanded to also include 'paused', duplicates are removed,
     * and an empty/missing input falls back to every known type.
     */
    sanitizeJobTypes(types) {
        const currentTypes = typeof types === 'string' ? [types] : types;
        if (Array.isArray(currentTypes) && currentTypes.length > 0) {
            const sanitizedTypes = [...currentTypes];
            if (sanitizedTypes.indexOf('waiting') !== -1) {
                sanitizedTypes.push('paused');
            }
            return [...new Set(sanitizedTypes)];
        }
        return [
            'active',
            'completed',
            'delayed',
            'failed',
            'paused',
            'prioritized',
            'waiting',
            'waiting-children',
        ];
    }
    /**
      Returns the number of jobs waiting to be processed. This includes jobs that are
      "waiting" or "delayed" or "prioritized" or "waiting-children".
    */
    async count() {
        const count = await this.getJobCountByTypes('waiting', 'paused', 'delayed', 'prioritized', 'waiting-children');
        return count;
    }
    /**
     * Returns the time to live for a rate limited key in milliseconds.
     * @param maxJobs - max jobs to be considered in rate limit state. If not passed
     * it will return the remaining ttl without considering if max jobs is exceeded.
     * @returns -2 if the key does not exist.
     * -1 if the key exists but has no associated expire.
     * @see {@link https://redis.io/commands/pttl/}
     */
    async getRateLimitTtl(maxJobs) {
        return this.scripts.getRateLimitTtl(maxJobs);
    }
    /**
     * Get jobId that starts debounced state.
     * @deprecated use getDeduplicationJobId method
     *
     * @param id - debounce identifier
     */
    async getDebounceJobId(id) {
        const client = await this.client;
        return client.get(`${this.keys.de}:${id}`);
    }
    /**
     * Get jobId from deduplicated state.
     *
     * @param id - deduplication identifier
     */
    async getDeduplicationJobId(id) {
        const client = await this.client;
        return client.get(`${this.keys.de}:${id}`);
    }
    /**
     * Get global concurrency value.
     * Returns null in case no value is set.
     */
    async getGlobalConcurrency() {
        const client = await this.client;
        const concurrency = await client.hget(this.keys.meta, 'concurrency');
        if (concurrency) {
            return Number(concurrency);
        }
        return null;
    }
    /**
     * Get global rate limit values.
     * Returns null in case no value is set.
     */
    async getGlobalRateLimit() {
        const client = await this.client;
        const [max, duration] = await client.hmget(this.keys.meta, 'max', 'duration');
        if (max && duration) {
            return {
                max: Number(max),
                duration: Number(duration),
            };
        }
        return null;
    }
    /**
     * Job counts by type
     *
     * Queue#getJobCountByTypes('completed') =\> completed count
     * Queue#getJobCountByTypes('completed', 'failed') =\> completed + failed count
     * Queue#getJobCountByTypes('completed', 'waiting', 'failed') =\> completed + waiting + failed count
     */
    async getJobCountByTypes(...types) {
        const result = await this.getJobCounts(...types);
        return Object.values(result).reduce((sum, count) => sum + count, 0);
    }
    /**
     * Returns the job counts for each type specified or every list/set in the queue by default.
     *
     * @returns An object, key (type) and value (count)
     */
    async getJobCounts(...types) {
        const currentTypes = this.sanitizeJobTypes(types);
        const responses = await this.scripts.getCounts(currentTypes);
        const counts = {};
        responses.forEach((res, index) => {
            counts[currentTypes[index]] = res || 0;
        });
        return counts;
    }
    /**
     * Get current job state.
     *
     * @param jobId - job identifier.
     * @returns Returns one of these values:
     * 'completed', 'failed', 'delayed', 'active', 'waiting', 'waiting-children', 'unknown'.
     */
    getJobState(jobId) {
        return this.scripts.getState(jobId);
    }
    /**
     * Get global queue configuration.
     *
     * @returns Returns the global queue configuration.
     */
    async getMeta() {
        const client = await this.client;
        const config = await client.hgetall(this.keys.meta);
        // Pull out the numeric/boolean fields; everything else passes through as-is.
        const { concurrency, max, duration, paused, 'opts.maxLenEvents': maxLenEvents } = config, rest = __rest(config, ["concurrency", "max", "duration", "paused", 'opts.maxLenEvents']);
        const parsedConfig = rest;
        if (concurrency) {
            parsedConfig['concurrency'] = Number(concurrency);
        }
        if (maxLenEvents) {
            parsedConfig['maxLenEvents'] = Number(maxLenEvents);
        }
        if (max) {
            parsedConfig['max'] = Number(max);
        }
        if (duration) {
            parsedConfig['duration'] = Number(duration);
        }
        parsedConfig['paused'] = paused === '1';
        return parsedConfig;
    }
    /**
     * @returns Returns the number of jobs in completed status.
     */
    getCompletedCount() {
        return this.getJobCountByTypes('completed');
    }
    /**
     * Returns the number of jobs in failed status.
     */
    getFailedCount() {
        return this.getJobCountByTypes('failed');
    }
    /**
     * Returns the number of jobs in delayed status.
     */
    getDelayedCount() {
        return this.getJobCountByTypes('delayed');
    }
    /**
     * Returns the number of jobs in active status.
     */
    getActiveCount() {
        return this.getJobCountByTypes('active');
    }
    /**
     * Returns the number of jobs in prioritized status.
     */
    getPrioritizedCount() {
        return this.getJobCountByTypes('prioritized');
    }
    /**
     * Returns the number of jobs per priority.
     */
    async getCountsPerPriority(priorities) {
        const uniquePriorities = [...new Set(priorities)];
        const responses = await this.scripts.getCountsPerPriority(uniquePriorities);
        const counts = {};
        responses.forEach((res, index) => {
            counts[`${uniquePriorities[index]}`] = res || 0;
        });
        return counts;
    }
    /**
     * Returns the number of jobs in waiting or paused statuses.
     */
    getWaitingCount() {
        // 'waiting' is expanded to include 'paused' by sanitizeJobTypes.
        return this.getJobCountByTypes('waiting');
    }
    /**
     * Returns the number of jobs in waiting-children status.
     */
    getWaitingChildrenCount() {
        return this.getJobCountByTypes('waiting-children');
    }
    /**
     * Returns the jobs that are in the "waiting" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getWaiting(start = 0, end = -1) {
        return this.getJobs(['waiting'], start, end, true);
    }
    /**
     * Returns the jobs that are in the "waiting-children" status.
     * I.E. parent jobs that have at least one child that has not completed yet.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getWaitingChildren(start = 0, end = -1) {
        return this.getJobs(['waiting-children'], start, end, true);
    }
    /**
     * Returns the jobs that are in the "active" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getActive(start = 0, end = -1) {
        return this.getJobs(['active'], start, end, true);
    }
    /**
     * Returns the jobs that are in the "delayed" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getDelayed(start = 0, end = -1) {
        return this.getJobs(['delayed'], start, end, true);
    }
    /**
     * Returns the jobs that are in the "prioritized" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getPrioritized(start = 0, end = -1) {
        return this.getJobs(['prioritized'], start, end, true);
    }
    /**
     * Returns the jobs that are in the "completed" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getCompleted(start = 0, end = -1) {
        return this.getJobs(['completed'], start, end, false);
    }
    /**
     * Returns the jobs that are in the "failed" status.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     */
    getFailed(start = 0, end = -1) {
        return this.getJobs(['failed'], start, end, false);
    }
    /**
     * Returns the qualified job ids and the raw job data (if available) of the
     * children jobs of the given parent job.
     * It is possible to get either the already processed children, in this case
     * an array of qualified job ids and their result values will be returned,
     * or the pending children, in this case an array of qualified job ids will
     * be returned.
     * A qualified job id is a string representing the job id in a given queue,
     * for example: "bull:myqueue:jobid".
     *
     * @param parentId - The id of the parent job
     * @param type - "processed" | "pending"
     * @param opts - Options for the query.
     *
     * @returns an object with the following shape:
     * `{ items: { id: string, v?: any, err?: string } [], jobs: JobJsonRaw[], total: number}`
     */
    async getDependencies(parentId, type, start, end) {
        const key = this.toKey(type == 'processed'
            ? `${parentId}:processed`
            : `${parentId}:dependencies`);
        const { items, total, jobs } = await this.scripts.paginate(key, {
            start,
            end,
            fetchJobs: true,
        });
        return {
            items,
            jobs,
            total,
        };
    }
    /**
     * Returns the de-duplicated job ids found in the given statuses' ranges.
     * List-backed ranges fetched ascending are reversed so newest-first order
     * matches the sorted-set results.
     *
     * NOTE(review): the default `end = 1` differs from the `-1` used by every
     * other getter in this class — confirm whether this is intentional
     * (callers such as getJobs always pass end explicitly).
     */
    async getRanges(types, start = 0, end = 1, asc = false) {
        const multiCommands = [];
        this.commandByType(types, false, (key, command) => {
            switch (command) {
                case 'lrange':
                    multiCommands.push('lrange');
                    break;
                case 'zrange':
                    multiCommands.push('zrange');
                    break;
            }
        });
        const responses = await this.scripts.getRanges(types, start, end, asc);
        let results = [];
        responses.forEach((response, index) => {
            const result = response || [];
            if (asc && multiCommands[index] === 'lrange') {
                results = results.concat(result.reverse());
            }
            else {
                results = results.concat(result);
            }
        });
        return [...new Set(results)];
    }
    /**
     * Returns the jobs that are on the given statuses (note that JobType is synonym for job status)
     * @param types - the statuses of the jobs to return.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @param asc - if true, the jobs will be returned in ascending order.
     */
    async getJobs(types, start = 0, end = -1, asc = false) {
        const currentTypes = this.sanitizeJobTypes(types);
        const jobIds = await this.getRanges(currentTypes, start, end, asc);
        return Promise.all(jobIds.map(jobId => this.Job.fromId(this, jobId)));
    }
    /**
     * Returns the logs for a given Job.
     * @param jobId - the id of the job to get the logs for.
     * @param start - zero based index from where to start returning jobs.
     * @param end - zero based index where to stop returning jobs.
     * @param asc - if true, the jobs will be returned in ascending order.
     */
    async getJobLogs(jobId, start = 0, end = -1, asc = true) {
        const client = await this.client;
        const multi = client.multi();
        const logsKey = this.toKey(jobId + ':logs');
        if (asc) {
            multi.lrange(logsKey, start, end);
        }
        else {
            // Negative lrange indices address the tail of the list, so the
            // requested window is taken from the end and reversed below.
            multi.lrange(logsKey, -(end + 1), -(start + 1));
        }
        multi.llen(logsKey);
        const result = (await multi.exec());
        if (!asc) {
            result[0][1].reverse();
        }
        return {
            logs: result[0][1],
            count: result[1][1],
        };
    }
    /**
     * Runs CLIENT LIST on the redis connection and returns the client entries
     * accepted by the matcher. Falls back to a placeholder entry on servers
     * (e.g. GCP Memorystore) that reject the CLIENT command.
     */
    async baseGetClients(matcher) {
        const client = await this.client;
        try {
            const clients = (await client.client('LIST'));
            const list = this.parseClientList(clients, matcher);
            return list;
        }
        catch (err) {
            if (!clientCommandMessageReg.test(err.message)) {
                throw err;
            }
            return [{ name: 'GCP does not support client list' }];
        }
    }
    /**
     * Get the worker list related to the queue. i.e. all the known
     * workers that are available to process jobs for this queue.
     * Note: GCP does not support SETNAME, so this call will not work
     *
     * @returns - Returns an array with workers info.
     */
    getWorkers() {
        const unnamedWorkerClientName = `${this.clientName()}`;
        const namedWorkerClientName = `${this.clientName()}:w:`;
        const matcher = (name) => name &&
            (name === unnamedWorkerClientName ||
                name.startsWith(namedWorkerClientName));
        return this.baseGetClients(matcher);
    }
    /**
     * Returns the current count of workers for the queue.
     *
     * getWorkersCount(): Promise<number>
     *
     */
    async getWorkersCount() {
        const workers = await this.getWorkers();
        return workers.length;
    }
    /**
     * Get queue events list related to the queue.
     * Note: GCP does not support SETNAME, so this call will not work
     *
     * @deprecated do not use this method, it will be removed in the future.
     *
     * @returns - Returns an array with queue events info.
     */
    async getQueueEvents() {
        const clientName = `${this.clientName()}${QUEUE_EVENT_SUFFIX}`;
        return this.baseGetClients((name) => name === clientName);
    }
    /**
     * Get queue metrics related to the queue.
     *
     * This method returns the gathered metrics for the queue.
     * The metrics are represented as an array of job counts
     * per unit of time (1 minute).
     *
     * @param start - Start point of the metrics, where 0
     * is the newest point to be returned.
     * @param end - End point of the metrics, where -1 is the
     * oldest point to be returned.
     *
     * @returns - Returns an object with queue metrics.
     */
    async getMetrics(type, start = 0, end = -1) {
        const [meta, data, count] = await this.scripts.getMetrics(type, start, end);
        return {
            meta: {
                count: parseInt(meta[0] || '0', 10),
                prevTS: parseInt(meta[1] || '0', 10),
                prevCount: parseInt(meta[2] || '0', 10),
            },
            data: data.map(point => +point || 0),
            count,
        };
    }
    /**
     * Parses the raw CLIENT LIST output (one `key=value` record per line) into
     * objects, keeping only the entries whose connection name is accepted by
     * the matcher. The original connection name is preserved under 'rawname'.
     */
    parseClientList(list, matcher) {
        const lines = list.split(/\r?\n/);
        const clients = [];
        lines.forEach((line) => {
            const client = {};
            const keyValues = line.split(' ');
            keyValues.forEach(function (keyValue) {
                const index = keyValue.indexOf('=');
                const key = keyValue.substring(0, index);
                const value = keyValue.substring(index + 1);
                client[key] = value;
            });
            const name = client['name'];
            if (matcher(name)) {
                client['name'] = this.name;
                client['rawname'] = name;
                clients.push(client);
            }
        });
        return clients;
    }
    /**
     * Export the metrics for the queue in the Prometheus format.
     * Automatically exports all the counts returned by getJobCounts().
     *
     * @returns - Returns a string with the metrics in the Prometheus format.
     *
     * @see {@link https://prometheus.io/docs/instrumenting/exposition_formats/}
     *
     **/
    async exportPrometheusMetrics(globalVariables) {
        const counts = await this.getJobCounts();
        const metrics = [];
        // Match the test's expected HELP text
        metrics.push('# HELP bullmq_job_count Number of jobs in the queue by state');
        metrics.push('# TYPE bullmq_job_count gauge');
        const variables = !globalVariables
            ? ''
            : Object.keys(globalVariables).reduce((acc, curr) => `${acc}, ${curr}="${globalVariables[curr]}"`, '');
        for (const [state, count] of Object.entries(counts)) {
            metrics.push(`bullmq_job_count{queue="${this.name}", state="${state}"${variables}} ${count}`);
        }
        return metrics.join('\n');
    }
}
//# sourceMappingURL=queue-getters.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,10 @@
export type KeysMap = {
[index in string]: string;
};
/** Computes the fully qualified redis keys (`<prefix>:<name>:<type>`) used by a queue. */
export declare class QueueKeys {
    readonly prefix: string;
    constructor(prefix?: string);
    getKeys(name: string): KeysMap;
    toKey(name: string, type: string): string;
    getQueueQualifiedName(name: string): string;
}

View File

@@ -0,0 +1,39 @@
/**
 * QueueKeys
 *
 * Computes the fully qualified redis keys used by a queue. Every key is
 * namespaced as `<prefix>:<queue name>:<type>`.
 */
export class QueueKeys {
    /**
     * @param prefix - namespace prepended to every key (defaults to 'bull').
     */
    constructor(prefix = 'bull') {
        this.prefix = prefix;
    }
    /**
     * Builds the map of every well-known key type for the given queue name.
     *
     * @param name - the queue name.
     * @returns an object mapping each key type to its fully qualified key.
     */
    getKeys(name) {
        const keyTypes = [
            '',
            'active',
            'wait',
            'waiting-children',
            'paused',
            'id',
            'delayed',
            'prioritized',
            'stalled-check',
            'completed',
            'failed',
            'stalled',
            'repeat',
            'limiter',
            'meta',
            'events',
            'pc', // priority counter key
            'marker', // marker key
            'de', // deduplication key
        ];
        const keys = {};
        for (const type of keyTypes) {
            keys[type] = this.toKey(name, type);
        }
        return keys;
    }
    /**
     * Returns the fully qualified key for a queue name and key type.
     */
    toKey(name, type) {
        return `${this.getQueueQualifiedName(name)}:${type}`;
    }
    /**
     * Returns the `<prefix>:<name>` namespace shared by all keys of a queue.
     */
    getQueueQualifiedName(name) {
        return `${this.prefix}:${name}`;
    }
}
//# sourceMappingURL=queue-keys.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"queue-keys.js","sourceRoot":"","sources":["../../../src/classes/queue-keys.ts"],"names":[],"mappings":"AAEA,MAAM,OAAO,SAAS;IACpB,YAA4B,SAAS,MAAM;QAAf,WAAM,GAAN,MAAM,CAAS;IAAG,CAAC;IAE/C,OAAO,CAAC,IAAY;QAClB,MAAM,IAAI,GAAgC,EAAE,CAAC;QAC7C;YACE,EAAE;YACF,QAAQ;YACR,MAAM;YACN,kBAAkB;YAClB,QAAQ;YACR,IAAI;YACJ,SAAS;YACT,aAAa;YACb,eAAe;YACf,WAAW;YACX,QAAQ;YACR,SAAS;YACT,QAAQ;YACR,SAAS;YACT,MAAM;YACN,QAAQ;YACR,IAAI,EAAE,uBAAuB;YAC7B,QAAQ,EAAE,aAAa;YACvB,IAAI,EAAE,oBAAoB;SAC3B,CAAC,OAAO,CAAC,GAAG,CAAC,EAAE;YACd,IAAI,CAAC,GAAG,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,EAAE,GAAG,CAAC,CAAC;QACpC,CAAC,CAAC,CAAC;QAEH,OAAO,IAAI,CAAC;IACd,CAAC;IAED,KAAK,CAAC,IAAY,EAAE,IAAY;QAC9B,OAAO,GAAG,IAAI,CAAC,qBAAqB,CAAC,IAAI,CAAC,IAAI,IAAI,EAAE,CAAC;IACvD,CAAC;IAED,qBAAqB,CAAC,IAAY;QAChC,OAAO,GAAG,IAAI,CAAC,MAAM,IAAI,IAAI,EAAE,CAAC;IAClC,CAAC;CACF"}

421
backend/node_modules/bullmq/dist/esm/classes/queue.d.ts generated vendored Normal file
View File

@@ -0,0 +1,421 @@
import { BaseJobOptions, BulkJobOptions, IoredisListener, JobSchedulerJson, QueueOptions, RepeatableJob, RepeatOptions } from '../interfaces';
import { FinishedStatus, JobsOptions, JobSchedulerTemplateOptions, JobProgress } from '../types';
import { Job } from './job';
import { QueueGetters } from './queue-getters';
import { Repeat } from './repeat';
import { RedisConnection } from './redis-connection';
import { JobScheduler } from './job-scheduler';
export interface ObliterateOpts {
/**
* Use force = true to force obliteration even with active jobs in the queue
* @defaultValue false
*/
force?: boolean;
/**
* Use count with the maximum number of deleted keys per iteration
* @defaultValue 1000
*/
count?: number;
}
export interface QueueListener<JobBase extends Job = Job> extends IoredisListener {
/**
* Listen to 'cleaned' event.
*
* This event is triggered when the queue calls clean method.
*/
cleaned: (jobs: string[], type: string) => void;
/**
* Listen to 'error' event.
*
* This event is triggered when an error is thrown.
*/
error: (err: Error) => void;
/**
* Listen to 'paused' event.
*
* This event is triggered when the queue is paused.
*/
paused: () => void;
/**
* Listen to 'progress' event.
*
* This event is triggered when the job updates its progress.
*/
progress: (jobId: string, progress: JobProgress) => void;
/**
* Listen to 'removed' event.
*
* This event is triggered when a job is removed.
*/
removed: (jobId: string) => void;
/**
* Listen to 'resumed' event.
*
* This event is triggered when the queue is resumed.
*/
resumed: () => void;
/**
* Listen to 'waiting' event.
*
* This event is triggered when the queue creates a new job.
*/
waiting: (job: JobBase) => void;
}
/**
* IsAny<T> A type helper to determine if a given type `T` is `any`.
* This works by using `any` type with the intersection
* operator (`&`). If `T` is `any`, then `1 & T` resolves to `any`, and since `0`
* is assignable to `any`, the conditional type returns `true`.
*/
type IsAny<T> = 0 extends 1 & T ? true : false;
type JobBase<T, ResultType, NameType extends string> = IsAny<T> extends true ? Job<T, ResultType, NameType> : T extends Job<any, any, any> ? T : Job<T, ResultType, NameType>;
type ExtractDataType<DataTypeOrJob, Default> = DataTypeOrJob extends Job<infer D, any, any> ? D : Default;
type ExtractResultType<DataTypeOrJob, Default> = DataTypeOrJob extends Job<any, infer R, any> ? R : Default;
type ExtractNameType<DataTypeOrJob, Default extends string> = DataTypeOrJob extends Job<any, any, infer N> ? N : Default;
/**
* Queue
*
* This class provides methods to add jobs to a queue and some other high-level
* administration such as pausing or deleting queues.
*
* @typeParam DataType - The type of the data that the job will process.
* @typeParam ResultType - The type of the result of the job.
* @typeParam NameType - The type of the name of the job.
*
* @example
*
* ```typescript
* import { Queue } from 'bullmq';
*
* interface MyDataType {
* foo: string;
* }
*
* interface MyResultType {
* bar: string;
* }
*
* const queue = new Queue<MyDataType, MyResultType, "blue" | "brown">('myQueue');
* ```
*/
export declare class Queue<DataTypeOrJob = any, DefaultResultType = any, DefaultNameType extends string = string, DataType = ExtractDataType<DataTypeOrJob, DataTypeOrJob>, ResultType = ExtractResultType<DataTypeOrJob, DefaultResultType>, NameType extends string = ExtractNameType<DataTypeOrJob, DefaultNameType>> extends QueueGetters<JobBase<DataTypeOrJob, ResultType, NameType>> {
token: string;
jobsOpts: BaseJobOptions;
opts: QueueOptions;
protected libName: string;
protected _repeat?: Repeat;
protected _jobScheduler?: JobScheduler;
constructor(name: string, opts?: QueueOptions, Connection?: typeof RedisConnection);
emit<U extends keyof QueueListener<JobBase<DataType, ResultType, NameType>>>(event: U, ...args: Parameters<QueueListener<JobBase<DataType, ResultType, NameType>>[U]>): boolean;
off<U extends keyof QueueListener<JobBase<DataType, ResultType, NameType>>>(eventName: U, listener: QueueListener<JobBase<DataType, ResultType, NameType>>[U]): this;
on<U extends keyof QueueListener<JobBase<DataType, ResultType, NameType>>>(event: U, listener: QueueListener<JobBase<DataType, ResultType, NameType>>[U]): this;
once<U extends keyof QueueListener<JobBase<DataType, ResultType, NameType>>>(event: U, listener: QueueListener<JobBase<DataType, ResultType, NameType>>[U]): this;
/**
* Returns this instance current default job options.
*/
get defaultJobOptions(): JobsOptions;
get metaValues(): Record<string, string | number>;
/**
* Get library version.
*
* @returns the content of the meta.library field.
*/
getVersion(): Promise<string>;
get repeat(): Promise<Repeat>;
get jobScheduler(): Promise<JobScheduler>;
/**
* Enable and set global concurrency value.
* @param concurrency - Maximum number of simultaneous jobs that the workers can handle.
* For instance, setting this value to 1 ensures that no more than one job
* is processed at any given time. If this limit is not defined, there will be no
* restriction on the number of concurrent jobs.
*/
setGlobalConcurrency(concurrency: number): Promise<number>;
/**
* Enable and set rate limit.
* @param max - Max number of jobs to process in the time period specified in `duration`
* @param duration - Time in milliseconds. During this time, a maximum of `max` jobs will be processed.
*/
setGlobalRateLimit(max: number, duration: number): Promise<number>;
/**
* Remove global concurrency value.
*/
removeGlobalConcurrency(): Promise<number>;
/**
* Remove global rate limit values.
*/
removeGlobalRateLimit(): Promise<number>;
/**
* Adds a new job to the queue.
*
* @param name - Name of the job to be added to the queue.
* @param data - Arbitrary data to append to the job.
* @param opts - Job options that affects how the job is going to be processed.
*/
add(name: NameType, data: DataType, opts?: JobsOptions): Promise<Job<DataType, ResultType, NameType>>;
/**
* addJob is a telemetry free version of the add method, useful in order to wrap it
* with custom telemetry on subclasses.
*
* @param name - Name of the job to be added to the queue.
* @param data - Arbitrary data to append to the job.
* @param opts - Job options that affects how the job is going to be processed.
*
* @returns Job
*/
protected addJob(name: NameType, data: DataType, opts?: JobsOptions): Promise<Job<DataType, ResultType, NameType>>;
/**
* Adds an array of jobs to the queue. This method may be faster than adding
* one job at a time in a sequence.
*
* @param jobs - The array of jobs to add to the queue. Each job is defined by 3
* properties, 'name', 'data' and 'opts'. They follow the same signature as 'Queue.add'.
*/
addBulk(jobs: {
name: NameType;
data: DataType;
opts?: BulkJobOptions;
}[]): Promise<Job<DataType, ResultType, NameType>[]>;
/**
* Upserts a scheduler.
*
* A scheduler is a job factory that creates jobs at a given interval.
* Upserting a scheduler will create a new job scheduler or update an existing one.
* It will also create the first job based on the repeat options and delayed accordingly.
*
* @param key - Unique key for the repeatable job meta.
* @param repeatOpts - Repeat options
* @param jobTemplate - Job template. If provided it will be used for all the jobs
* created by the scheduler.
*
* @returns The next job to be scheduled (would normally be in delayed state).
*/
upsertJobScheduler(jobSchedulerId: NameType, repeatOpts: Omit<RepeatOptions, 'key'>, jobTemplate?: {
name?: NameType;
data?: DataType;
opts?: JobSchedulerTemplateOptions;
}): Promise<Job<DataType, ResultType, NameType>>;
/**
* Pauses the processing of this queue globally.
*
* We use an atomic RENAME operation on the wait queue. Since
* we have blocking calls with BRPOPLPUSH on the wait queue, as long as the queue
* is renamed to 'paused', no new jobs will be processed (the current ones
* will run until finalized).
*
* Adding jobs requires a LUA script to check first if the paused list exist
* and in that case it will add it there instead of the wait list.
*/
pause(): Promise<void>;
/**
* Close the queue instance.
*
*/
close(): Promise<void>;
/**
* Overrides the rate limit to be active for the next jobs.
*
* @param expireTimeMs - expire time in ms of this rate limit.
*/
rateLimit(expireTimeMs: number): Promise<void>;
/**
* Resumes the processing of this queue globally.
*
* The method reverses the pause operation by resuming the processing of the
* queue.
*/
resume(): Promise<void>;
/**
* Returns true if the queue is currently paused.
*/
isPaused(): Promise<boolean>;
/**
* Returns true if the queue is currently maxed.
*/
isMaxed(): Promise<boolean>;
/**
* Get all repeatable meta jobs.
*
* @deprecated This method is deprecated and will be removed in v6. Use getJobSchedulers instead.
*
* @param start - Offset of first job to return.
* @param end - Offset of last job to return.
* @param asc - Determine the order in which jobs are returned based on their
* next execution time.
*/
getRepeatableJobs(start?: number, end?: number, asc?: boolean): Promise<RepeatableJob[]>;
/**
* Get Job Scheduler by id
*
* @param id - identifier of scheduler.
*/
getJobScheduler(id: string): Promise<JobSchedulerJson<DataType> | undefined>;
/**
* Get all Job Schedulers
*
* @param start - Offset of first scheduler to return.
* @param end - Offset of last scheduler to return.
* @param asc - Determine the order in which schedulers are returned based on their
* next execution time.
*/
getJobSchedulers(start?: number, end?: number, asc?: boolean): Promise<JobSchedulerJson<DataType>[]>;
/**
*
* Get the number of job schedulers.
*
* @returns The number of job schedulers.
*/
getJobSchedulersCount(): Promise<number>;
/**
* Removes a repeatable job.
*
* Note: you need to use the exact same repeatOpts when deleting a repeatable job
* than when adding it.
*
* @deprecated This method is deprecated and will be removed in v6. Use removeJobScheduler instead.
*
* @see removeRepeatableByKey
*
* @param name - Job name
* @param repeatOpts - Repeat options
* @param jobId - Job id to remove. If not provided, all jobs with the same repeatOpts
* @returns
*/
removeRepeatable(name: NameType, repeatOpts: RepeatOptions, jobId?: string): Promise<boolean>;
/**
*
* Removes a job scheduler.
*
* @param jobSchedulerId - identifier of the job scheduler.
*
* @returns
*/
removeJobScheduler(jobSchedulerId: string): Promise<boolean>;
/**
* Removes a debounce key.
* @deprecated use removeDeduplicationKey
*
* @param id - debounce identifier
*/
removeDebounceKey(id: string): Promise<number>;
/**
* Removes a deduplication key.
*
* @param id - identifier
*/
removeDeduplicationKey(id: string): Promise<number>;
/**
* Removes rate limit key.
*/
removeRateLimitKey(): Promise<number>;
/**
* Removes a repeatable job by its key. Note that the key is the one used
* to store the repeatable job metadata and not one of the job iterations
* themselves. You can use "getRepeatableJobs" in order to get the keys.
*
* @see getRepeatableJobs
*
* @deprecated This method is deprecated and will be removed in v6. Use removeJobScheduler instead.
*
* @param repeatJobKey - To the repeatable job.
* @returns
*/
removeRepeatableByKey(key: string): Promise<boolean>;
/**
* Removes the given job from the queue as well as all its
* dependencies.
*
* @param jobId - The id of the job to remove
* @param opts - Options to remove a job
* @returns 1 if it managed to remove the job or 0 if the job or
* any of its dependencies were locked.
*/
remove(jobId: string, { removeChildren }?: {
removeChildren?: boolean;
}): Promise<number>;
/**
* Updates the given job's progress.
*
* @param jobId - The id of the job to update
* @param progress - Number or object to be saved as progress.
*/
updateJobProgress(jobId: string, progress: JobProgress): Promise<void>;
/**
* Logs one row of job's log data.
*
* @param jobId - The job id to log against.
* @param logRow - String with log data to be logged.
* @param keepLogs - Max number of log entries to keep (0 for unlimited).
*
* @returns The total number of log entries for this job so far.
*/
addJobLog(jobId: string, logRow: string, keepLogs?: number): Promise<number>;
/**
* Drains the queue, i.e., removes all jobs that are waiting
* or delayed, but not active, completed or failed.
*
* @param delayed - Pass true if it should also clean the
* delayed jobs.
*/
drain(delayed?: boolean): Promise<void>;
/**
* Cleans jobs from a queue. Similar to drain but keeps jobs within a certain
* grace period.
*
* @param grace - The grace period in milliseconds
* @param limit - Max number of jobs to clean
* @param type - The type of job to clean
* Possible values are completed, wait, active, paused, delayed, failed. Defaults to completed.
* @returns Id jobs from the deleted records
*/
clean(grace: number, limit: number, type?: 'completed' | 'wait' | 'waiting' | 'active' | 'paused' | 'prioritized' | 'delayed' | 'failed'): Promise<string[]>;
/**
* Completely destroys the queue and all of its contents irreversibly.
* This method will *pause* the queue and requires that there are no
* active jobs. It is possible to bypass this requirement, i.e. not
* having active jobs using the "force" option.
*
* Note: This operation requires to iterate on all the jobs stored in the queue
* and can be slow for very large queues.
*
* @param opts - Obliterate options.
*/
obliterate(opts?: ObliterateOpts): Promise<void>;
/**
* Retry all the failed or completed jobs.
*
* @param opts - An object with the following properties:
* - count number to limit how many jobs will be moved to wait status per iteration,
* - state failed by default or completed.
* - timestamp from which timestamp to start moving jobs to wait status, default Date.now().
*
* @returns
*/
retryJobs(opts?: {
count?: number;
state?: FinishedStatus;
timestamp?: number;
}): Promise<void>;
/**
* Promote all the delayed jobs.
*
* @param opts - An object with the following properties:
* - count number to limit how many jobs will be moved to wait status per iteration
*
* @returns
*/
promoteJobs(opts?: {
count?: number;
}): Promise<void>;
/**
* Trim the event stream to an approximately maxLength.
*
* @param maxLength -
*/
trimEvents(maxLength: number): Promise<number>;
/**
* Delete old priority helper key.
*/
removeDeprecatedPriorityKey(): Promise<number>;
}
export {};

648
backend/node_modules/bullmq/dist/esm/classes/queue.js generated vendored Normal file
View File

@@ -0,0 +1,648 @@
import { v4 } from 'uuid';
import { Job } from './job';
import { QueueGetters } from './queue-getters';
import { Repeat } from './repeat';
import { SpanKind, TelemetryAttributes } from '../enums';
import { JobScheduler } from './job-scheduler';
import { version } from '../version';
/**
* Queue
*
* This class provides methods to add jobs to a queue and some other high-level
* administration such as pausing or deleting queues.
*
* @typeParam DataType - The type of the data that the job will process.
* @typeParam ResultType - The type of the result of the job.
* @typeParam NameType - The type of the name of the job.
*
* @example
*
* ```typescript
* import { Queue } from 'bullmq';
*
* interface MyDataType {
* foo: string;
* }
*
* interface MyResultType {
* bar: string;
* }
*
* const queue = new Queue<MyDataType, MyResultType, "blue" | "brown">('myQueue');
* ```
*/
export class Queue extends QueueGetters {
    constructor(name, opts, Connection) {
        var _a;
        super(name, Object.assign({}, opts), Connection);
        this.token = v4();
        this.libName = 'bullmq';
        this.jobsOpts = (_a = opts === null || opts === void 0 ? void 0 : opts.defaultJobOptions) !== null && _a !== void 0 ? _a : {};
        this.waitUntilReady()
            .then(client => {
            if (!this.closing && !(opts === null || opts === void 0 ? void 0 : opts.skipMetasUpdate)) {
                return client.hmset(this.keys.meta, this.metaValues);
            }
        })
            .catch(err => {
            // We ignore this error to avoid warnings. The error can still
            // be received by listening to event 'error'
        });
    }
    emit(event, ...args) {
        return super.emit(event, ...args);
    }
    off(eventName, listener) {
        super.off(eventName, listener);
        return this;
    }
    on(event, listener) {
        super.on(event, listener);
        return this;
    }
    once(event, listener) {
        super.once(event, listener);
        return this;
    }
    /**
     * Returns this instance current default job options.
     */
    get defaultJobOptions() {
        return Object.assign({}, this.jobsOpts);
    }
    /**
     * Key/value pairs written into the queue's meta hash: the event-stream
     * max length and the library version string.
     */
    get metaValues() {
        var _a, _b, _c, _d;
        return {
            'opts.maxLenEvents': (_d = (_c = (_b = (_a = this.opts) === null || _a === void 0 ? void 0 : _a.streams) === null || _b === void 0 ? void 0 : _b.events) === null || _c === void 0 ? void 0 : _c.maxLen) !== null && _d !== void 0 ? _d : 10000,
            version: `${this.libName}:${version}`,
        };
    }
    /**
     * Get library version.
     *
     * @returns the content of the meta.library field.
     */
    async getVersion() {
        const client = await this.client;
        return await client.hget(this.keys.meta, 'version');
    }
    get repeat() {
        return new Promise(async (resolve) => {
            if (!this._repeat) {
                this._repeat = new Repeat(this.name, Object.assign(Object.assign({}, this.opts), { connection: await this.client }));
                // BUGFIX: the previous handler was `e => this.emit.bind(this, e)`,
                // which creates a bound function without ever calling it, silently
                // swallowing errors. Forward them to this queue's 'error' event.
                this._repeat.on('error', e => this.emit('error', e));
            }
            resolve(this._repeat);
        });
    }
    get jobScheduler() {
        return new Promise(async (resolve) => {
            if (!this._jobScheduler) {
                this._jobScheduler = new JobScheduler(this.name, Object.assign(Object.assign({}, this.opts), { connection: await this.client }));
                // BUGFIX: same as in the `repeat` getter — actually re-emit the
                // error instead of creating an unused bound function.
                this._jobScheduler.on('error', e => this.emit('error', e));
            }
            resolve(this._jobScheduler);
        });
    }
    /**
     * Enable and set global concurrency value.
     * @param concurrency - Maximum number of simultaneous jobs that the workers can handle.
     * For instance, setting this value to 1 ensures that no more than one job
     * is processed at any given time. If this limit is not defined, there will be no
     * restriction on the number of concurrent jobs.
     */
    async setGlobalConcurrency(concurrency) {
        const client = await this.client;
        return client.hset(this.keys.meta, 'concurrency', concurrency);
    }
    /**
     * Enable and set rate limit.
     * @param max - Max number of jobs to process in the time period specified in `duration`
     * @param duration - Time in milliseconds. During this time, a maximum of `max` jobs will be processed.
     */
    async setGlobalRateLimit(max, duration) {
        const client = await this.client;
        return client.hset(this.keys.meta, 'max', max, 'duration', duration);
    }
    /**
     * Remove global concurrency value.
     */
    async removeGlobalConcurrency() {
        const client = await this.client;
        return client.hdel(this.keys.meta, 'concurrency');
    }
    /**
     * Remove global rate limit values.
     */
    async removeGlobalRateLimit() {
        const client = await this.client;
        return client.hdel(this.keys.meta, 'max', 'duration');
    }
    /**
     * Adds a new job to the queue.
     *
     * @param name - Name of the job to be added to the queue.
     * @param data - Arbitrary data to append to the job.
     * @param opts - Job options that affects how the job is going to be processed.
     */
    async add(name, data, opts) {
        return this.trace(SpanKind.PRODUCER, 'add', `${this.name}.${name}`, async (span, srcPropagationMedatada) => {
            var _a;
            if (srcPropagationMedatada && !((_a = opts === null || opts === void 0 ? void 0 : opts.telemetry) === null || _a === void 0 ? void 0 : _a.omitContext)) {
                const telemetry = {
                    metadata: srcPropagationMedatada,
                };
                opts = Object.assign(Object.assign({}, opts), { telemetry });
            }
            const job = await this.addJob(name, data, opts);
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.JobName]: name,
                [TelemetryAttributes.JobId]: job.id,
            });
            return job;
        });
    }
    /**
     * addJob is a telemetry free version of the add method, useful in order to wrap it
     * with custom telemetry on subclasses.
     *
     * @param name - Name of the job to be added to the queue.
     * @param data - Arbitrary data to append to the job.
     * @param opts - Job options that affects how the job is going to be processed.
     *
     * @returns Job
     */
    async addJob(name, data, opts) {
        if (opts && opts.repeat) {
            if (opts.repeat.endDate) {
                if (+new Date(opts.repeat.endDate) < Date.now()) {
                    throw new Error('End date must be greater than current timestamp');
                }
            }
            return (await this.repeat).updateRepeatableJob(name, data, Object.assign(Object.assign({}, this.jobsOpts), opts), { override: true });
        }
        else {
            const jobId = opts === null || opts === void 0 ? void 0 : opts.jobId;
            // '0' and '0:'-prefixed ids are reserved internally, reject them.
            if (jobId == '0' || (jobId === null || jobId === void 0 ? void 0 : jobId.startsWith('0:'))) {
                throw new Error("JobId cannot be '0' or start with 0:");
            }
            const job = await this.Job.create(this, name, data, Object.assign(Object.assign(Object.assign({}, this.jobsOpts), opts), { jobId }));
            this.emit('waiting', job);
            return job;
        }
    }
    /**
     * Adds an array of jobs to the queue. This method may be faster than adding
     * one job at a time in a sequence.
     *
     * @param jobs - The array of jobs to add to the queue. Each job is defined by 3
     * properties, 'name', 'data' and 'opts'. They follow the same signature as 'Queue.add'.
     */
    async addBulk(jobs) {
        return this.trace(SpanKind.PRODUCER, 'addBulk', this.name, async (span, srcPropagationMedatada) => {
            if (span) {
                span.setAttributes({
                    [TelemetryAttributes.BulkNames]: jobs.map(job => job.name),
                    [TelemetryAttributes.BulkCount]: jobs.length,
                });
            }
            return await this.Job.createBulk(this, jobs.map(job => {
                var _a, _b, _c, _d, _e, _f;
                let telemetry = (_a = job.opts) === null || _a === void 0 ? void 0 : _a.telemetry;
                if (srcPropagationMedatada) {
                    const omitContext = (_c = (_b = job.opts) === null || _b === void 0 ? void 0 : _b.telemetry) === null || _c === void 0 ? void 0 : _c.omitContext;
                    const telemetryMetadata = ((_e = (_d = job.opts) === null || _d === void 0 ? void 0 : _d.telemetry) === null || _e === void 0 ? void 0 : _e.metadata) ||
                        (!omitContext && srcPropagationMedatada);
                    if (telemetryMetadata || omitContext) {
                        telemetry = {
                            metadata: telemetryMetadata,
                            omitContext,
                        };
                    }
                }
                return {
                    name: job.name,
                    data: job.data,
                    opts: Object.assign(Object.assign(Object.assign({}, this.jobsOpts), job.opts), { jobId: (_f = job.opts) === null || _f === void 0 ? void 0 : _f.jobId, telemetry }),
                };
            }));
        });
    }
    /**
     * Upserts a scheduler.
     *
     * A scheduler is a job factory that creates jobs at a given interval.
     * Upserting a scheduler will create a new job scheduler or update an existing one.
     * It will also create the first job based on the repeat options and delayed accordingly.
     *
     * @param key - Unique key for the repeatable job meta.
     * @param repeatOpts - Repeat options
     * @param jobTemplate - Job template. If provided it will be used for all the jobs
     * created by the scheduler.
     *
     * @returns The next job to be scheduled (would normally be in delayed state).
     */
    async upsertJobScheduler(jobSchedulerId, repeatOpts, jobTemplate) {
        var _a, _b;
        if (repeatOpts.endDate) {
            if (+new Date(repeatOpts.endDate) < Date.now()) {
                throw new Error('End date must be greater than current timestamp');
            }
        }
        return (await this.jobScheduler).upsertJobScheduler(jobSchedulerId, repeatOpts, (_a = jobTemplate === null || jobTemplate === void 0 ? void 0 : jobTemplate.name) !== null && _a !== void 0 ? _a : jobSchedulerId, (_b = jobTemplate === null || jobTemplate === void 0 ? void 0 : jobTemplate.data) !== null && _b !== void 0 ? _b : {}, Object.assign(Object.assign({}, this.jobsOpts), jobTemplate === null || jobTemplate === void 0 ? void 0 : jobTemplate.opts), { override: true });
    }
    /**
     * Pauses the processing of this queue globally.
     *
     * We use an atomic RENAME operation on the wait queue. Since
     * we have blocking calls with BRPOPLPUSH on the wait queue, as long as the queue
     * is renamed to 'paused', no new jobs will be processed (the current ones
     * will run until finalized).
     *
     * Adding jobs requires a LUA script to check first if the paused list exist
     * and in that case it will add it there instead of the wait list.
     */
    async pause() {
        await this.trace(SpanKind.INTERNAL, 'pause', this.name, async () => {
            await this.scripts.pause(true);
            this.emit('paused');
        });
    }
    /**
     * Close the queue instance.
     *
     */
    async close() {
        await this.trace(SpanKind.INTERNAL, 'close', this.name, async () => {
            if (!this.closing) {
                if (this._repeat) {
                    await this._repeat.close();
                }
            }
            await super.close();
        });
    }
    /**
     * Overrides the rate limit to be active for the next jobs.
     *
     * @param expireTimeMs - expire time in ms of this rate limit.
     */
    async rateLimit(expireTimeMs) {
        await this.trace(SpanKind.INTERNAL, 'rateLimit', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.QueueRateLimit]: expireTimeMs,
            });
            await this.client.then(client => client.set(this.keys.limiter, Number.MAX_SAFE_INTEGER, 'PX', expireTimeMs));
        });
    }
    /**
     * Resumes the processing of this queue globally.
     *
     * The method reverses the pause operation by resuming the processing of the
     * queue.
     */
    async resume() {
        await this.trace(SpanKind.INTERNAL, 'resume', this.name, async () => {
            await this.scripts.pause(false);
            this.emit('resumed');
        });
    }
    /**
     * Returns true if the queue is currently paused.
     */
    async isPaused() {
        const client = await this.client;
        const pausedKeyExists = await client.hexists(this.keys.meta, 'paused');
        return pausedKeyExists === 1;
    }
    /**
     * Returns true if the queue is currently maxed.
     */
    isMaxed() {
        return this.scripts.isMaxed();
    }
    /**
     * Get all repeatable meta jobs.
     *
     * @deprecated This method is deprecated and will be removed in v6. Use getJobSchedulers instead.
     *
     * @param start - Offset of first job to return.
     * @param end - Offset of last job to return.
     * @param asc - Determine the order in which jobs are returned based on their
     * next execution time.
     */
    async getRepeatableJobs(start, end, asc) {
        return (await this.repeat).getRepeatableJobs(start, end, asc);
    }
    /**
     * Get Job Scheduler by id
     *
     * @param id - identifier of scheduler.
     */
    async getJobScheduler(id) {
        return (await this.jobScheduler).getScheduler(id);
    }
    /**
     * Get all Job Schedulers
     *
     * @param start - Offset of first scheduler to return.
     * @param end - Offset of last scheduler to return.
     * @param asc - Determine the order in which schedulers are returned based on their
     * next execution time.
     */
    async getJobSchedulers(start, end, asc) {
        return (await this.jobScheduler).getJobSchedulers(start, end, asc);
    }
    /**
     *
     * Get the number of job schedulers.
     *
     * @returns The number of job schedulers.
     */
    async getJobSchedulersCount() {
        return (await this.jobScheduler).getSchedulersCount();
    }
    /**
     * Removes a repeatable job.
     *
     * Note: you need to use the exact same repeatOpts when deleting a repeatable job
     * than when adding it.
     *
     * @deprecated This method is deprecated and will be removed in v6. Use removeJobScheduler instead.
     *
     * @see removeRepeatableByKey
     *
     * @param name - Job name
     * @param repeatOpts - Repeat options
     * @param jobId - Job id to remove. If not provided, all jobs with the same repeatOpts
     * @returns
     */
    async removeRepeatable(name, repeatOpts, jobId) {
        return this.trace(SpanKind.INTERNAL, 'removeRepeatable', `${this.name}.${name}`, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.JobName]: name,
                [TelemetryAttributes.JobId]: jobId,
            });
            const repeat = await this.repeat;
            const removed = await repeat.removeRepeatable(name, repeatOpts, jobId);
            return !removed;
        });
    }
    /**
     *
     * Removes a job scheduler.
     *
     * @param jobSchedulerId - identifier of the job scheduler.
     *
     * @returns
     */
    async removeJobScheduler(jobSchedulerId) {
        const jobScheduler = await this.jobScheduler;
        const removed = await jobScheduler.removeJobScheduler(jobSchedulerId);
        return !removed;
    }
    /**
     * Removes a debounce key.
     * @deprecated use removeDeduplicationKey
     *
     * @param id - debounce identifier
     */
    async removeDebounceKey(id) {
        return this.trace(SpanKind.INTERNAL, 'removeDebounceKey', `${this.name}`, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.JobKey]: id,
            });
            const client = await this.client;
            return await client.del(`${this.keys.de}:${id}`);
        });
    }
    /**
     * Removes a deduplication key.
     *
     * @param id - identifier
     */
    async removeDeduplicationKey(id) {
        return this.trace(SpanKind.INTERNAL, 'removeDeduplicationKey', `${this.name}`, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.DeduplicationKey]: id,
            });
            const client = await this.client;
            return client.del(`${this.keys.de}:${id}`);
        });
    }
    /**
     * Removes rate limit key.
     */
    async removeRateLimitKey() {
        const client = await this.client;
        return client.del(this.keys.limiter);
    }
    /**
     * Removes a repeatable job by its key. Note that the key is the one used
     * to store the repeatable job metadata and not one of the job iterations
     * themselves. You can use "getRepeatableJobs" in order to get the keys.
     *
     * @see getRepeatableJobs
     *
     * @deprecated This method is deprecated and will be removed in v6. Use removeJobScheduler instead.
     *
     * @param repeatJobKey - To the repeatable job.
     * @returns
     */
    async removeRepeatableByKey(key) {
        return this.trace(SpanKind.INTERNAL, 'removeRepeatableByKey', `${this.name}`, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.JobKey]: key,
            });
            const repeat = await this.repeat;
            const removed = await repeat.removeRepeatableByKey(key);
            return !removed;
        });
    }
    /**
     * Removes the given job from the queue as well as all its
     * dependencies.
     *
     * @param jobId - The id of the job to remove
     * @param opts - Options to remove a job
     * @returns 1 if it managed to remove the job or 0 if the job or
     * any of its dependencies were locked.
     */
    async remove(jobId, { removeChildren = true } = {}) {
        return this.trace(SpanKind.INTERNAL, 'remove', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.JobId]: jobId,
                [TelemetryAttributes.JobOptions]: JSON.stringify({
                    removeChildren,
                }),
            });
            const code = await this.scripts.remove(jobId, removeChildren);
            if (code === 1) {
                this.emit('removed', jobId);
            }
            return code;
        });
    }
    /**
     * Updates the given job's progress.
     *
     * @param jobId - The id of the job to update
     * @param progress - Number or object to be saved as progress.
     */
    async updateJobProgress(jobId, progress) {
        await this.trace(SpanKind.INTERNAL, 'updateJobProgress', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.JobId]: jobId,
                [TelemetryAttributes.JobProgress]: JSON.stringify(progress),
            });
            await this.scripts.updateProgress(jobId, progress);
            this.emit('progress', jobId, progress);
        });
    }
    /**
     * Logs one row of job's log data.
     *
     * @param jobId - The job id to log against.
     * @param logRow - String with log data to be logged.
     * @param keepLogs - Max number of log entries to keep (0 for unlimited).
     *
     * @returns The total number of log entries for this job so far.
     */
    async addJobLog(jobId, logRow, keepLogs) {
        return Job.addJobLog(this, jobId, logRow, keepLogs);
    }
    /**
     * Drains the queue, i.e., removes all jobs that are waiting
     * or delayed, but not active, completed or failed.
     *
     * @param delayed - Pass true if it should also clean the
     * delayed jobs.
     */
    async drain(delayed = false) {
        await this.trace(SpanKind.INTERNAL, 'drain', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.QueueDrainDelay]: delayed,
            });
            await this.scripts.drain(delayed);
        });
    }
    /**
     * Cleans jobs from a queue. Similar to drain but keeps jobs within a certain
     * grace period.
     *
     * @param grace - The grace period in milliseconds
     * @param limit - Max number of jobs to clean
     * @param type - The type of job to clean
     * Possible values are completed, wait, active, paused, delayed, failed. Defaults to completed.
     * @returns Id jobs from the deleted records
     */
    async clean(grace, limit, type = 'completed') {
        return this.trace(SpanKind.INTERNAL, 'clean', this.name, async (span) => {
            const maxCount = limit || Infinity;
            const maxCountPerCall = Math.min(10000, maxCount);
            const timestamp = Date.now() - grace;
            let deletedCount = 0;
            const deletedJobsIds = [];
            // Normalize 'waiting' to 'wait' for consistency with internal Redis keys
            const normalizedType = type === 'waiting' ? 'wait' : type;
            while (deletedCount < maxCount) {
                const jobsIds = await this.scripts.cleanJobsInSet(normalizedType, timestamp, maxCountPerCall);
                this.emit('cleaned', jobsIds, normalizedType);
                deletedCount += jobsIds.length;
                deletedJobsIds.push(...jobsIds);
                if (jobsIds.length < maxCountPerCall) {
                    break;
                }
            }
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.QueueGrace]: grace,
                [TelemetryAttributes.JobType]: type,
                [TelemetryAttributes.QueueCleanLimit]: maxCount,
                [TelemetryAttributes.JobIds]: deletedJobsIds,
            });
            return deletedJobsIds;
        });
    }
    /**
     * Completely destroys the queue and all of its contents irreversibly.
     * This method will *pause* the queue and requires that there are no
     * active jobs. It is possible to bypass this requirement, i.e. not
     * having active jobs using the "force" option.
     *
     * Note: This operation requires to iterate on all the jobs stored in the queue
     * and can be slow for very large queues.
     *
     * @param opts - Obliterate options.
     */
    async obliterate(opts) {
        await this.trace(SpanKind.INTERNAL, 'obliterate', this.name, async () => {
            await this.pause();
            let cursor = 0;
            do {
                cursor = await this.scripts.obliterate(Object.assign({ force: false, count: 1000 }, opts));
            } while (cursor);
        });
    }
    /**
     * Retry all the failed or completed jobs.
     *
     * @param opts - An object with the following properties:
     * - count number to limit how many jobs will be moved to wait status per iteration,
     * - state failed by default or completed.
     * - timestamp from which timestamp to start moving jobs to wait status, default Date.now().
     *
     * @returns
     */
    async retryJobs(opts = {}) {
        await this.trace(SpanKind.PRODUCER, 'retryJobs', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.QueueOptions]: JSON.stringify(opts),
            });
            let cursor = 0;
            do {
                cursor = await this.scripts.retryJobs(opts.state, opts.count, opts.timestamp);
            } while (cursor);
        });
    }
    /**
     * Promote all the delayed jobs.
     *
     * @param opts - An object with the following properties:
     * - count number to limit how many jobs will be moved to wait status per iteration
     *
     * @returns
     */
    async promoteJobs(opts = {}) {
        await this.trace(SpanKind.INTERNAL, 'promoteJobs', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.QueueOptions]: JSON.stringify(opts),
            });
            let cursor = 0;
            do {
                cursor = await this.scripts.promoteJobs(opts.count);
            } while (cursor);
        });
    }
    /**
     * Trim the event stream to an approximately maxLength.
     *
     * @param maxLength -
     */
    async trimEvents(maxLength) {
        return this.trace(SpanKind.INTERNAL, 'trimEvents', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.QueueEventMaxLength]: maxLength,
            });
            const client = await this.client;
            return await client.xtrim(this.keys.events, 'MAXLEN', '~', maxLength);
        });
    }
    /**
     * Delete old priority helper key.
     */
    async removeDeprecatedPriorityKey() {
        const client = await this.client;
        return client.del(this.toKey('priority'));
    }
}
//# sourceMappingURL=queue.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,49 @@
import { EventEmitter } from 'events';
import { ConnectionOptions, RedisClient } from '../interfaces';
/**
 * Feature flags describing what the connected Redis server supports.
 * NOTE(review): exact semantics are decided by version checks in the
 * implementation file (defaults there are canDoubleTimeout=false,
 * canBlockFor1Ms=true) — confirm against redis-connection.js.
 */
interface RedisCapabilities {
    // Whether blocking-call timeouts may be doubled — presumably a
    // workaround gated on server version; verify in implementation.
    canDoubleTimeout: boolean;
    // Whether the server honors a 1 ms blocking timeout; verify in
    // implementation.
    canBlockFor1Ms: boolean;
}
/**
 * A raw command definition passed to RedisConnection.loadCommands to be
 * registered on the client.
 */
export interface RawCommand {
    // Script/command source text (presumably a Lua script body — confirm
    // against the scripts module).
    content: string;
    // Name the command is registered under on the client.
    name: string;
    // Number of Redis keys the command expects.
    keys: number;
}
/**
 * Wraps an ioredis client (standalone or cluster), handling connection
 * lifecycle, version/capability detection and command (script) loading.
 * Re-emits client 'error', 'close' and 'ready' events.
 */
export declare class RedisConnection extends EventEmitter {
    // Options given at construction (shared/blocking/version-check flags).
    private readonly extraOptions?;
    // Lowest Redis server version accepted at all.
    static minimumVersion: string;
    // Lowest Redis server version recommended for full functionality.
    static recommendedMinimumVersion: string;
    // True once close() has been initiated.
    closing: boolean;
    // Capabilities detected for the connected server.
    capabilities: RedisCapabilities;
    // Connection lifecycle state.
    status: 'initializing' | 'ready' | 'closing' | 'closed';
    protected _client: RedisClient;
    private readonly opts;
    // Promise resolved when the initial connection setup completes.
    private readonly initializing;
    private version;
    protected packageVersion: string;
    private skipVersionCheck;
    // Bound client event handlers (error/close/ready forwarding).
    private handleClientError;
    private handleClientClose;
    private handleClientReady;
    /**
     * @param opts - connection options or an existing ioredis instance.
     * @param extraOptions - shared: client is shared between consumers;
     * blocking: connection is used for blocking commands (forces
     * maxRetriesPerRequest = null); skipVersionCheck / skipWaitingForReady:
     * opt out of startup checks.
     */
    constructor(opts: ConnectionOptions, extraOptions?: {
        shared?: boolean;
        blocking?: boolean;
        skipVersionCheck?: boolean;
        skipWaitingForReady?: boolean;
    });
    // Validates maxRetriesPerRequest for blocking connections (warns or throws).
    private checkBlockingOptions;
    /**
     * Waits for a redis client to be ready.
     * @param redis - client
     */
    static waitUntilReady(client: RedisClient): Promise<void>;
    // Resolves with the underlying client once initialization finished.
    get client(): Promise<RedisClient>;
    // Registers the provided (or bundled) raw commands on the client.
    protected loadCommands(packageVersion: string, providedScripts?: Record<string, RawCommand>): void;
    private init;
    disconnect(wait?: boolean): Promise<void>;
    reconnect(): Promise<void>;
    close(force?: boolean): Promise<void>;
    private getRedisVersion;
    // Server version string detected during initialization.
    get redisVersion(): string;
}
export {};

View File

@@ -0,0 +1,273 @@
import { __rest } from "tslib";
import { EventEmitter } from 'events';
import { default as IORedis } from 'ioredis';
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
import { CONNECTION_CLOSED_ERROR_MSG } from 'ioredis/built/utils';
import { decreaseMaxListeners, increaseMaxListeners, isNotConnectionError, isRedisCluster, isRedisInstance, isRedisVersionLowerThan, } from '../utils';
import { version as packageVersion } from '../version';
import * as scripts from '../scripts';
// Warning shown when plain connection options carry a non-null
// maxRetriesPerRequest on a blocking connection: BullMQ overrides it.
const overrideMessage = [
    'BullMQ: WARNING! Your redis options maxRetriesPerRequest must be null',
    'and will be overridden by BullMQ.',
].join(' ');
// Error used when an externally-provided ioredis instance has a non-null
// maxRetriesPerRequest on a blocking connection (thrown, not overridden,
// since BullMQ does not own the instance).
const deprecationMessage = 'BullMQ: Your redis options maxRetriesPerRequest must be null.';
/**
 * RedisConnection
 *
 * Wraps an ioredis client (created from options or supplied externally) and
 * manages its lifecycle for BullMQ: option normalization, Lua command loading,
 * server version checks, and re-emission of 'error'/'close'/'ready' events.
 */
export class RedisConnection extends EventEmitter {
    constructor(opts, extraOptions) {
        super();
        this.extraOptions = extraOptions;
        // Conservative defaults; refined in init() once the server version is known.
        this.capabilities = {
            canDoubleTimeout: false,
            canBlockFor1Ms: true,
        };
        this.status = 'initializing';
        this.packageVersion = packageVersion;
        // Set extra options defaults
        this.extraOptions = Object.assign({ shared: false, blocking: true, skipVersionCheck: false, skipWaitingForReady: false }, extraOptions);
        if (!isRedisInstance(opts)) {
            this.checkBlockingOptions(overrideMessage, opts);
            this.opts = Object.assign({ port: 6379, host: '127.0.0.1', retryStrategy: function (times) {
                    return Math.max(Math.min(Math.exp(times), 20000), 1000);
                } }, opts);
            if (this.extraOptions.blocking) {
                // Blocking commands must never give up retrying a request.
                this.opts.maxRetriesPerRequest = null;
            }
        }
        else {
            this._client = opts;
            // Test if the redis instance is using keyPrefix
            // and if so, throw an error.
            if (this._client.options.keyPrefix) {
                throw new Error('BullMQ: ioredis does not support ioredis prefixes, use the prefix option instead.');
            }
            if (isRedisCluster(this._client)) {
                this.opts = this._client.options.redisOptions;
            }
            else {
                this.opts = this._client.options;
            }
            this.checkBlockingOptions(deprecationMessage, this.opts, true);
        }
        this.skipVersionCheck =
            (extraOptions === null || extraOptions === void 0 ? void 0 : extraOptions.skipVersionCheck) ||
                !!(this.opts && this.opts.skipVersionCheck);
        // Handlers are stored as properties so close() can detach them later.
        this.handleClientError = (err) => {
            this.emit('error', err);
        };
        this.handleClientClose = () => {
            this.emit('close');
        };
        this.handleClientReady = () => {
            this.emit('ready');
        };
        this.initializing = this.init();
        // Surface init failures as 'error' events to avoid unhandled rejections.
        this.initializing.catch(err => this.emit('error', err));
    }
    // Warn (or throw, for externally-owned clients) when maxRetriesPerRequest is
    // set on a connection that will be used for blocking commands.
    checkBlockingOptions(msg, options, throwError = false) {
        if (this.extraOptions.blocking && options && options.maxRetriesPerRequest) {
            if (throwError) {
                throw new Error(msg);
            }
            else {
                console.error(msg);
            }
        }
    }
    /**
     * Waits for a redis client to be ready.
     * @param redis - client
     */
    static async waitUntilReady(client) {
        if (client.status === 'ready') {
            return;
        }
        if (client.status === 'wait') {
            return client.connect();
        }
        if (client.status === 'end') {
            throw new Error(CONNECTION_CLOSED_ERROR_MSG);
        }
        let handleReady;
        let handleEnd;
        let handleError;
        try {
            await new Promise((resolve, reject) => {
                // Remember the last error so an 'end' can be rejected with a cause.
                let lastError;
                handleError = (err) => {
                    lastError = err;
                };
                handleReady = () => {
                    resolve();
                };
                handleEnd = () => {
                    if (client.status !== 'end') {
                        reject(lastError || new Error(CONNECTION_CLOSED_ERROR_MSG));
                    }
                    else {
                        if (lastError) {
                            reject(lastError);
                        }
                        else {
                            // when custom 'end' status is set we already closed
                            resolve();
                        }
                    }
                };
                increaseMaxListeners(client, 3);
                client.once('ready', handleReady);
                client.on('end', handleEnd);
                client.once('error', handleError);
            });
        }
        finally {
            // Always detach the temporary listeners regardless of outcome.
            client.removeListener('end', handleEnd);
            client.removeListener('error', handleError);
            client.removeListener('ready', handleReady);
            decreaseMaxListeners(client, 3);
        }
    }
    // Resolves with the underlying ioredis client once init() has finished.
    get client() {
        return this.initializing;
    }
    // Defines the package-versioned Lua commands on the client, skipping any
    // command that is already defined.
    loadCommands(packageVersion, providedScripts) {
        const finalScripts = providedScripts || scripts;
        for (const property in finalScripts) {
            // Only define the command if not already defined
            const commandName = `${finalScripts[property].name}:${packageVersion}`;
            if (!this._client[commandName]) {
                this._client.defineCommand(commandName, {
                    numberOfKeys: finalScripts[property].keys,
                    lua: finalScripts[property].content,
                });
            }
        }
    }
    // Creates the client (if not supplied), wires event handlers, optionally
    // waits for readiness, loads commands and records server capabilities.
    async init() {
        if (!this._client) {
            const _a = this.opts, { url } = _a, rest = __rest(_a, ["url"]);
            this._client = url ? new IORedis(url, rest) : new IORedis(rest);
        }
        increaseMaxListeners(this._client, 3);
        this._client.on('error', this.handleClientError);
        // ioredis treats connection errors as a different event ('close')
        this._client.on('close', this.handleClientClose);
        this._client.on('ready', this.handleClientReady);
        if (!this.extraOptions.skipWaitingForReady) {
            await RedisConnection.waitUntilReady(this._client);
        }
        this.loadCommands(this.packageVersion);
        if (this._client['status'] !== 'end') {
            this.version = await this.getRedisVersion();
            if (this.skipVersionCheck !== true && !this.closing) {
                if (isRedisVersionLowerThan(this.version, RedisConnection.minimumVersion)) {
                    throw new Error(`Redis version needs to be greater or equal than ${RedisConnection.minimumVersion} ` +
                        `Current: ${this.version}`);
                }
                if (isRedisVersionLowerThan(this.version, RedisConnection.recommendedMinimumVersion)) {
                    console.warn(`It is highly recommended to use a minimum Redis version of ${RedisConnection.recommendedMinimumVersion}
           Current: ${this.version}`);
                }
            }
            this.capabilities = {
                canDoubleTimeout: !isRedisVersionLowerThan(this.version, '6.0.0'),
                canBlockFor1Ms: !isRedisVersionLowerThan(this.version, '7.0.8'),
            };
            this.status = 'ready';
        }
        return this._client;
    }
    // Disconnects the client; when `wait` is true, resolves only after the
    // client emits 'end' (or rejects on 'error').
    async disconnect(wait = true) {
        const client = await this.client;
        if (client.status !== 'end') {
            let _resolve, _reject;
            if (!wait) {
                return client.disconnect();
            }
            const disconnecting = new Promise((resolve, reject) => {
                increaseMaxListeners(client, 2);
                client.once('end', resolve);
                client.once('error', reject);
                _resolve = resolve;
                _reject = reject;
            });
            client.disconnect();
            try {
                await disconnecting;
            }
            finally {
                decreaseMaxListeners(client, 2);
                client.removeListener('end', _resolve);
                client.removeListener('error', _reject);
            }
        }
    }
    async reconnect() {
        const client = await this.client;
        return client.connect();
    }
    // Closes the connection (quit when ready, hard disconnect when still
    // initializing or forced); shared clients are left open, only our
    // listeners are removed.
    async close(force = false) {
        if (!this.closing) {
            const status = this.status;
            this.status = 'closing';
            this.closing = true;
            try {
                if (status === 'ready') {
                    // Not sure if we need to wait for this
                    await this.initializing;
                }
                if (!this.extraOptions.shared) {
                    if (status == 'initializing' || force) {
                        // If we have not still connected to Redis, we need to disconnect.
                        this._client.disconnect();
                    }
                    else {
                        await this._client.quit();
                    }
                    // As IORedis does not update this status properly, we do it ourselves.
                    this._client['status'] = 'end';
                }
            }
            catch (error) {
                if (isNotConnectionError(error)) {
                    throw error;
                }
            }
            finally {
                this._client.off('error', this.handleClientError);
                this._client.off('close', this.handleClientClose);
                this._client.off('ready', this.handleClientReady);
                decreaseMaxListeners(this._client, 3);
                this.removeAllListeners();
                this.status = 'closed';
            }
        }
    }
    // Parses the server's INFO output for redis_version, warning when the
    // eviction policy is not "noeviction". Returns minimumVersion when version
    // checks are skipped.
    async getRedisVersion() {
        if (this.skipVersionCheck) {
            return RedisConnection.minimumVersion;
        }
        const doc = await this._client.info();
        const redisPrefix = 'redis_version:';
        const maxMemoryPolicyPrefix = 'maxmemory_policy:';
        const lines = doc.split(/\r?\n/);
        let redisVersion;
        for (let i = 0; i < lines.length; i++) {
            if (lines[i].indexOf(maxMemoryPolicyPrefix) === 0) {
                const maxMemoryPolicy = lines[i].substr(maxMemoryPolicyPrefix.length);
                if (maxMemoryPolicy !== 'noeviction') {
                    console.warn(`IMPORTANT! Eviction policy is ${maxMemoryPolicy}. It should be "noeviction"`);
                }
            }
            if (lines[i].indexOf(redisPrefix) === 0) {
                redisVersion = lines[i].substr(redisPrefix.length);
            }
        }
        return redisVersion;
    }
    get redisVersion() {
        return this.version;
    }
}
RedisConnection.minimumVersion = '5.0.0';
RedisConnection.recommendedMinimumVersion = '6.2.0';
//# sourceMappingURL=redis-connection.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,25 @@
import { RepeatBaseOptions, RepeatableJob, RepeatOptions } from '../interfaces';
import { JobsOptions } from '../types';
import { Job } from './job';
import { QueueBase } from './queue-base';
import { RedisConnection } from './redis-connection';
/**
 * Repeat
 *
 * Manages legacy repeatable jobs: computes the next iteration timestamp via a
 * configurable repeat strategy and (re)schedules the next delayed job.
 */
export declare class Repeat extends QueueBase {
    private repeatStrategy;
    private repeatKeyHashAlgorithm;
    constructor(name: string, opts: RepeatBaseOptions, Connection?: typeof RedisConnection);
    updateRepeatableJob<T = any, R = any, N extends string = string>(name: N, data: T, opts: JobsOptions, { override }: {
        override: boolean;
    }): Promise<Job<T, R, N> | undefined>;
    private createNextJob;
    getRepeatJobKey<T = any, N extends string = string>(name: N, nextMillis: number, repeatJobKey: string, data: T): string;
    removeRepeatable(name: string, repeat: RepeatOptions, jobId?: string): Promise<number>;
    removeRepeatableByKey(repeatJobKey: string): Promise<number>;
    private getRepeatableData;
    private keyToData;
    getRepeatableJobs(start?: number, end?: number, asc?: boolean): Promise<RepeatableJob[]>;
    getRepeatableCount(): Promise<number>;
    private hash;
    private getRepeatDelayedJobId;
    private getRepeatJobId;
}
export declare const getNextMillis: (millis: number, opts: RepeatOptions) => number | undefined;

199
backend/node_modules/bullmq/dist/esm/classes/repeat.js generated vendored Normal file
View File

@@ -0,0 +1,199 @@
import { __rest } from "tslib";
import { parseExpression } from 'cron-parser';
import { createHash } from 'crypto';
import { QueueBase } from './queue-base';
/**
 * Repeat
 *
 * Implements legacy repeatable jobs on top of QueueBase: each iteration adds a
 * delayed job whose id encodes the repeat key and next fire timestamp.
 */
export class Repeat extends QueueBase {
    constructor(name, opts, Connection) {
        super(name, opts, Connection);
        this.repeatStrategy =
            (opts.settings && opts.settings.repeatStrategy) || getNextMillis;
        this.repeatKeyHashAlgorithm =
            (opts.settings && opts.settings.repeatKeyHashAlgorithm) || 'md5';
    }
    // Computes the next iteration and registers/updates the repeatable job,
    // returning the created delayed Job (or undefined when limit/endDate is
    // reached or there is no next timestamp).
    async updateRepeatableJob(name, data, opts, { override }) {
        var _a, _b;
        // Backwards compatibility for repeatable jobs for versions <= 3.0.0
        const repeatOpts = Object.assign({}, opts.repeat);
        (_a = repeatOpts.pattern) !== null && _a !== void 0 ? _a : (repeatOpts.pattern = repeatOpts.cron);
        delete repeatOpts.cron;
        // Check if we reached the limit of the repeatable job's iterations
        const iterationCount = repeatOpts.count ? repeatOpts.count + 1 : 1;
        if (typeof repeatOpts.limit !== 'undefined' &&
            iterationCount > repeatOpts.limit) {
            return;
        }
        // Check if we reached the end date of the repeatable job
        let now = Date.now();
        const { endDate } = repeatOpts;
        if (endDate && now > new Date(endDate).getTime()) {
            return;
        }
        const prevMillis = opts.prevMillis || 0;
        now = prevMillis < now ? now : prevMillis;
        const nextMillis = await this.repeatStrategy(now, repeatOpts, name);
        const { every, pattern } = repeatOpts;
        const hasImmediately = Boolean((every || pattern) && repeatOpts.immediately);
        const offset = hasImmediately && every ? now - nextMillis : undefined;
        if (nextMillis) {
            // We store the undecorated opts.jobId into the repeat options
            if (!prevMillis && opts.jobId) {
                repeatOpts.jobId = opts.jobId;
            }
            const legacyRepeatKey = getRepeatConcatOptions(name, repeatOpts);
            const newRepeatKey = (_b = opts.repeat.key) !== null && _b !== void 0 ? _b : this.hash(legacyRepeatKey);
            let repeatJobKey;
            if (override) {
                repeatJobKey = await this.scripts.addRepeatableJob(newRepeatKey, nextMillis, {
                    name,
                    endDate: endDate ? new Date(endDate).getTime() : undefined,
                    tz: repeatOpts.tz,
                    pattern,
                    every,
                }, legacyRepeatKey);
            }
            else {
                const client = await this.client;
                repeatJobKey = await this.scripts.updateRepeatableJobMillis(client, newRepeatKey, nextMillis, legacyRepeatKey);
            }
            const { immediately } = repeatOpts, filteredRepeatOpts = __rest(repeatOpts, ["immediately"]);
            return this.createNextJob(name, nextMillis, repeatJobKey, Object.assign(Object.assign({}, opts), { repeat: Object.assign({ offset }, filteredRepeatOpts) }), data, iterationCount, hasImmediately);
        }
    }
    // Adds the delayed job for the next iteration with a deterministic job id.
    async createNextJob(name, nextMillis, repeatJobKey, opts, data, currentCount, hasImmediately) {
        //
        // Generate unique job id for this iteration.
        //
        const jobId = this.getRepeatJobKey(name, nextMillis, repeatJobKey, data);
        const now = Date.now();
        const delay = nextMillis + (opts.repeat.offset ? opts.repeat.offset : 0) - now;
        const mergedOpts = Object.assign(Object.assign({}, opts), { jobId, delay: delay < 0 || hasImmediately ? 0 : delay, timestamp: now, prevMillis: nextMillis, repeatJobKey });
        mergedOpts.repeat = Object.assign(Object.assign({}, opts.repeat), { count: currentCount });
        return this.Job.create(this, name, data, mergedOpts);
    }
    // TODO: remove legacy code in next breaking change
    getRepeatJobKey(name, nextMillis, repeatJobKey, data) {
        // Keys with more than two ':' segments come from the legacy concatenated
        // format and use the hashed-namespace id scheme.
        if (repeatJobKey.split(':').length > 2) {
            return this.getRepeatJobId({
                name: name,
                nextMillis: nextMillis,
                namespace: this.hash(repeatJobKey),
                jobId: data === null || data === void 0 ? void 0 : data.id,
            });
        }
        return this.getRepeatDelayedJobId({
            customKey: repeatJobKey,
            nextMillis,
        });
    }
    // Removes a repeatable job by reconstructing both its legacy and hashed keys.
    async removeRepeatable(name, repeat, jobId) {
        var _a;
        const repeatConcatOptions = getRepeatConcatOptions(name, Object.assign(Object.assign({}, repeat), { jobId }));
        const repeatJobKey = (_a = repeat.key) !== null && _a !== void 0 ? _a : this.hash(repeatConcatOptions);
        const legacyRepeatJobId = this.getRepeatJobId({
            name,
            nextMillis: '',
            namespace: this.hash(repeatConcatOptions),
            jobId: jobId !== null && jobId !== void 0 ? jobId : repeat.jobId,
            key: repeat.key,
        });
        return this.scripts.removeRepeatable(legacyRepeatJobId, repeatConcatOptions, repeatJobKey);
    }
    async removeRepeatableByKey(repeatJobKey) {
        const data = this.keyToData(repeatJobKey);
        const legacyRepeatJobId = this.getRepeatJobId({
            name: data.name,
            nextMillis: '',
            namespace: this.hash(repeatJobKey),
            jobId: data.id,
        });
        return this.scripts.removeRepeatable(legacyRepeatJobId, '', repeatJobKey);
    }
    // Reads a repeatable's stored hash; falls back to decoding the legacy key.
    async getRepeatableData(client, key, next) {
        const jobData = await client.hgetall(this.toKey('repeat:' + key));
        if (jobData) {
            return {
                key,
                name: jobData.name,
                endDate: parseInt(jobData.endDate) || null,
                tz: jobData.tz || null,
                pattern: jobData.pattern || null,
                every: jobData.every || null,
                next,
            };
        }
        return this.keyToData(key, next);
    }
    // Decodes a legacy concatenated key "name:id:endDate:tz:pattern...".
    keyToData(key, next) {
        const data = key.split(':');
        const pattern = data.slice(4).join(':') || null;
        return {
            key,
            name: data[0],
            id: data[1] || null,
            endDate: parseInt(data[2]) || null,
            tz: data[3] || null,
            pattern,
            next,
        };
    }
    // Lists repeatables with their next fire time, paginated over the zset.
    async getRepeatableJobs(start = 0, end = -1, asc = false) {
        const client = await this.client;
        const key = this.keys.repeat;
        const result = asc
            ? await client.zrange(key, start, end, 'WITHSCORES')
            : await client.zrevrange(key, start, end, 'WITHSCORES');
        const jobs = [];
        for (let i = 0; i < result.length; i += 2) {
            jobs.push(this.getRepeatableData(client, result[i], parseInt(result[i + 1])));
        }
        return Promise.all(jobs);
    }
    async getRepeatableCount() {
        const client = await this.client;
        return client.zcard(this.toKey('repeat'));
    }
    // Hex digest of str using the configured algorithm (md5 by default).
    hash(str) {
        return createHash(this.repeatKeyHashAlgorithm).update(str).digest('hex');
    }
    getRepeatDelayedJobId({ nextMillis, customKey, }) {
        return `repeat:${customKey}:${nextMillis}`;
    }
    getRepeatJobId({ name, nextMillis, namespace, jobId, key, }) {
        const checksum = key !== null && key !== void 0 ? key : this.hash(`${name}${jobId || ''}${namespace}`);
        return `repeat:${checksum}:${nextMillis}`;
    }
}
// Builds the legacy concatenated repeat key: "name:jobId:endDate:tz:suffix",
// where suffix is the cron pattern or, failing that, the stringified `every`.
function getRepeatConcatOptions(name, repeat) {
    const parts = [
        name,
        repeat.jobId ? repeat.jobId : '',
        repeat.endDate ? new Date(repeat.endDate).getTime() : '',
        repeat.tz || '',
        (repeat.pattern ? repeat.pattern : String(repeat.every)) || '',
    ];
    return parts.join(':');
}
// Default repeat strategy. For `every`, snaps to the previous interval
// boundary and advances one interval (unless `immediately`). For a cron
// `pattern`, asks cron-parser for the next occurrence; iteration errors past
// the schedule's end yield undefined.
export const getNextMillis = (millis, opts) => {
    const { pattern, every, immediately } = opts;
    if (pattern && every) {
        throw new Error('Both .pattern and .every options are defined for this repeatable job');
    }
    if (every) {
        const base = Math.floor(millis / every) * every;
        return immediately ? base : base + every;
    }
    // Start iterating from startDate when it lies in the future of `millis`.
    let currentDate = new Date(millis);
    if (opts.startDate && new Date(opts.startDate) > currentDate) {
        currentDate = new Date(opts.startDate);
    }
    const interval = parseExpression(pattern, Object.assign({}, opts, { currentDate }));
    try {
        return immediately ? new Date().getTime() : interval.next().getTime();
    }
    catch (e) {
        // No further occurrences: fall through and return undefined.
    }
};
//# sourceMappingURL=repeat.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,4 @@
import { ChildPool } from './child-pool';
import { Job } from './job';
/**
 * Factory returning a processor that runs `processFile` for each job inside a
 * pooled child process, resolving with the child's result.
 */
declare const sandbox: <T, R, N extends string>(processFile: any, childPool: ChildPool) => (job: Job<T, R, N>, token?: string) => Promise<R>;
export default sandbox;

109
backend/node_modules/bullmq/dist/esm/classes/sandbox.js generated vendored Normal file
View File

@@ -0,0 +1,109 @@
import { ChildCommand, ParentCommand } from '../enums';
// Factory producing a sandboxed processor: each job runs `processFile` in a
// child process retained from `childPool`, while job-control messages
// (progress, logs, state moves, child-value queries) are proxied between the
// child and the parent-side Job instance.
const sandbox = (processFile, childPool) => {
    return async function process(job, token) {
        let child;
        let msgHandler;
        let exitHandler;
        try {
            const done = new Promise((resolve, reject) => {
                const initChild = async () => {
                    try {
                        // An unexpected child exit fails the job with the exit details.
                        exitHandler = (exitCode, signal) => {
                            reject(new Error('Unexpected exit code: ' + exitCode + ' signal: ' + signal));
                        };
                        child = await childPool.retain(processFile);
                        child.on('exit', exitHandler);
                        // Dispatch each ParentCommand from the child to the matching
                        // Job API call; Completed/Failed/Error settle the promise.
                        msgHandler = async (msg) => {
                            var _a, _b, _c, _d, _e;
                            try {
                                switch (msg.cmd) {
                                    case ParentCommand.Completed:
                                        resolve(msg.value);
                                        break;
                                    case ParentCommand.Failed:
                                    case ParentCommand.Error: {
                                        // Rehydrate the serialized error fields onto a real Error.
                                        const err = new Error();
                                        Object.assign(err, msg.value);
                                        reject(err);
                                        break;
                                    }
                                    case ParentCommand.Progress:
                                        await job.updateProgress(msg.value);
                                        break;
                                    case ParentCommand.Log:
                                        await job.log(msg.value);
                                        break;
                                    case ParentCommand.MoveToDelayed:
                                        await job.moveToDelayed((_a = msg.value) === null || _a === void 0 ? void 0 : _a.timestamp, (_b = msg.value) === null || _b === void 0 ? void 0 : _b.token);
                                        break;
                                    case ParentCommand.MoveToWait:
                                        await job.moveToWait((_c = msg.value) === null || _c === void 0 ? void 0 : _c.token);
                                        break;
                                    case ParentCommand.MoveToWaitingChildren:
                                        {
                                            // Request/response: echo requestId so the child can correlate.
                                            const value = await job.moveToWaitingChildren((_d = msg.value) === null || _d === void 0 ? void 0 : _d.token, (_e = msg.value) === null || _e === void 0 ? void 0 : _e.opts);
                                            child.send({
                                                requestId: msg.requestId,
                                                cmd: ChildCommand.MoveToWaitingChildrenResponse,
                                                value,
                                            });
                                        }
                                        break;
                                    case ParentCommand.Update:
                                        await job.updateData(msg.value);
                                        break;
                                    case ParentCommand.GetChildrenValues:
                                        {
                                            const value = await job.getChildrenValues();
                                            child.send({
                                                requestId: msg.requestId,
                                                cmd: ChildCommand.GetChildrenValuesResponse,
                                                value,
                                            });
                                        }
                                        break;
                                    case ParentCommand.GetIgnoredChildrenFailures:
                                        {
                                            const value = await job.getIgnoredChildrenFailures();
                                            child.send({
                                                requestId: msg.requestId,
                                                cmd: ChildCommand.GetIgnoredChildrenFailuresResponse,
                                                value,
                                            });
                                        }
                                        break;
                                }
                            }
                            catch (err) {
                                reject(err);
                            }
                        };
                        child.on('message', msgHandler);
                        // Kick off processing in the child.
                        child.send({
                            cmd: ChildCommand.Start,
                            job: job.asJSONSandbox(),
                            token,
                        });
                    }
                    catch (error) {
                        reject(error);
                    }
                };
                initChild();
            });
            await done;
            return done;
        }
        finally {
            // Detach handlers and return a still-alive child to the pool.
            if (child) {
                child.off('message', msgHandler);
                child.off('exit', exitHandler);
                if (child.exitCode === null && child.signalCode === null) {
                    childPool.release(child);
                }
            }
        }
    };
};
export default sandbox;
//# sourceMappingURL=sandbox.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"sandbox.js","sourceRoot":"","sources":["../../../src/classes/sandbox.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,aAAa,EAAE,MAAM,UAAU,CAAC;AAMvD,MAAM,OAAO,GAAG,CACd,WAAgB,EAChB,SAAoB,EACpB,EAAE;IACF,OAAO,KAAK,UAAU,OAAO,CAAC,GAAiB,EAAE,KAAc;QAC7D,IAAI,KAAY,CAAC;QACjB,IAAI,UAAe,CAAC;QACpB,IAAI,WAAgB,CAAC;QACrB,IAAI,CAAC;YACH,MAAM,IAAI,GAAe,IAAI,OAAO,CAAC,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE;gBACvD,MAAM,SAAS,GAAG,KAAK,IAAI,EAAE;oBAC3B,IAAI,CAAC;wBACH,WAAW,GAAG,CAAC,QAAa,EAAE,MAAW,EAAE,EAAE;4BAC3C,MAAM,CACJ,IAAI,KAAK,CACP,wBAAwB,GAAG,QAAQ,GAAG,WAAW,GAAG,MAAM,CAC3D,CACF,CAAC;wBACJ,CAAC,CAAC;wBAEF,KAAK,GAAG,MAAM,SAAS,CAAC,MAAM,CAAC,WAAW,CAAC,CAAC;wBAC5C,KAAK,CAAC,EAAE,CAAC,MAAM,EAAE,WAAW,CAAC,CAAC;wBAE9B,UAAU,GAAG,KAAK,EAAE,GAAiB,EAAE,EAAE;;4BACvC,IAAI,CAAC;gCACH,QAAQ,GAAG,CAAC,GAAG,EAAE,CAAC;oCAChB,KAAK,aAAa,CAAC,SAAS;wCAC1B,OAAO,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCACnB,MAAM;oCACR,KAAK,aAAa,CAAC,MAAM,CAAC;oCAC1B,KAAK,aAAa,CAAC,KAAK,CAAC,CAAC,CAAC;wCACzB,MAAM,GAAG,GAAG,IAAI,KAAK,EAAE,CAAC;wCACxB,MAAM,CAAC,MAAM,CAAC,GAAG,EAAE,GAAG,CAAC,KAAK,CAAC,CAAC;wCAC9B,MAAM,CAAC,GAAG,CAAC,CAAC;wCACZ,MAAM;oCACR,CAAC;oCACD,KAAK,aAAa,CAAC,QAAQ;wCACzB,MAAM,GAAG,CAAC,cAAc,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCACpC,MAAM;oCACR,KAAK,aAAa,CAAC,GAAG;wCACpB,MAAM,GAAG,CAAC,GAAG,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCACzB,MAAM;oCACR,KAAK,aAAa,CAAC,aAAa;wCAC9B,MAAM,GAAG,CAAC,aAAa,CACrB,MAAA,GAAG,CAAC,KAAK,0CAAE,SAAS,EACpB,MAAA,GAAG,CAAC,KAAK,0CAAE,KAAK,CACjB,CAAC;wCACF,MAAM;oCACR,KAAK,aAAa,CAAC,UAAU;wCAC3B,MAAM,GAAG,CAAC,UAAU,CAAC,MAAA,GAAG,CAAC,KAAK,0CAAE,KAAK,CAAC,CAAC;wCACvC,MAAM;oCACR,KAAK,aAAa,CAAC,qBAAqB;wCACtC,CAAC;4CACC,MAAM,KAAK,GAAG,MAAM,GAAG,CAAC,qBAAqB,CAC3C,MAAA,GAAG,CAAC,KAAK,0CAAE,KAAK,EAChB,MAAA,GAAG,CAAC,KAAK,0CAAE,IAAI,CAChB,CAAC;4CACF,KAAK,CAAC,IAAI,CAAC;gDACT,SAAS,EAAE,GAAG,CAAC,SAAS;gDACxB,GAAG,EAAE,YAAY,CAAC,6BAA6B;gDAC/C,KAAK;6CACN,CAAC,CAAC;wCACL,CAAC;wCACD,MAAM;oCACR,KAAK,aAAa,CAAC,MAAM;wCACvB,MAAM,GAAG,CAAC,UAAU,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;wCAChC,MAAM;oCACR,KAAK,aAA
a,CAAC,iBAAiB;wCAClC,CAAC;4CACC,MAAM,KAAK,GAAG,MAAM,GAAG,CAAC,iBAAiB,EAAE,CAAC;4CAC5C,KAAK,CAAC,IAAI,CAAC;gDACT,SAAS,EAAE,GAAG,CAAC,SAAS;gDACxB,GAAG,EAAE,YAAY,CAAC,yBAAyB;gDAC3C,KAAK;6CACN,CAAC,CAAC;wCACL,CAAC;wCACD,MAAM;oCACR,KAAK,aAAa,CAAC,0BAA0B;wCAC3C,CAAC;4CACC,MAAM,KAAK,GAAG,MAAM,GAAG,CAAC,0BAA0B,EAAE,CAAC;4CACrD,KAAK,CAAC,IAAI,CAAC;gDACT,SAAS,EAAE,GAAG,CAAC,SAAS;gDACxB,GAAG,EAAE,YAAY,CAAC,kCAAkC;gDACpD,KAAK;6CACN,CAAC,CAAC;wCACL,CAAC;wCACD,MAAM;gCACV,CAAC;4BACH,CAAC;4BAAC,OAAO,GAAG,EAAE,CAAC;gCACb,MAAM,CAAC,GAAG,CAAC,CAAC;4BACd,CAAC;wBACH,CAAC,CAAC;wBAEF,KAAK,CAAC,EAAE,CAAC,SAAS,EAAE,UAAU,CAAC,CAAC;wBAEhC,KAAK,CAAC,IAAI,CAAC;4BACT,GAAG,EAAE,YAAY,CAAC,KAAK;4BACvB,GAAG,EAAE,GAAG,CAAC,aAAa,EAAE;4BACxB,KAAK;yBACN,CAAC,CAAC;oBACL,CAAC;oBAAC,OAAO,KAAK,EAAE,CAAC;wBACf,MAAM,CAAC,KAAK,CAAC,CAAC;oBAChB,CAAC;gBACH,CAAC,CAAC;gBACF,SAAS,EAAE,CAAC;YACd,CAAC,CAAC,CAAC;YAEH,MAAM,IAAI,CAAC;YACX,OAAO,IAAI,CAAC;QACd,CAAC;gBAAS,CAAC;YACT,IAAI,KAAK,EAAE,CAAC;gBACV,KAAK,CAAC,GAAG,CAAC,SAAS,EAAE,UAAU,CAAC,CAAC;gBACjC,KAAK,CAAC,GAAG,CAAC,MAAM,EAAE,WAAW,CAAC,CAAC;gBAC/B,IAAI,KAAK,CAAC,QAAQ,KAAK,IAAI,IAAI,KAAK,CAAC,UAAU,KAAK,IAAI,EAAE,CAAC;oBACzD,SAAS,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;gBAC3B,CAAC;YACH,CAAC;QACH,CAAC;IACH,CAAC,CAAC;AACJ,CAAC,CAAC;AAEF,eAAe,OAAO,CAAC"}

View File

@@ -0,0 +1,183 @@
/**
* Includes all the scripts needed by the queue and jobs.
*/
import { JobJson, JobJsonRaw, MinimalJob, MoveToWaitingChildrenOpts, ParentKeyOpts, RedisClient, MoveToDelayedOpts, RepeatableOptions, RetryJobOpts, RetryOptions, ScriptQueueContext } from '../interfaces';
import { JobsOptions, JobState, JobType, FinishedStatus, FinishedPropValAttribute, KeepJobs, RedisJobOptions, JobProgress } from '../types';
import { ChainableCommander } from 'ioredis';
export type JobData = [JobJsonRaw | number, string?];
/**
 * Scripts
 *
 * Typed facade over the queue's Lua scripts: builds the argument arrays
 * ("...Args" helpers) and executes the corresponding commands on the client.
 */
export declare class Scripts {
    protected queue: ScriptQueueContext;
    protected version: string;
    moveToFinishedKeys: (string | undefined)[];
    constructor(queue: ScriptQueueContext);
    execCommand(client: RedisClient | ChainableCommander, commandName: string, args: any[]): any;
    isJobInList(listKey: string, jobId: string): Promise<boolean>;
    protected addDelayedJobArgs(job: JobJson, encodedOpts: any, args: (string | number | Record<string, any>)[]): (string | Buffer)[];
    protected addDelayedJob(client: RedisClient, job: JobJson, encodedOpts: any, args: (string | number | Record<string, any>)[]): Promise<string | number>;
    protected addPrioritizedJobArgs(job: JobJson, encodedOpts: any, args: (string | number | Record<string, any>)[]): (string | Buffer)[];
    protected addPrioritizedJob(client: RedisClient, job: JobJson, encodedOpts: any, args: (string | number | Record<string, any>)[]): Promise<string | number>;
    protected addParentJobArgs(job: JobJson, encodedOpts: any, args: (string | number | Record<string, any>)[]): (string | Buffer)[];
    protected addParentJob(client: RedisClient, job: JobJson, encodedOpts: any, args: (string | number | Record<string, any>)[]): Promise<string | number>;
    protected addStandardJobArgs(job: JobJson, encodedOpts: any, args: (string | number | Record<string, any>)[]): (string | Buffer)[];
    protected addStandardJob(client: RedisClient, job: JobJson, encodedOpts: any, args: (string | number | Record<string, any>)[]): Promise<string | number>;
    addJob(client: RedisClient, job: JobJson, opts: RedisJobOptions, jobId: string, parentKeyOpts?: ParentKeyOpts): Promise<string>;
    protected pauseArgs(pause: boolean): (string | number)[];
    pause(pause: boolean): Promise<void>;
    protected addRepeatableJobArgs(customKey: string, nextMillis: number, opts: RepeatableOptions, legacyCustomKey: string): (string | number | Buffer)[];
    addRepeatableJob(customKey: string, nextMillis: number, opts: RepeatableOptions, legacyCustomKey: string): Promise<string>;
    removeDeduplicationKey(deduplicationId: string, jobId: string): Promise<number>;
    addJobScheduler(jobSchedulerId: string, nextMillis: number, templateData: string, templateOpts: RedisJobOptions, opts: RepeatableOptions, delayedJobOpts: JobsOptions, producerId?: string): Promise<[string, number]>;
    updateRepeatableJobMillis(client: RedisClient, customKey: string, nextMillis: number, legacyCustomKey: string): Promise<string>;
    updateJobSchedulerNextMillis(jobSchedulerId: string, nextMillis: number, templateData: string, delayedJobOpts: JobsOptions, producerId?: string): Promise<string | null>;
    private removeRepeatableArgs;
    getRepeatConcatOptions(repeatConcatOptions: string, repeatJobKey: string): string;
    removeRepeatable(legacyRepeatJobId: string, repeatConcatOptions: string, repeatJobKey: string): Promise<number>;
    removeJobScheduler(jobSchedulerId: string): Promise<number>;
    protected removeArgs(jobId: string, removeChildren: boolean): (string | number)[];
    remove(jobId: string, removeChildren: boolean): Promise<number>;
    removeUnprocessedChildren(jobId: string): Promise<void>;
    extendLock(jobId: string, token: string, duration: number, client?: RedisClient | ChainableCommander): Promise<number>;
    extendLocks(jobIds: string[], tokens: string[], duration: number): Promise<string[]>;
    updateData<T = any, R = any, N extends string = string>(job: MinimalJob<T, R, N>, data: T): Promise<void>;
    updateProgress(jobId: string, progress: JobProgress): Promise<void>;
    addLog(jobId: string, logRow: string, keepLogs?: number): Promise<number>;
    protected moveToFinishedArgs<T = any, R = any, N extends string = string>(job: MinimalJob<T, R, N>, val: any, propVal: FinishedPropValAttribute, shouldRemove: undefined | boolean | number | KeepJobs, target: FinishedStatus, token: string, timestamp: number, fetchNext?: boolean, fieldsToUpdate?: Record<string, any>): (string | number | boolean | Buffer)[];
    protected getKeepJobs(shouldRemove: undefined | boolean | number | KeepJobs, workerKeepJobs: undefined | KeepJobs): KeepJobs;
    moveToFinished(jobId: string, args: (string | number | boolean | Buffer)[]): Promise<any[]>;
    private drainArgs;
    drain(delayed: boolean): Promise<void>;
    private removeChildDependencyArgs;
    removeChildDependency(jobId: string, parentKey: string): Promise<boolean>;
    private getRangesArgs;
    getRanges(types: JobType[], start?: number, end?: number, asc?: boolean): Promise<[string][]>;
    private getCountsArgs;
    getCounts(types: JobType[]): Promise<number[]>;
    protected getCountsPerPriorityArgs(priorities: number[]): (string | number)[];
    getCountsPerPriority(priorities: number[]): Promise<number[]>;
    protected getDependencyCountsArgs(jobId: string, types: string[]): (string | number)[];
    getDependencyCounts(jobId: string, types: string[]): Promise<number[]>;
    moveToCompletedArgs<T = any, R = any, N extends string = string>(job: MinimalJob<T, R, N>, returnvalue: R, removeOnComplete: boolean | number | KeepJobs, token: string, fetchNext?: boolean): (string | number | boolean | Buffer)[];
    moveToFailedArgs<T = any, R = any, N extends string = string>(job: MinimalJob<T, R, N>, failedReason: string, removeOnFailed: boolean | number | KeepJobs, token: string, fetchNext?: boolean, fieldsToUpdate?: Record<string, any>): (string | number | boolean | Buffer)[];
    isFinished(jobId: string, returnValue?: boolean): Promise<number | [number, string]>;
    getState(jobId: string): Promise<JobState | 'unknown'>;
    /**
     * Change delay of a delayed job.
     *
     * Reschedules a delayed job by setting a new delay from the current time.
     * For example, calling changeDelay(5000) will reschedule the job to execute
     * 5000 milliseconds (5 seconds) from now, regardless of the original delay.
     *
     * @param jobId - the ID of the job to change the delay for.
     * @param delay - milliseconds from now when the job should be processed.
     * @returns delay in milliseconds.
     * @throws JobNotExist
     * This exception is thrown if jobId is missing.
     * @throws JobNotInState
     * This exception is thrown if job is not in delayed state.
     */
    changeDelay(jobId: string, delay: number): Promise<void>;
    private changeDelayArgs;
    changePriority(jobId: string, priority?: number, lifo?: boolean): Promise<void>;
    protected changePriorityArgs(jobId: string, priority?: number, lifo?: boolean): (string | number)[];
    moveToDelayedArgs(jobId: string, timestamp: number, token: string, delay: number, opts?: MoveToDelayedOpts): (string | number | Buffer)[];
    moveToWaitingChildrenArgs(jobId: string, token: string, opts?: MoveToWaitingChildrenOpts): (string | number)[];
    isMaxedArgs(): string[];
    isMaxed(): Promise<boolean>;
    moveToDelayed(jobId: string, timestamp: number, delay: number, token?: string, opts?: MoveToDelayedOpts): Promise<void>;
    /**
     * Move parent job to waiting-children state.
     *
     * @returns true if job is successfully moved, false if there are pending dependencies.
     * @throws JobNotExist
     * This exception is thrown if jobId is missing.
     * @throws JobLockNotExist
     * This exception is thrown if job lock is missing.
     * @throws JobNotInState
     * This exception is thrown if job is not in active state.
     */
    moveToWaitingChildren(jobId: string, token: string, opts?: MoveToWaitingChildrenOpts): Promise<boolean>;
    getRateLimitTtlArgs(maxJobs?: number): (string | number)[];
    getRateLimitTtl(maxJobs?: number): Promise<number>;
    /**
     * Remove jobs in a specific state.
     *
     * @returns Id jobs from the deleted records.
     */
    cleanJobsInSet(set: string, timestamp: number, limit?: number): Promise<string[]>;
    getJobSchedulerArgs(id: string): string[];
    getJobScheduler(id: string): Promise<[any, string | null]>;
    retryJobArgs(jobId: string, lifo: boolean, token: string, opts?: MoveToDelayedOpts): (string | number | Buffer)[];
    retryJob(jobId: string, lifo: boolean, token?: string, opts?: RetryJobOpts): Promise<void>;
    protected moveJobsToWaitArgs(state: FinishedStatus | 'delayed', count: number, timestamp: number): (string | number)[];
    retryJobs(state?: FinishedStatus, count?: number, timestamp?: number): Promise<number>;
    promoteJobs(count?: number): Promise<number>;
    /**
     * Attempts to reprocess a job
     *
     * @param job - The job to reprocess
     * @param state - The expected job state. If the job is not found
     * on the provided state, then it's not reprocessed. Supported states: 'failed', 'completed'
     *
     * @returns A promise that resolves when the job has been successfully moved to the wait queue.
     * @throws Will throw an error with a code property indicating the failure reason:
     * - code 0: Job does not exist
     * - code -1: Job is currently locked and can't be retried
     * - code -2: Job was not found in the expected set
     */
    reprocessJob<T = any, R = any, N extends string = string>(job: MinimalJob<T, R, N>, state: 'failed' | 'completed', opts?: RetryOptions): Promise<void>;
    getMetrics(type: 'completed' | 'failed', start?: number, end?: number): Promise<[string[], string[], number]>;
    moveToActive(client: RedisClient, token: string, name?: string): Promise<any[]>;
    promote(jobId: string): Promise<void>;
    protected moveStalledJobsToWaitArgs(): (string | number)[];
    /**
     * Looks for unlocked jobs in the active queue.
     *
     * The job was being worked on, but the worker process died and it failed to renew the lock.
     * We call these jobs 'stalled'. This is the most common case. We resolve these by moving them
     * back to wait to be re-processed. To prevent jobs from cycling endlessly between active and wait,
     * (e.g. if the job handler keeps crashing),
     * we limit the number stalled job recoveries to settings.maxStalledCount.
     */
    moveStalledJobsToWait(): Promise<string[]>;
    /**
     * Moves a job back from Active to Wait.
     * This script is used when a job has been manually rate limited and needs
     * to be moved back to wait from active status.
     *
     * @param client - Redis client
     * @param jobId - Job id
     * @returns
     */
    moveJobFromActiveToWait(jobId: string, token?: string): Promise<any>;
    obliterate(opts: {
        force: boolean;
        count: number;
    }): Promise<number>;
    /**
     * Paginate a set or hash keys.
     * @param opts - options to define the pagination behaviour
     *
     */
    paginate(key: string, opts: {
        start: number;
        end: number;
        fetchJobs?: boolean;
    }): Promise<{
        cursor: string;
        items: {
            id: string;
            v?: any;
            err?: string;
        }[];
        total: number;
        jobs?: JobJsonRaw[];
    }>;
    finishedErrors({ code, jobId, parentKey, command, state, }: {
        code: number;
        jobId?: string;
        parentKey?: string;
        command: string;
        state?: string;
    }): Error;
}
export declare function raw2NextJobData(raw: any[]): any[];

1196
backend/node_modules/bullmq/dist/esm/classes/scripts.js generated vendored Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,286 @@
import { URL } from 'url';
import { AbortController } from 'node-abort-controller';
import { GetNextJobOptions, IoredisListener, JobJsonRaw, RedisClient, Span, WorkerOptions } from '../interfaces';
import { JobProgress } from '../types';
import { Processor } from '../types/processor';
import { QueueBase } from './queue-base';
import { Repeat } from './repeat';
import { Job } from './job';
import { RedisConnection } from './redis-connection';
import { JobScheduler } from './job-scheduler';
import { LockManager } from './lock-manager';
export interface WorkerListener<DataType = any, ResultType = any, NameType extends string = string> extends IoredisListener {
    /**
     * Listen to 'active' event.
     *
     * This event is triggered when a job enters the 'active' state.
     */
    active: (job: Job<DataType, ResultType, NameType>, prev: string) => void;
    /**
     * Listen to 'closed' event.
     *
     * This event is triggered when the worker is closed.
     */
    closed: () => void;
    /**
     * Listen to 'closing' event.
     *
     * This event is triggered when the worker is closing.
     */
    closing: (msg: string) => void;
    /**
     * Listen to 'completed' event.
     *
     * This event is triggered when a job has successfully completed.
     */
    completed: (job: Job<DataType, ResultType, NameType>, result: ResultType, prev: string) => void;
    /**
     * Listen to 'drained' event.
     *
     * This event is triggered when the queue has drained the waiting list.
     * Note that there could still be delayed jobs waiting their timers to expire
     * and this event will still be triggered as long as the waiting list has emptied.
     */
    drained: () => void;
    /**
     * Listen to 'error' event.
     *
     * This event is triggered when an error is thrown.
     */
    error: (failedReason: Error) => void;
    /**
     * Listen to 'failed' event.
     *
     * This event is triggered when a job has thrown an exception.
     * Note: job parameter could be received as undefined when a stalled job
     * reaches the stalled limit and it is deleted by the removeOnFail option.
     */
    failed: (job: Job<DataType, ResultType, NameType> | undefined, error: Error, prev: string) => void;
    /**
     * Listen to 'paused' event.
     *
     * This event is triggered when the queue is paused.
     */
    paused: () => void;
    /**
     * Listen to 'progress' event.
     *
     * This event is triggered when a job updates its progress, i.e. the
     * Job##updateProgress() method is called. This is useful to notify
     * progress or any other data from within a processor to the rest of the
     * world.
     */
    progress: (job: Job<DataType, ResultType, NameType>, progress: JobProgress) => void;
    /**
     * Listen to 'ready' event.
     *
     * This event is triggered when blockingConnection is ready.
     */
    ready: () => void;
    /**
     * Listen to 'resumed' event.
     *
     * This event is triggered when the queue is resumed.
     */
    resumed: () => void;
    /**
     * Listen to 'stalled' event.
     *
     * This event is triggered when a job has stalled and
     * has been moved back to the wait list.
     */
    stalled: (jobId: string, prev: string) => void;
    /**
     * Listen to 'lockRenewalFailed' event.
     *
     * This event is triggered when lock renewal fails for one or more jobs.
     */
    lockRenewalFailed: (jobIds: string[]) => void;
    /**
     * Listen to 'locksRenewed' event.
     *
     * This event is triggered when locks are successfully renewed.
     */
    locksRenewed: (data: {
        count: number;
        jobIds: string[];
    }) => void;
}
/**
*
* This class represents a worker that is able to process jobs from the queue.
* As soon as the class is instantiated and a connection to Redis is established
* it will start processing jobs.
*
*/
export declare class Worker<DataType = any, ResultType = any, NameType extends string = string> extends QueueBase {
    readonly opts: WorkerOptions;
    /** Unique worker id (uuid v4), used as the prefix for job lock tokens. */
    readonly id: string;
    private abortDelayController;
    /** Dedicated non-shared connection used for blocking Redis commands. */
    private blockingConnection;
    /** Timestamp until which fetching is blocked waiting for delayed jobs. */
    private blockUntil;
    private _concurrency;
    private childPool;
    private drained;
    /** Timestamp until which the queue is rate limited (0 = not limited). */
    private limitUntil;
    protected lockManager: LockManager;
    private processorAcceptsSignal;
    private stalledCheckStopper?;
    private waiting;
    private _repeat;
    protected _jobScheduler: JobScheduler;
    protected paused: boolean;
    protected processFn: Processor<DataType, ResultType, NameType>;
    protected running: boolean;
    protected mainLoopRunning: Promise<void> | null;
    static RateLimitError(): Error;
    constructor(name: string, processor?: string | URL | null | Processor<DataType, ResultType, NameType>, opts?: WorkerOptions, Connection?: typeof RedisConnection);
    /**
     * Creates and configures the lock manager for processing jobs.
     * This method can be overridden in subclasses to customize lock manager behavior.
     */
    protected createLockManager(): void;
    /**
     * Creates and configures the sandbox for processing jobs.
     * This method can be overridden in subclasses to customize sandbox behavior.
     *
     * @param processor - The processor file path, URL, or function to be sandboxed
     */
    protected createSandbox(processor: string | URL | null | Processor<DataType, ResultType, NameType>): void;
    /**
     * Public accessor method for LockManager to extend locks.
     * This delegates to the protected scripts object.
     */
    extendJobLocks(jobIds: string[], tokens: string[], duration: number): Promise<string[]>;
    emit<U extends keyof WorkerListener<DataType, ResultType, NameType>>(event: U, ...args: Parameters<WorkerListener<DataType, ResultType, NameType>[U]>): boolean;
    off<U extends keyof WorkerListener<DataType, ResultType, NameType>>(eventName: U, listener: WorkerListener<DataType, ResultType, NameType>[U]): this;
    on<U extends keyof WorkerListener<DataType, ResultType, NameType>>(event: U, listener: WorkerListener<DataType, ResultType, NameType>[U]): this;
    once<U extends keyof WorkerListener<DataType, ResultType, NameType>>(event: U, listener: WorkerListener<DataType, ResultType, NameType>[U]): this;
    protected callProcessJob(job: Job<DataType, ResultType, NameType>, token: string, signal?: AbortSignal): Promise<ResultType>;
    protected createJob(data: JobJsonRaw, jobId: string): Job<DataType, ResultType, NameType>;
    /**
     *
     * Waits until the worker is ready to start processing jobs.
     * In general only useful when writing tests.
     *
     */
    waitUntilReady(): Promise<RedisClient>;
    /**
     * Cancels a specific job currently being processed by this worker.
     * The job's processor function will receive an abort signal.
     *
     * @param jobId - The ID of the job to cancel
     * @param reason - Optional reason for the cancellation
     * @returns true if the job was found and cancelled, false otherwise
     */
    cancelJob(jobId: string, reason?: string): boolean;
    /**
     * Cancels all jobs currently being processed by this worker.
     * All active job processor functions will receive abort signals.
     *
     * @param reason - Optional reason for the cancellation
     */
    cancelAllJobs(reason?: string): void;
    set concurrency(concurrency: number);
    get concurrency(): number;
    /** Lazily created Repeat helper for legacy repeatable jobs. */
    get repeat(): Promise<Repeat>;
    /** Lazily created JobScheduler helper for scheduled (repeatable) jobs. */
    get jobScheduler(): Promise<JobScheduler>;
    run(): Promise<void>;
    private waitForRateLimit;
    /**
     * This is the main loop in BullMQ. Its goals are to fetch jobs from the queue
     * as efficiently as possible, providing concurrency and minimal unnecessary calls
     * to Redis.
     */
    private mainLoop;
    /**
     * Returns a promise that resolves to the next job in queue.
     * @param token - worker token to be assigned to retrieved job
     * @returns a Job or undefined if no job was available in the queue.
     */
    getNextJob(token: string, { block }?: GetNextJobOptions): Promise<Job<DataType, ResultType, NameType>>;
    private _getNextJob;
    /**
     * Overrides the rate limit to be active for the next jobs.
     * @deprecated This method is deprecated and will be removed in v6. Use queue.rateLimit method instead.
     * @param expireTimeMs - expire time in ms of this rate limit.
     */
    rateLimit(expireTimeMs: number): Promise<void>;
    get minimumBlockTimeout(): number;
    private isRateLimited;
    protected moveToActive(client: RedisClient, token: string, name?: string): Promise<Job<DataType, ResultType, NameType>>;
    private waitForJob;
    protected getBlockTimeout(blockUntil: number): number;
    protected getRateLimitDelay(delay: number): number;
    /**
     *
     * This function is exposed only for testing purposes.
     */
    delay(milliseconds?: number, abortController?: AbortController): Promise<void>;
    private updateDelays;
    protected nextJobFromJobData(jobData?: JobJsonRaw, jobId?: string, token?: string): Promise<Job<DataType, ResultType, NameType>>;
    processJob(job: Job<DataType, ResultType, NameType>, token: string, fetchNextCallback?: () => boolean): Promise<void | Job<DataType, ResultType, NameType>>;
    private getUnrecoverableErrorMessage;
    protected handleCompleted(result: ResultType, job: Job<DataType, ResultType, NameType>, token: string, fetchNextCallback?: () => boolean, span?: Span): Promise<Job<DataType, ResultType, NameType>>;
    protected handleFailed(err: Error, job: Job<DataType, ResultType, NameType>, token: string, fetchNextCallback?: () => boolean, span?: Span): Promise<Job<DataType, ResultType, NameType>>;
    /**
     *
     * Pauses the processing of this queue only for this worker.
     */
    pause(doNotWaitActive?: boolean): Promise<void>;
    /**
     *
     * Resumes processing of this worker (if paused).
     */
    resume(): void;
    /**
     *
     * Checks if worker is paused.
     *
     * @returns true if worker is paused, false otherwise.
     */
    isPaused(): boolean;
    /**
     *
     * Checks if worker is currently running.
     *
     * @returns true if worker is running, false otherwise.
     */
    isRunning(): boolean;
    /**
     *
     * Closes the worker and related redis connections.
     *
     * This method waits for current jobs to finalize before returning.
     *
     * @param force - Use force boolean parameter if you do not want to wait for
     * current jobs to be processed. When using telemetry, be mindful that it can
     * interfere with the proper closure of spans, potentially preventing them from being exported.
     *
     * @returns Promise that resolves when the worker has been closed.
     */
    close(force?: boolean): Promise<void>;
    /**
     *
     * Manually starts the stalled checker.
     * The check will run once as soon as this method is called, and
     * then every opts.stalledInterval milliseconds until the worker is closed.
     * Note: Normally you do not need to call this method, since the stalled checker
     * is automatically started when the worker starts processing jobs after
     * calling run. However if you want to process the jobs manually you need
     * to call this method to start the stalled checker.
     *
     * @see {@link https://docs.bullmq.io/patterns/manually-fetching-jobs}
     */
    startStalledCheckTimer(): Promise<void>;
    private stalledChecker;
    /**
     * Returns a promise that resolves when active jobs are cleared
     *
     * @returns
     */
    private whenCurrentJobsFinished;
    private retryIfFailed;
    private moveStalledJobsToWait;
    private moveLimitedBackToWait;
}

870
backend/node_modules/bullmq/dist/esm/classes/worker.js generated vendored Normal file
View File

@@ -0,0 +1,870 @@
import * as fs from 'fs';
import { URL } from 'url';
import * as path from 'path';
import { v4 } from 'uuid';
// Note: this Polyfill is only needed for Node versions < 15.4.0
import { AbortController } from 'node-abort-controller';
import { delay, DELAY_TIME_1, isNotConnectionError, isRedisInstance, } from '../utils';
import { QueueBase } from './queue-base';
import { Repeat } from './repeat';
import { ChildPool } from './child-pool';
import { RedisConnection } from './redis-connection';
import sandbox from './sandbox';
import { AsyncFifoQueue } from './async-fifo-queue';
import { DelayedError, RateLimitError, RATE_LIMIT_ERROR, WaitingChildrenError, WaitingError, UnrecoverableError, } from './errors';
import { SpanKind, TelemetryAttributes } from '../enums';
import { JobScheduler } from './job-scheduler';
import { LockManager } from './lock-manager';
// 10 seconds is the maximum time a BZPOPMIN can block.
// We cap block timeouts at this value to avoid holding the blocking connection
// for too long across reconnections (see getBlockTimeout below).
const maximumBlockTimeout = 10;
/**
*
* This class represents a worker that is able to process jobs from the queue.
* As soon as the class is instantiated and a connection to Redis is established
* it will start processing jobs.
*
*/
export class Worker extends QueueBase {
    /** Factory for the sentinel error a processor throws to signal manual rate limiting. */
    static RateLimitError() {
        return new RateLimitError();
    }
    /**
     * @param name - name of the queue this worker consumes from.
     * @param processor - a processor function, or a path/URL to a file exporting
     *   one (the latter is run sandboxed in a child process or worker thread).
     * @param opts - worker options; `connection` is mandatory.
     * @param Connection - optional RedisConnection class override.
     */
    constructor(name, processor, opts, Connection) {
        super(name, Object.assign(Object.assign({ drainDelay: 5, concurrency: 1, lockDuration: 30000, maximumRateLimitDelay: 30000, maxStalledCount: 1, stalledInterval: 30000, autorun: true, runRetryDelay: 15000 }, opts), { blockingConnection: true }), Connection);
        this.abortDelayController = null;
        this.blockUntil = 0;
        this.drained = false;
        this.limitUntil = 0;
        this.processorAcceptsSignal = false;
        this.waiting = null;
        this.running = false;
        this.mainLoopRunning = null;
        if (!opts || !opts.connection) {
            throw new Error('Worker requires a connection');
        }
        // Validate numeric options early so misconfiguration fails fast.
        if (typeof this.opts.maxStalledCount !== 'number' ||
            this.opts.maxStalledCount < 0) {
            throw new Error('maxStalledCount must be greater or equal than 0');
        }
        if (typeof this.opts.maxStartedAttempts === 'number' &&
            this.opts.maxStartedAttempts < 0) {
            throw new Error('maxStartedAttempts must be greater or equal than 0');
        }
        if (typeof this.opts.stalledInterval !== 'number' ||
            this.opts.stalledInterval <= 0) {
            throw new Error('stalledInterval must be greater than 0');
        }
        if (typeof this.opts.drainDelay !== 'number' || this.opts.drainDelay <= 0) {
            throw new Error('drainDelay must be greater than 0');
        }
        this.concurrency = this.opts.concurrency;
        // By default locks are renewed at half their duration.
        this.opts.lockRenewTime =
            this.opts.lockRenewTime || this.opts.lockDuration / 2;
        this.id = v4();
        this.createLockManager();
        if (processor) {
            if (typeof processor === 'function') {
                this.processFn = processor;
                // Check if processor accepts signal parameter (3rd parameter)
                this.processorAcceptsSignal = processor.length >= 3;
            }
            else {
                // SANDBOXED
                if (processor instanceof URL) {
                    if (!fs.existsSync(processor)) {
                        throw new Error(`URL ${processor} does not exist in the local file system`);
                    }
                    processor = processor.href;
                }
                else {
                    const supportedFileTypes = ['.js', '.ts', '.flow', '.cjs', '.mjs'];
                    const processorFile = processor +
                        (supportedFileTypes.includes(path.extname(processor)) ? '' : '.js');
                    if (!fs.existsSync(processorFile)) {
                        throw new Error(`File ${processorFile} does not exist`);
                    }
                }
                // Separate paths so that bundling tools can resolve dependencies easier
                const dirname = path.dirname(module.filename || __filename);
                const workerThreadsMainFile = path.join(dirname, 'main-worker.js');
                const spawnProcessMainFile = path.join(dirname, 'main.js');
                let mainFilePath = this.opts.useWorkerThreads
                    ? workerThreadsMainFile
                    : spawnProcessMainFile;
                try {
                    fs.statSync(mainFilePath); // would throw if file not exists
                }
                catch (_) {
                    // Fallback location for bundlers that relocate the dist files.
                    const mainFile = this.opts.useWorkerThreads
                        ? 'main-worker.js'
                        : 'main.js';
                    mainFilePath = path.join(process.cwd(), `dist/cjs/classes/${mainFile}`);
                    fs.statSync(mainFilePath);
                }
                this.childPool = new ChildPool({
                    mainFile: mainFilePath,
                    useWorkerThreads: this.opts.useWorkerThreads,
                    workerForkOptions: this.opts.workerForkOptions,
                    workerThreadsOptions: this.opts.workerThreadsOptions,
                });
                this.createSandbox(processor);
            }
            // NOTE: autorun only kicks in when a processor was supplied;
            // otherwise run() must be called manually.
            if (this.opts.autorun) {
                this.run().catch(error => this.emit('error', error));
            }
        }
        const connectionName = this.clientName() + (this.opts.name ? `:w:${this.opts.name}` : '');
        // A dedicated, non-shared connection is required for blocking commands.
        this.blockingConnection = new RedisConnection(isRedisInstance(opts.connection)
            ? opts.connection.duplicate({ connectionName })
            : Object.assign(Object.assign({}, opts.connection), { connectionName }), {
            shared: false,
            blocking: true,
            skipVersionCheck: opts.skipVersionCheck,
        });
        this.blockingConnection.on('error', error => this.emit('error', error));
        this.blockingConnection.on('ready', () => setTimeout(() => this.emit('ready'), 0));
    }
    /**
     * Creates and configures the lock manager for processing jobs.
     * This method can be overridden in subclasses to customize lock manager behavior.
     * Note: lockRenewTime defaults to lockDuration / 2 (set in the constructor).
     */
    createLockManager() {
        this.lockManager = new LockManager(this, {
            lockRenewTime: this.opts.lockRenewTime,
            lockDuration: this.opts.lockDuration,
            workerId: this.id,
            workerName: this.opts.name,
        });
    }
    /**
     * Creates and configures the sandbox for processing jobs.
     * This method can be overridden in subclasses to customize sandbox behavior.
     *
     * @param processor - The processor file path, URL, or function to be sandboxed
     */
    createSandbox(processor) {
        // Bound to `this` — presumably so the sandbox wrapper can access worker
        // state; confirm against the sandbox implementation.
        this.processFn = sandbox(processor, this.childPool).bind(this);
    }
    /**
     * Public accessor method for LockManager to extend locks.
     * This delegates to the protected scripts object.
     */
    async extendJobLocks(jobIds, tokens, duration) {
        return this.scripts.extendLocks(jobIds, tokens, duration);
    }
    // Thin typed wrappers over EventEmitter; off/on/once return `this` for chaining.
    emit(event, ...args) {
        return super.emit(event, ...args);
    }
    off(eventName, listener) {
        super.off(eventName, listener);
        return this;
    }
    on(event, listener) {
        super.on(event, listener);
        return this;
    }
    once(event, listener) {
        super.once(event, listener);
        return this;
    }
    /** Invokes the configured processor with the job, lock token and optional abort signal. */
    callProcessJob(job, token, signal) {
        return this.processFn(job, token, signal);
    }
    /** Rehydrates a Job instance from its raw Redis representation. */
    createJob(data, jobId) {
        return this.Job.fromJSON(this, data, jobId);
    }
    /**
     *
     * Waits until the worker is ready to start processing jobs.
     * In general only useful when writing tests.
     *
     * @returns the (ready) blocking Redis client.
     */
    async waitUntilReady() {
        await super.waitUntilReady();
        return this.blockingConnection.client;
    }
    /**
     * Cancels a specific job currently being processed by this worker.
     * The job's processor function will receive an abort signal.
     *
     * @param jobId - The ID of the job to cancel
     * @param reason - Optional reason for the cancellation
     * @returns true if the job was found and cancelled, false otherwise
     */
    cancelJob(jobId, reason) {
        return this.lockManager.cancelJob(jobId, reason);
    }
    /**
     * Cancels all jobs currently being processed by this worker.
     * All active job processor functions will receive abort signals.
     *
     * @param reason - Optional reason for the cancellation
     */
    cancelAllJobs(reason) {
        this.lockManager.cancelAllJobs(reason);
    }
    /** Sets the number of jobs processed in parallel; must be a finite number >= 1. */
    set concurrency(concurrency) {
        if (typeof concurrency !== 'number' ||
            concurrency < 1 ||
            !isFinite(concurrency)) {
            throw new Error('concurrency must be a finite number greater than 0');
        }
        this._concurrency = concurrency;
    }
    get concurrency() {
        return this._concurrency;
    }
get repeat() {
return new Promise(async (resolve) => {
if (!this._repeat) {
const connection = await this.client;
this._repeat = new Repeat(this.name, Object.assign(Object.assign({}, this.opts), { connection }));
this._repeat.on('error', e => this.emit.bind(this, e));
}
resolve(this._repeat);
});
}
get jobScheduler() {
return new Promise(async (resolve) => {
if (!this._jobScheduler) {
const connection = await this.client;
this._jobScheduler = new JobScheduler(this.name, Object.assign(Object.assign({}, this.opts), { connection }));
this._jobScheduler.on('error', e => this.emit.bind(this, e));
}
resolve(this._jobScheduler);
});
}
    /**
     * Starts the worker's main processing loop. Resolves when the loop exits.
     *
     * @throws if no processor function was configured or the worker is
     *   already running.
     */
    async run() {
        if (!this.processFn) {
            throw new Error('No process function is defined.');
        }
        if (this.running) {
            throw new Error('Worker is already running.');
        }
        try {
            this.running = true;
            if (this.closing || this.paused) {
                return;
            }
            await this.startStalledCheckTimer();
            if (!this.opts.skipLockRenewal) {
                this.lockManager.start();
            }
            const client = await this.client;
            const bclient = await this.blockingConnection.client;
            this.mainLoopRunning = this.mainLoop(client, bclient);
            // We must await here or finally will be called too early.
            await this.mainLoopRunning;
        }
        finally {
            this.running = false;
        }
    }
    /**
     * Sleeps until the current rate-limit window (limitUntil) has passed,
     * then clears the limit. Any previous in-flight delay is aborted first.
     */
    async waitForRateLimit() {
        var _a;
        const limitUntil = this.limitUntil;
        if (limitUntil > Date.now()) {
            // Abort any previous delay so only one rate-limit wait is pending.
            (_a = this.abortDelayController) === null || _a === void 0 ? void 0 : _a.abort();
            this.abortDelayController = new AbortController();
            const delay = this.getRateLimitDelay(limitUntil - Date.now());
            await this.delay(delay, this.abortDelayController);
            this.drained = false;
            this.limitUntil = 0;
        }
    }
    /**
     * This is the main loop in BullMQ. Its goals are to fetch jobs from the queue
     * as efficiently as possible, providing concurrency and minimal unnecessary calls
     * to Redis.
     *
     * @param client - the regular Redis client.
     * @param bclient - the dedicated blocking Redis client.
     */
    async mainLoop(client, bclient) {
        const asyncFifoQueue = new AsyncFifoQueue();
        let tokenPostfix = 0;
        // Keep looping while the worker is active, or while jobs are still in flight.
        while ((!this.closing && !this.paused) || asyncFifoQueue.numTotal() > 0) {
            /**
             * This inner loop tries to fetch jobs concurrently, but if we are waiting for a job
             * to arrive at the queue we should not try to fetch more jobs (as it would be pointless)
             */
            while (!this.closing &&
                !this.paused &&
                !this.waiting &&
                asyncFifoQueue.numTotal() < this._concurrency &&
                !this.isRateLimited()) {
                // Each fetch gets a unique lock token derived from the worker id.
                const token = `${this.id}:${tokenPostfix++}`;
                const fetchedJob = this.retryIfFailed(() => this._getNextJob(client, bclient, token, { block: true }), {
                    delayInMs: this.opts.runRetryDelay,
                    onlyEmitError: true,
                });
                asyncFifoQueue.add(fetchedJob);
                if (this.waiting && asyncFifoQueue.numTotal() > 1) {
                    // We are waiting for jobs but we have others that we could start processing already
                    break;
                }
                // We await here so that we fetch jobs in sequence, this is important to avoid unnecessary calls
                // to Redis in high concurrency scenarios.
                const job = await fetchedJob;
                // No more jobs waiting but we have others that could start processing already
                if (!job && asyncFifoQueue.numTotal() > 1) {
                    break;
                }
                // If there are potential jobs to be processed and blockUntil is set, we should exit to avoid waiting
                // for processing this job.
                if (this.blockUntil) {
                    break;
                }
            }
            // Since there can be undefined jobs in the queue (when a job fails or queue is empty)
            // we iterate until we find a job.
            let job;
            do {
                job = await asyncFifoQueue.fetch();
            } while (!job && asyncFifoQueue.numQueued() > 0);
            if (job) {
                const token = job.token;
                asyncFifoQueue.add(this.processJob(job, token, () => asyncFifoQueue.numTotal() <= this._concurrency));
            }
            else if (asyncFifoQueue.numQueued() === 0) {
                // Nothing in flight at all: honor any pending rate limit before retrying.
                await this.waitForRateLimit();
            }
        }
    }
    /**
     * Returns a promise that resolves to the next job in queue.
     * @param token - worker token to be assigned to retrieved job
     * @returns a Job or undefined if no job was available in the queue.
     */
    async getNextJob(token, { block = true } = {}) {
        var _a, _b;
        // Note: the job is fetched first; the telemetry span below only records it.
        const nextJob = await this._getNextJob(await this.client, await this.blockingConnection.client, token, { block });
        return this.trace(SpanKind.INTERNAL, 'getNextJob', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.WorkerId]: this.id,
                [TelemetryAttributes.QueueName]: this.name,
                [TelemetryAttributes.WorkerName]: this.opts.name,
                [TelemetryAttributes.WorkerOptions]: JSON.stringify({ block }),
                [TelemetryAttributes.JobId]: nextJob === null || nextJob === void 0 ? void 0 : nextJob.id,
            });
            return nextJob;
        }, (_b = (_a = nextJob === null || nextJob === void 0 ? void 0 : nextJob.opts) === null || _a === void 0 ? void 0 : _a.telemetry) === null || _b === void 0 ? void 0 : _b.metadata);
    }
    /**
     * Fetches the next job, optionally blocking on the marker key when the
     * queue is drained. Returns undefined when paused, closing, or when
     * blocking is still pending.
     */
    async _getNextJob(client, bclient, token, { block = true } = {}) {
        if (this.paused) {
            return;
        }
        if (this.closing) {
            return;
        }
        // Only block when the waiting list is known to be empty and no other
        // fetch is already blocking.
        if (this.drained && block && !this.limitUntil && !this.waiting) {
            this.waiting = this.waitForJob(bclient, this.blockUntil);
            try {
                this.blockUntil = await this.waiting;
                if (this.blockUntil <= 0 || this.blockUntil - Date.now() < 1) {
                    return await this.moveToActive(client, token, this.opts.name);
                }
            }
            finally {
                this.waiting = null;
            }
        }
        else {
            if (!this.isRateLimited()) {
                return this.moveToActive(client, token, this.opts.name);
            }
        }
    }
    /**
     * Overrides the rate limit to be active for the next jobs.
     * @deprecated This method is deprecated and will be removed in v6. Use queue.rateLimit method instead.
     * @param expireTimeMs - expire time in ms of this rate limit.
     */
    async rateLimit(expireTimeMs) {
        await this.trace(SpanKind.INTERNAL, 'rateLimit', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.WorkerId]: this.id,
                [TelemetryAttributes.WorkerRateLimit]: expireTimeMs,
            });
            // Sets the limiter key with a PX expiry; its presence rate-limits the queue.
            await this.client.then(client => client.set(this.keys.limiter, Number.MAX_SAFE_INTEGER, 'PX', expireTimeMs));
        });
    }
    /** Smallest allowed block timeout (seconds), depending on Redis capabilities. */
    get minimumBlockTimeout() {
        return this.blockingConnection.capabilities.canBlockFor1Ms
            ? /* 1 millisecond is chosen because the granularity of our timestamps are milliseconds.
            Obviously we can still process much faster than 1 job per millisecond but delays and rate limits
            will never work with more accuracy than 1ms. */
                0.001
            : 0.002;
    }
    /** True while the rate-limit window (limitUntil) has not yet elapsed. */
    isRateLimited() {
        return this.limitUntil > Date.now();
    }
    /**
     * Moves the next job to active via the Lua script, records any rate-limit
     * and delayed-job hints it returns, and builds the Job instance.
     */
    async moveToActive(client, token, name) {
        const [jobData, id, rateLimitDelay, delayUntil] = await this.scripts.moveToActive(client, token, name);
        this.updateDelays(rateLimitDelay, delayUntil);
        return this.nextJobFromJobData(jobData, id, token);
    }
    /**
     * Blocks on the marker key (BZPOPMIN) until a job may be available.
     *
     * @param bclient - the blocking Redis client.
     * @param blockUntil - current timestamp of the next known delayed job (0 if none).
     * @returns the new blockUntil timestamp, 0 when jobs can be fetched
     *   immediately, or Infinity when the worker should back off
     *   (paused, closing, rate limited, or after an error).
     */
    async waitForJob(bclient, blockUntil) {
        if (this.paused) {
            return Infinity;
        }
        let timeout;
        try {
            if (!this.closing && !this.isRateLimited()) {
                let blockTimeout = this.getBlockTimeout(blockUntil);
                if (blockTimeout > 0) {
                    blockTimeout = this.blockingConnection.capabilities.canDoubleTimeout
                        ? blockTimeout
                        : Math.ceil(blockTimeout);
                    // We cannot trust that the blocking connection stays blocking forever
                    // due to issues in Redis and IORedis, so we will reconnect if we
                    // don't get a response in the expected time.
                    timeout = setTimeout(async () => {
                        bclient.disconnect(!this.closing);
                    }, blockTimeout * 1000 + 1000);
                    this.updateDelays(); // reset delays to avoid reusing same values in next iteration
                    // Markers should only be used for un-blocking, so we will handle them in this
                    // function only.
                    const result = await bclient.bzpopmin(this.keys.marker, blockTimeout);
                    if (result) {
                        const [_key, member, score] = result;
                        if (member) {
                            const newBlockUntil = parseInt(score);
                            // Use by pro version as rate limited groups could generate lower blockUntil values
                            // markers only return delays for delayed jobs
                            if (blockUntil && newBlockUntil > blockUntil) {
                                return blockUntil;
                            }
                            return newBlockUntil;
                        }
                    }
                }
                return 0;
            }
        }
        catch (error) {
            if (isNotConnectionError(error)) {
                this.emit('error', error);
            }
            if (!this.closing) {
                await this.delay();
            }
        }
        finally {
            clearTimeout(timeout);
        }
        return Infinity;
    }
    /**
     * Computes how long (in seconds) BZPOPMIN may block, based on the next
     * delayed job timestamp. Non-positive results mean "fetch immediately".
     */
    getBlockTimeout(blockUntil) {
        const opts = this.opts;
        // when there are delayed jobs
        if (blockUntil) {
            const blockDelay = blockUntil - Date.now();
            // when we reach the time to get new jobs
            if (blockDelay <= 0) {
                return blockDelay;
            }
            else if (blockDelay < this.minimumBlockTimeout * 1000) {
                return this.minimumBlockTimeout;
            }
            else {
                // We restrict the maximum block timeout to 10 second to avoid
                // blocking the connection for too long in the case of reconnections
                // reference: https://github.com/taskforcesh/bullmq/issues/1658
                return Math.min(blockDelay / 1000, maximumBlockTimeout);
            }
        }
        else {
            return Math.max(opts.drainDelay, this.minimumBlockTimeout);
        }
    }
    /** Clamps a rate-limit delay so delayed-job promotion is not starved. */
    getRateLimitDelay(delay) {
        // We restrict the maximum limit delay to the configured maximumRateLimitDelay
        // to be able to promote delayed jobs while the queue is rate limited
        return Math.min(delay, this.opts.maximumRateLimitDelay);
    }
    /**
     *
     * This function is exposed only for testing purposes.
     *
     * @param milliseconds - sleep duration; falls back to DELAY_TIME_1 when omitted.
     * @param abortController - allows cancelling the sleep early.
     */
    async delay(milliseconds, abortController) {
        await delay(milliseconds || DELAY_TIME_1, abortController);
    }
updateDelays(limitDelay = 0, delayUntil = 0) {
const clampedLimit = Math.max(limitDelay, 0);
if (clampedLimit > 0) {
this.limitUntil = Date.now() + clampedLimit;
}
else {
this.limitUntil = 0;
}
this.blockUntil = Math.max(delayUntil, 0) || 0;
}
    /**
     * Turns raw job data into a Job instance, emitting 'drained' when there is
     * none. Also schedules the next iteration of repeatable jobs before the
     * current one is processed.
     */
    async nextJobFromJobData(jobData, jobId, token) {
        if (!jobData) {
            // Emit 'drained' only on the transition into the drained state.
            if (!this.drained) {
                this.emit('drained');
                this.drained = true;
            }
        }
        else {
            this.drained = false;
            const job = this.createJob(jobData, jobId);
            job.token = token;
            try {
                await this.retryIfFailed(async () => {
                    // repeatJobKey with < 5 segments identifies a job-scheduler-managed job.
                    if (job.repeatJobKey && job.repeatJobKey.split(':').length < 5) {
                        const jobScheduler = await this.jobScheduler;
                        await jobScheduler.upsertJobScheduler(
                        // Most of these arguments are not really needed
                        // anymore as we read them from the job scheduler itself
                        job.repeatJobKey, job.opts.repeat, job.name, job.data, job.opts, { override: false, producerId: job.id });
                    }
                    else if (job.opts.repeat) {
                        const repeat = await this.repeat;
                        await repeat.updateRepeatableJob(job.name, job.data, job.opts, {
                            override: false,
                        });
                    }
                }, { delayInMs: this.opts.runRetryDelay });
            }
            catch (err) {
                // Emit error but don't throw to avoid breaking current job completion
                // Note: This means the next repeatable job will not be scheduled
                const errorMessage = err instanceof Error ? err.message : String(err);
                const schedulingError = new Error(`Failed to add repeatable job for next iteration: ${errorMessage}`);
                this.emit('error', schedulingError);
                // Return undefined to indicate no next job is available
                return undefined;
            }
            return job;
        }
    }
    /**
     * Runs the processor for a job inside a telemetry span, tracking the job's
     * lock for renewal/cancellation, and routes the outcome to
     * handleCompleted/handleFailed (each with retry-on-Redis-failure).
     *
     * @returns the next job to process (job chaining) or undefined.
     */
    async processJob(job, token, fetchNextCallback = () => true) {
        var _a, _b;
        // (sic) variable name misspelled upstream; carries telemetry propagation metadata.
        const srcPropagationMedatada = (_b = (_a = job.opts) === null || _a === void 0 ? void 0 : _a.telemetry) === null || _b === void 0 ? void 0 : _b.metadata;
        return this.trace(SpanKind.CONSUMER, 'process', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.WorkerId]: this.id,
                [TelemetryAttributes.WorkerName]: this.opts.name,
                [TelemetryAttributes.JobId]: job.id,
                [TelemetryAttributes.JobName]: job.name,
            });
            this.emit('active', job, 'waiting');
            const processedOn = Date.now();
            // abortController is only returned when the processor accepts a signal.
            const abortController = this.lockManager.trackJob(job.id, token, processedOn, this.processorAcceptsSignal);
            try {
                const unrecoverableErrorMessage = this.getUnrecoverableErrorMessage(job);
                if (unrecoverableErrorMessage) {
                    const failed = await this.retryIfFailed(() => {
                        this.lockManager.untrackJob(job.id);
                        return this.handleFailed(new UnrecoverableError(unrecoverableErrorMessage), job, token, fetchNextCallback, span);
                    }, { delayInMs: this.opts.runRetryDelay, span });
                    return failed;
                }
                const result = await this.callProcessJob(job, token, abortController
                    ? abortController.signal
                    : undefined);
                return await this.retryIfFailed(() => {
                    this.lockManager.untrackJob(job.id);
                    return this.handleCompleted(result, job, token, fetchNextCallback, span);
                }, { delayInMs: this.opts.runRetryDelay, span });
            }
            catch (err) {
                const failed = await this.retryIfFailed(() => {
                    this.lockManager.untrackJob(job.id);
                    return this.handleFailed(err, job, token, fetchNextCallback, span);
                }, { delayInMs: this.opts.runRetryDelay, span, onlyEmitError: true });
                return failed;
            }
            finally {
                // Untrack is idempotent; ensures the lock is released on every path.
                this.lockManager.untrackJob(job.id);
                span === null || span === void 0 ? void 0 : span.setAttributes({
                    [TelemetryAttributes.JobFinishedTimestamp]: Date.now(),
                    [TelemetryAttributes.JobProcessedTimestamp]: processedOn,
                });
            }
        }, srcPropagationMedatada);
    }
getUnrecoverableErrorMessage(job) {
if (job.deferredFailure) {
return job.deferredFailure;
}
if (this.opts.maxStartedAttempts &&
this.opts.maxStartedAttempts < job.attemptsStarted) {
return 'job started more than allowable limit';
}
}
    /**
     * Moves a job to the completed state, emits 'completed', and — when the
     * script piggybacks the next job — returns it for immediate processing.
     */
    async handleCompleted(result, job, token, fetchNextCallback = () => true, span) {
        if (!this.connection.closing) {
            const completed = await job.moveToCompleted(result, token, fetchNextCallback() && !(this.closing || this.paused));
            this.emit('completed', job, result, 'active');
            span === null || span === void 0 ? void 0 : span.addEvent('job completed', {
                [TelemetryAttributes.JobResult]: JSON.stringify(result),
            });
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.JobAttemptsMade]: job.attemptsMade,
            });
            // An array result means the script also returned the next job's data.
            if (Array.isArray(completed)) {
                const [jobData, jobId, rateLimitDelay, delayUntil] = completed;
                this.updateDelays(rateLimitDelay, delayUntil);
                return this.nextJobFromJobData(jobData, jobId, token);
            }
        }
    }
    /**
     * Handles a processor failure: routes manual rate limiting and the
     * Delayed/Waiting/WaitingChildren control-flow errors to their special
     * paths, otherwise moves the job to failed and emits 'failed'.
     */
    async handleFailed(err, job, token, fetchNextCallback = () => true, span) {
        if (!this.connection.closing) {
            // Check if the job was manually rate-limited
            if (err.message === RATE_LIMIT_ERROR) {
                const rateLimitTtl = await this.moveLimitedBackToWait(job, token);
                this.limitUntil = rateLimitTtl > 0 ? Date.now() + rateLimitTtl : 0;
                return;
            }
            // Name checks cover errors crossing realm/bundle boundaries where
            // instanceof would fail.
            if (err instanceof DelayedError ||
                err.name == 'DelayedError' ||
                err instanceof WaitingError ||
                err.name == 'WaitingError' ||
                err instanceof WaitingChildrenError ||
                err.name == 'WaitingChildrenError') {
                const client = await this.client;
                return this.moveToActive(client, token, this.opts.name);
            }
            const result = await job.moveToFailed(err, token, fetchNextCallback() && !(this.closing || this.paused));
            this.emit('failed', job, err, 'active');
            span === null || span === void 0 ? void 0 : span.addEvent('job failed', {
                [TelemetryAttributes.JobFailedReason]: err.message,
            });
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.JobAttemptsMade]: job.attemptsMade,
            });
            // Note: result can be undefined if moveToFailed fails (e.g., lock was lost)
            if (Array.isArray(result)) {
                const [jobData, jobId, rateLimitDelay, delayUntil] = result;
                this.updateDelays(rateLimitDelay, delayUntil);
                return this.nextJobFromJobData(jobData, jobId, token);
            }
        }
    }
/**
*
* Pauses the processing of this queue only for this worker.
*/
async pause(doNotWaitActive) {
await this.trace(SpanKind.INTERNAL, 'pause', this.name, async (span) => {
var _a;
span === null || span === void 0 ? void 0 : span.setAttributes({
[TelemetryAttributes.WorkerId]: this.id,
[TelemetryAttributes.WorkerName]: this.opts.name,
[TelemetryAttributes.WorkerDoNotWaitActive]: doNotWaitActive,
});
if (!this.paused) {
this.paused = true;
if (!doNotWaitActive) {
await this.whenCurrentJobsFinished();
}
(_a = this.stalledCheckStopper) === null || _a === void 0 ? void 0 : _a.call(this);
this.emit('paused');
}
});
}
/**
*
* Resumes processing of this worker (if paused).
*/
resume() {
if (!this.running) {
this.trace(SpanKind.INTERNAL, 'resume', this.name, span => {
span === null || span === void 0 ? void 0 : span.setAttributes({
[TelemetryAttributes.WorkerId]: this.id,
[TelemetryAttributes.WorkerName]: this.opts.name,
});
this.paused = false;
if (this.processFn) {
this.run();
}
this.emit('resumed');
});
}
}
/**
*
* Checks if worker is paused.
*
* @returns true if worker is paused, false otherwise.
*/
isPaused() {
return !!this.paused;
}
/**
*
* Checks if worker is currently running.
*
* @returns true if worker is running, false otherwise.
*/
isRunning() {
    // NOTE(review): this.running is presumably toggled by run()/close() —
    // the flag itself is maintained outside this view.
    return this.running;
}
/**
*
* Closes the worker and related redis connections.
*
* This method waits for current jobs to finalize before returning.
*
* @param force - Use force boolean parameter if you do not want to wait for
* current jobs to be processed. When using telemetry, be mindful that it can
* interfere with the proper closure of spans, potentially preventing them from being exported.
*
* @returns Promise that resolves when the worker has been closed.
*/
async close(force = false) {
    // close() is idempotent: if a close is already in flight, return the
    // same promise instead of starting a second shutdown.
    if (this.closing) {
        return this.closing;
    }
    this.closing = (async () => {
        await this.trace(SpanKind.INTERNAL, 'close', this.name, async (span) => {
            var _a, _b;
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.WorkerId]: this.id,
                [TelemetryAttributes.WorkerName]: this.opts.name,
                [TelemetryAttributes.WorkerForceClose]: force,
            });
            this.emit('closing', 'closing queue');
            // Abort any in-progress retry backoff delay.
            (_a = this.abortDelayController) === null || _a === void 0 ? void 0 : _a.abort();
            // Define the async cleanup functions
            const asyncCleanups = [
                // Unless forced, wait for active jobs without reconnecting.
                () => {
                    return force || this.whenCurrentJobsFinished(false);
                },
                () => this.lockManager.close(),
                () => { var _a; return (_a = this.childPool) === null || _a === void 0 ? void 0 : _a.clean(); },
                () => this.blockingConnection.close(force),
                () => this.connection.close(force),
            ];
            // Run cleanup functions sequentially and make sure all are run despite any errors
            for (const cleanup of asyncCleanups) {
                try {
                    await cleanup();
                }
                catch (err) {
                    this.emit('error', err);
                }
            }
            // Stop the stalled-jobs checker if it is still scheduled.
            (_b = this.stalledCheckStopper) === null || _b === void 0 ? void 0 : _b.call(this);
            this.closed = true;
            this.emit('closed');
        });
    })();
    return await this.closing;
}
/**
*
* Manually starts the stalled checker.
* The check will run once as soon as this method is called, and
* then every opts.stalledInterval milliseconds until the worker is closed.
* Note: Normally you do not need to call this method, since the stalled checker
* is automatically started when the worker starts processing jobs after
* calling run. However if you want to process the jobs manually you need
* to call this method to start the stalled checker.
*
* @see {@link https://docs.bullmq.io/patterns/manually-fetching-jobs}
*/
async startStalledCheckTimer() {
if (!this.opts.skipStalledCheck) {
if (!this.closing) {
await this.trace(SpanKind.INTERNAL, 'startStalledCheckTimer', this.name, async (span) => {
span === null || span === void 0 ? void 0 : span.setAttributes({
[TelemetryAttributes.WorkerId]: this.id,
[TelemetryAttributes.WorkerName]: this.opts.name,
});
this.stalledChecker().catch(err => {
this.emit('error', err);
});
});
}
}
}
/**
 * Loop that periodically moves stalled jobs back to the wait list.
 * Runs until the worker is closing or paused; each iteration sleeps for
 * opts.stalledInterval milliseconds and can be woken up early through
 * this.stalledCheckStopper.
 */
async stalledChecker() {
    while (!(this.closing || this.paused)) {
        await this.checkConnectionError(() => this.moveStalledJobsToWait());
        await new Promise(resolve => {
            const timeout = setTimeout(resolve, this.opts.stalledInterval);
            // Expose a stopper so pause()/close() can cancel the sleep early.
            this.stalledCheckStopper = () => {
                clearTimeout(timeout);
                resolve();
            };
        });
    }
}
/**
 * Returns a promise that resolves when active jobs are cleared.
 *
 * @param reconnect - whether the blocking connection should be
 * re-established once in-flight work has finished (defaults to true).
 * @returns promise resolving when the current jobs have finished.
 */
async whenCurrentJobsFinished(reconnect = true) {
    //
    // Force reconnection of blocking connection to abort blocking redis call immediately.
    //
    if (this.waiting) {
        // If we are not going to reconnect, we will not wait for the disconnection.
        await this.blockingConnection.disconnect(reconnect);
    }
    else {
        // No blocking call in flight, so there is nothing to reconnect later.
        reconnect = false;
    }
    // Wait for the main processing loop to drain its current jobs.
    if (this.mainLoopRunning) {
        await this.mainLoopRunning;
    }
    reconnect && (await this.blockingConnection.reconnect());
}
/**
 * Runs the given async function, retrying it while connection errors
 * occur. Non-connection errors are emitted as 'error' events and either
 * swallowed (when opts.onlyEmitError is set) or rethrown to the caller.
 *
 * @param fn - async operation to execute.
 * @param opts - { maxRetries?, delayInMs?, span?, onlyEmitError? }.
 * @returns the value produced by fn, or undefined when the error was
 * only emitted.
 */
async retryIfFailed(fn, opts) {
    var _a;
    let retry = 0;
    // Without an explicit limit, retry connection errors forever.
    const maxRetries = opts.maxRetries || Infinity;
    do {
        try {
            return await fn();
        }
        catch (err) {
            (_a = opts.span) === null || _a === void 0 ? void 0 : _a.recordException(err.message);
            if (isNotConnectionError(err)) {
                // Emit error when not paused or closing; optionally swallow (no throw) when opts.onlyEmitError is set.
                if (!this.paused && !this.closing) {
                    this.emit('error', err);
                }
                if (opts.onlyEmitError) {
                    return;
                }
                else {
                    throw err;
                }
            }
            else {
                // Connection error: back off before the next attempt, unless
                // the worker is shutting down.
                if (opts.delayInMs && !this.closing && !this.closed) {
                    await this.delay(opts.delayInMs, this.abortDelayController);
                }
                if (retry + 1 >= maxRetries) {
                    // If we've reached max retries, throw the last error
                    throw err;
                }
            }
        }
    } while (++retry < maxRetries);
}
async moveStalledJobsToWait() {
await this.trace(SpanKind.INTERNAL, 'moveStalledJobsToWait', this.name, async (span) => {
const stalled = await this.scripts.moveStalledJobsToWait();
span === null || span === void 0 ? void 0 : span.setAttributes({
[TelemetryAttributes.WorkerId]: this.id,
[TelemetryAttributes.WorkerName]: this.opts.name,
[TelemetryAttributes.WorkerStalledJobs]: stalled,
});
stalled.forEach((jobId) => {
span === null || span === void 0 ? void 0 : span.addEvent('job stalled', {
[TelemetryAttributes.JobId]: jobId,
});
this.emit('stalled', jobId, 'active');
});
});
}
moveLimitedBackToWait(job, token) {
return job.moveToWait(token);
}
}
//# sourceMappingURL=worker.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,107 @@
--[[
Adds a delayed job to the queue by doing the following:
- Increases the job counter if needed.
- Creates a new job key with the job data.
- computes timestamp.
- adds to delayed zset.
- Emits a global event 'delayed' if the job is delayed.
Input:
KEYS[1] 'marker',
KEYS[2] 'meta'
KEYS[3] 'id'
KEYS[4] 'delayed'
KEYS[5] 'completed'
KEYS[6] events stream key
ARGV[1] msgpacked arguments array
[1] key prefix,
[2] custom id (use custom instead of one generated automatically)
[3] name
[4] timestamp
[5] parentKey?
[6] parent dependencies key.
[7] parent? {id, queueKey}
[8] repeat job key
[9] deduplication key
ARGV[2] Json stringified job data
ARGV[3] msgpacked options
Output:
jobId - OK
-5 - Missing parent key
]]
local metaKey = KEYS[2]
local idKey = KEYS[3]
local delayedKey = KEYS[4]
local completedKey = KEYS[5]
local eventsKey = KEYS[6]
local jobId
local jobIdKey
local rcall = redis.call
-- args: see the ARGV[1] layout documented in the header comment above.
local args = cmsgpack.unpack(ARGV[1])
local data = ARGV[2]
local parentKey = args[5]
local parent = args[7]
local repeatJobKey = args[8]
local deduplicationKey = args[9]
local parentData

-- Includes
--- @include "includes/addDelayedJob"
--- @include "includes/deduplicateJob"
--- @include "includes/getOrSetMaxEvents"
--- @include "includes/handleDuplicatedJob"
--- @include "includes/storeJob"

-- A parent was referenced: it must exist, otherwise fail with -5.
if parentKey ~= nil then
    if rcall("EXISTS", parentKey) ~= 1 then return -5 end
    parentData = cjson.encode(parent)
end

-- The id counter is always incremented, even when a custom id is used.
local jobCounter = rcall("INCR", idKey)
local maxEvents = getOrSetMaxEvents(metaKey)
local opts = cmsgpack.unpack(ARGV[3])
local parentDependenciesKey = args[6]
local timestamp = args[4]

if args[2] == "" then
    -- No custom id given: use the auto-generated counter value.
    jobId = jobCounter
    jobIdKey = args[1] .. jobId
else
    jobId = args[2]
    jobIdKey = args[1] .. jobId
    -- Custom id already exists: delegate to the duplicate handler.
    if rcall("EXISTS", jobIdKey) == 1 then
        return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
            parentData, parentDependenciesKey, completedKey, eventsKey,
            maxEvents, timestamp)
    end
end

-- Deduplication (opts['de']): return the existing job id when deduplicated.
local deduplicationJobId = deduplicateJob(opts['de'], jobId, delayedKey, deduplicationKey,
    eventsKey, maxEvents, args[1])
if deduplicationJobId then
    return deduplicationJobId
end

-- Persist the job hash, then add it to the delayed zset (KEYS[1] = 'marker').
local delay, priority = storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2],
    opts, timestamp, parentKey, parentData, repeatJobKey)
addDelayedJob(jobId, delayedKey, eventsKey, timestamp, maxEvents, KEYS[1], delay)

-- Check if this job is a child of another job, if so add it to the parents dependencies
if parentDependenciesKey ~= nil then
    rcall("SADD", parentDependenciesKey, jobIdKey)
end

return jobId .. "" -- convert to string

View File

@@ -0,0 +1,198 @@
--[[
Adds a job scheduler, i.e. a job factory that creates jobs based on a given schedule (repeat options).
Input:
KEYS[1] 'repeat' key
KEYS[2] 'delayed' key
KEYS[3] 'wait' key
KEYS[4] 'paused' key
KEYS[5] 'meta' key
KEYS[6] 'prioritized' key
KEYS[7] 'marker' key
KEYS[8] 'id' key
KEYS[9] 'events' key
KEYS[10] 'pc' priority counter
KEYS[11] 'active' key
ARGV[1] next milliseconds
ARGV[2] msgpacked options
[1] name
[2] tz?
[3] pattern?
[4] endDate?
[5] every?
ARGV[3] jobs scheduler id
ARGV[4] Json stringified template data
ARGV[5] msgpacked template opts
ARGV[6] msgpacked delayed opts
ARGV[7] timestamp
ARGV[8] prefix key
ARGV[9] producer key
Output:
repeatableKey - OK
]]
local rcall = redis.call

-- Key aliases for readability.
local repeatKey = KEYS[1]
local delayedKey = KEYS[2]
local waitKey = KEYS[3]
local pausedKey = KEYS[4]
local metaKey = KEYS[5]
local prioritizedKey = KEYS[6]
local eventsKey = KEYS[9]
local nextMillis = ARGV[1]
local jobSchedulerId = ARGV[3]
local templateOpts = cmsgpack.unpack(ARGV[5])
local now = tonumber(ARGV[7])
local prefixKey = ARGV[8]
local jobOpts = cmsgpack.unpack(ARGV[6])

-- Includes
--- @include "includes/addJobFromScheduler"
--- @include "includes/getOrSetMaxEvents"
--- @include "includes/isQueuePaused"
--- @include "includes/removeJob"
--- @include "includes/storeJobScheduler"
--- @include "includes/getJobSchedulerEveryNextMillis"

-- If we are overriding a repeatable job we must delete the delayed job for
-- the next iteration.
local schedulerKey = repeatKey .. ":" .. jobSchedulerId
local maxEvents = getOrSetMaxEvents(metaKey)
local templateData = ARGV[4]

-- Score of this scheduler in the repeat zset; non-nil only when the
-- scheduler already exists and is being updated.
local prevMillis = rcall("ZSCORE", repeatKey, jobSchedulerId)
if prevMillis then
    prevMillis = tonumber(prevMillis)
end
local schedulerOpts = cmsgpack.unpack(ARGV[2])
local every = schedulerOpts['every']

-- For backwards compatibility we also check the offset from the job itself.
-- could be removed in future major versions.
local jobOffset = jobOpts['repeat'] and jobOpts['repeat']['offset'] or 0
local offset = schedulerOpts['offset'] or jobOffset or 0
local newOffset = offset
local updatedEvery = false
if every then
    -- if we changed the 'every' value we need to reset millis to nil
    local millis = prevMillis
    if prevMillis then
        local prevEvery = tonumber(rcall("HGET", schedulerKey, "every"))
        if prevEvery ~= every then
            millis = nil
            updatedEvery = true
        end
    end
    local startDate = schedulerOpts['startDate']
    nextMillis, newOffset = getJobSchedulerEveryNextMillis(millis, every, now, offset, startDate)
end

-- Removes the scheduler's pending job wherever it currently lives:
-- delayed zset, prioritized zset, or the wait/paused list.
-- Returns true when a job was actually removed.
local function removeJobFromScheduler(prefixKey, delayedKey, prioritizedKey, waitKey, pausedKey, jobId, metaKey,
    eventsKey)
    if rcall("ZSCORE", delayedKey, jobId) then
        removeJob(jobId, true, prefixKey, true --[[remove debounce key]] )
        rcall("ZREM", delayedKey, jobId)
        return true
    elseif rcall("ZSCORE", prioritizedKey, jobId) then
        removeJob(jobId, true, prefixKey, true --[[remove debounce key]] )
        rcall("ZREM", prioritizedKey, jobId)
        return true
    else
        local pausedOrWaitKey = waitKey
        if isQueuePaused(metaKey) then
            pausedOrWaitKey = pausedKey
        end
        if rcall("LREM", pausedOrWaitKey, 1, jobId) > 0 then
            removeJob(jobId, true, prefixKey, true --[[remove debounce key]] )
            return true
        end
    end
    return false
end

local removedPrevJob = false
if prevMillis then
    local currentJobId = "repeat:" .. jobSchedulerId .. ":" .. prevMillis
    local currentJobKey = schedulerKey .. ":" .. prevMillis
    -- In theory it should always exist the currentJobKey if there is a prevMillis unless something has
    -- gone really wrong.
    if rcall("EXISTS", currentJobKey) == 1 then
        removedPrevJob = removeJobFromScheduler(prefixKey, delayedKey, prioritizedKey, waitKey, pausedKey, currentJobId,
            metaKey, eventsKey)
    end
end
if removedPrevJob then
    -- The jobs has been removed and we want to replace it, so lets use the same millis.
    if every and not updatedEvery then
        nextMillis = prevMillis
    end
else
    -- Special case where no job was removed, and we need to add the next iteration.
    schedulerOpts['offset'] = newOffset
end

-- Check for job ID collision with existing jobs (in any state)
local jobId = "repeat:" .. jobSchedulerId .. ":" .. nextMillis
local jobKey = prefixKey .. jobId
-- If there's already a job with this ID, in a state
-- that is not updatable (active, completed, failed) we must
-- handle the collision
local hasCollision = false
if rcall("EXISTS", jobKey) == 1 then
    if every then
        -- For 'every' case: try next time slot to avoid collision
        local nextSlotMillis = nextMillis + every
        local nextSlotJobId = "repeat:" .. jobSchedulerId .. ":" .. nextSlotMillis
        local nextSlotJobKey = prefixKey .. nextSlotJobId
        if rcall("EXISTS", nextSlotJobKey) == 0 then
            -- Next slot is free, use it
            nextMillis = nextSlotMillis
            jobId = nextSlotJobId
        else
            -- Next slot also has a job, return error code
            return -11 -- SchedulerJobSlotsBusy
        end
    else
        hasCollision = true
    end
end

local delay = nextMillis - now
-- Fast Clamp delay to minimum of 0
if delay < 0 then
    delay = 0
end
local nextJobKey = schedulerKey .. ":" .. nextMillis
if not hasCollision or removedPrevJob then
    -- jobId already calculated above during collision check
    storeJobScheduler(jobSchedulerId, schedulerKey, repeatKey, nextMillis, schedulerOpts, templateData, templateOpts)
    -- Consume one id from the queue's id counter (KEYS[8] is the 'id' key).
    rcall("INCR", KEYS[8])
    addJobFromScheduler(nextJobKey, jobId, jobOpts, waitKey, pausedKey, KEYS[11], metaKey, prioritizedKey, KEYS[10],
        delayedKey, KEYS[7], eventsKey, schedulerOpts['name'], maxEvents, now, templateData, jobSchedulerId, delay)
elseif hasCollision then
    -- For 'pattern' case: return error code
    return -10 -- SchedulerJobIdCollision
end

-- ARGV[9] is the producer key; record the next repeatable job id on it.
if ARGV[9] ~= "" then
    rcall("HSET", ARGV[9], "nrjid", jobId)
end

return {jobId .. "", delay}

View File

@@ -0,0 +1,30 @@
--[[
Add job log
Input:
KEYS[1] job id key
KEYS[2] job logs key
ARGV[1] id
ARGV[2] log
ARGV[3] keepLogs
Output:
-1 - Missing job.
]]
local rcall = redis.call

-- Refuse to log against a job key that no longer exists.
if rcall("EXISTS", KEYS[1]) ~= 1 then
    return -1
end

-- Append the log line and capture the resulting list length.
local totalLogs = rcall("RPUSH", KEYS[2], ARGV[2])

-- Without a retention limit, report the full log count.
if ARGV[3] == '' then
    return totalLogs
end

-- Trim to the newest `keepLogs` entries and report the retained count.
local keepLogs = tonumber(ARGV[3])
rcall("LTRIM", KEYS[2], -keepLogs, -1)
return math.min(keepLogs, totalLogs)

View File

@@ -0,0 +1,98 @@
--[[
Adds a parent job to the queue by doing the following:
- Increases the job counter if needed.
- Creates a new job key with the job data.
- adds the job to the waiting-children zset
Input:
KEYS[1] 'meta'
KEYS[2] 'id'
KEYS[3] 'delayed'
KEYS[4] 'waiting-children'
KEYS[5] 'completed'
KEYS[6] events stream key
ARGV[1] msgpacked arguments array
[1] key prefix,
[2] custom id (will not generate one automatically)
[3] name
[4] timestamp
[5] parentKey?
[6] parent dependencies key.
[7] parent? {id, queueKey}
[8] repeat job key
[9] deduplication key
ARGV[2] Json stringified job data
ARGV[3] msgpacked options
Output:
jobId - OK
-5 - Missing parent key
]]
local metaKey = KEYS[1]
local idKey = KEYS[2]
local completedKey = KEYS[5]
local eventsKey = KEYS[6]
local jobId
local jobIdKey
local rcall = redis.call
-- args: see the ARGV[1] layout documented in the header comment above.
local args = cmsgpack.unpack(ARGV[1])
local data = ARGV[2]
local opts = cmsgpack.unpack(ARGV[3])
local parentKey = args[5]
local parent = args[7]
local repeatJobKey = args[8]
local deduplicationKey = args[9]
local parentData

-- Includes
--- @include "includes/getOrSetMaxEvents"
--- @include "includes/handleDuplicatedJob"
--- @include "includes/storeJob"

-- A parent was referenced: it must exist, otherwise fail with -5.
if parentKey ~= nil then
    if rcall("EXISTS", parentKey) ~= 1 then return -5 end
    parentData = cjson.encode(parent)
end

-- The id counter is always incremented, even when a custom id is used.
local jobCounter = rcall("INCR", idKey)
local maxEvents = getOrSetMaxEvents(metaKey)
local parentDependenciesKey = args[6]
local timestamp = args[4]

if args[2] == "" then
    -- No custom id given: use the auto-generated counter value.
    jobId = jobCounter
    jobIdKey = args[1] .. jobId
else
    jobId = args[2]
    jobIdKey = args[1] .. jobId
    -- Custom id already exists: delegate to the duplicate handler.
    if rcall("EXISTS", jobIdKey) == 1 then
        return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
            parentData, parentDependenciesKey, completedKey, eventsKey,
            maxEvents, timestamp)
    end
end

-- Store the job.
storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2], opts, timestamp,
    parentKey, parentData, repeatJobKey)

-- Parent jobs start in the waiting-children zset, scored by timestamp.
local waitChildrenKey = KEYS[4]
rcall("ZADD", waitChildrenKey, timestamp, jobId)
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event",
    "waiting-children", "jobId", jobId)

-- Check if this job is a child of another job, if so add it to the parents dependencies
if parentDependenciesKey ~= nil then
    rcall("SADD", parentDependenciesKey, jobIdKey)
end

return jobId .. "" -- convert to string

View File

@@ -0,0 +1,117 @@
--[[
Adds a prioritized job to the queue by doing the following:
- Increases the job counter if needed.
- Creates a new job key with the job data.
- Adds the job to the "added" list so that workers get notified.
Input:
KEYS[1] 'marker',
KEYS[2] 'meta'
KEYS[3] 'id'
KEYS[4] 'prioritized'
KEYS[5] 'delayed'
KEYS[6] 'completed'
KEYS[7] 'active'
KEYS[8] events stream key
KEYS[9] 'pc' priority counter
ARGV[1] msgpacked arguments array
[1] key prefix,
[2] custom id (will not generate one automatically)
[3] name
[4] timestamp
[5] parentKey?
[6] parent dependencies key.
[7] parent? {id, queueKey}
[8] repeat job key
[9] deduplication key
ARGV[2] Json stringified job data
ARGV[3] msgpacked options
Output:
jobId - OK
-5 - Missing parent key
]]
local metaKey = KEYS[2]
local idKey = KEYS[3]
local priorityKey = KEYS[4]
local completedKey = KEYS[6]
local activeKey = KEYS[7]
local eventsKey = KEYS[8]
local priorityCounterKey = KEYS[9]
local jobId
local jobIdKey
local rcall = redis.call
-- args: see the ARGV[1] layout documented in the header comment above.
local args = cmsgpack.unpack(ARGV[1])
local data = ARGV[2]
local opts = cmsgpack.unpack(ARGV[3])
local parentKey = args[5]
local parent = args[7]
local repeatJobKey = args[8]
local deduplicationKey = args[9]
local parentData

-- Includes
--- @include "includes/addJobWithPriority"
--- @include "includes/deduplicateJob"
--- @include "includes/storeJob"
--- @include "includes/getOrSetMaxEvents"
--- @include "includes/handleDuplicatedJob"
--- @include "includes/isQueuePausedOrMaxed"

-- A parent was referenced: it must exist, otherwise fail with -5.
if parentKey ~= nil then
    if rcall("EXISTS", parentKey) ~= 1 then return -5 end
    parentData = cjson.encode(parent)
end

-- The id counter is always incremented, even when a custom id is used.
local jobCounter = rcall("INCR", idKey)
local maxEvents = getOrSetMaxEvents(metaKey)
local parentDependenciesKey = args[6]
local timestamp = args[4]

if args[2] == "" then
    -- No custom id given: use the auto-generated counter value.
    jobId = jobCounter
    jobIdKey = args[1] .. jobId
else
    jobId = args[2]
    jobIdKey = args[1] .. jobId
    -- Custom id already exists: delegate to the duplicate handler.
    if rcall("EXISTS", jobIdKey) == 1 then
        return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
            parentData, parentDependenciesKey, completedKey, eventsKey,
            maxEvents, timestamp)
    end
end

-- Deduplication (opts['de']): KEYS[5] is the 'delayed' key.
local deduplicationJobId = deduplicateJob(opts['de'], jobId, KEYS[5],
    deduplicationKey, eventsKey, maxEvents, args[1])
if deduplicationJobId then
    return deduplicationJobId
end

-- Store the job.
local delay, priority = storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2],
    opts, timestamp, parentKey, parentData,
    repeatJobKey)

-- Add the job to the prioritized set
local isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, activeKey)
addJobWithPriority( KEYS[1], priorityKey, priority, jobId, priorityCounterKey, isPausedOrMaxed)

-- Emit waiting event
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "waiting",
    "jobId", jobId)

-- Check if this job is a child of another job, if so add it to the parents dependencies
if parentDependenciesKey ~= nil then
    rcall("SADD", parentDependenciesKey, jobIdKey)
end

return jobId .. "" -- convert to string

View File

@@ -0,0 +1,84 @@
--[[
Adds a repeatable job
Input:
KEYS[1] 'repeat' key
KEYS[2] 'delayed' key
ARGV[1] next milliseconds
ARGV[2] msgpacked options
[1] name
[2] tz?
[3] pattern?
[4] endDate?
[5] every?
ARGV[3] legacy custom key TODO: remove this logic in next breaking change
ARGV[4] custom key
ARGV[5] prefix key
Output:
repeatableKey - OK
]]
local rcall = redis.call
local repeatKey = KEYS[1]
local delayedKey = KEYS[2]
local nextMillis = ARGV[1]
local legacyCustomKey = ARGV[3]
local customKey = ARGV[4]
local prefixKey = ARGV[5]

-- Includes
--- @include "includes/removeJob"

-- Registers the repeatable job under customKey in the repeat zset and
-- persists its options in a companion hash. Returns the key used.
local function storeRepeatableJob(repeatKey, customKey, nextMillis, rawOpts)
    rcall("ZADD", repeatKey, nextMillis, customKey)
    local opts = cmsgpack.unpack(rawOpts)
    -- Only persist the optional fields that were actually provided.
    local optionalValues = {}
    if opts['tz'] then
        table.insert(optionalValues, "tz")
        table.insert(optionalValues, opts['tz'])
    end
    if opts['pattern'] then
        table.insert(optionalValues, "pattern")
        table.insert(optionalValues, opts['pattern'])
    end
    if opts['endDate'] then
        table.insert(optionalValues, "endDate")
        table.insert(optionalValues, opts['endDate'])
    end
    if opts['every'] then
        table.insert(optionalValues, "every")
        table.insert(optionalValues, opts['every'])
    end
    rcall("HMSET", repeatKey .. ":" .. customKey, "name", opts['name'],
        unpack(optionalValues))
    return customKey
end

-- If we are overriding a repeatable job we must delete the delayed job for
-- the next iteration.
local prevMillis = rcall("ZSCORE", repeatKey, customKey)
if prevMillis then
    local delayedJobId = "repeat:" .. customKey .. ":" .. prevMillis
    local nextDelayedJobId = repeatKey .. ":" .. customKey .. ":" .. nextMillis
    -- Only remove the previous iteration when its replacement is not
    -- already stored.
    if rcall("ZSCORE", delayedKey, delayedJobId)
        and rcall("EXISTS", nextDelayedJobId) ~= 1 then
        removeJob(delayedJobId, true, prefixKey, true --[[remove debounce key]])
        rcall("ZREM", delayedKey, delayedJobId)
    end
end

-- Keep backwards compatibility with old repeatable jobs (<= 3.0.0)
if rcall("ZSCORE", repeatKey, legacyCustomKey) ~= false then
    return storeRepeatableJob(repeatKey, legacyCustomKey, nextMillis, ARGV[2])
end
return storeRepeatableJob(repeatKey, customKey, nextMillis, ARGV[2])

View File

@@ -0,0 +1,122 @@
--[[
Adds a job to the queue by doing the following:
- Increases the job counter if needed.
- Creates a new job key with the job data.
- if delayed:
- computes timestamp.
- adds to delayed zset.
- Emits a global event 'delayed' if the job is delayed.
- if not delayed
- Adds the jobId to the wait/paused list in one of three ways:
- LIFO
- FIFO
- prioritized.
- Adds the job to the "added" list so that workers get notified.
Input:
KEYS[1] 'wait',
KEYS[2] 'paused'
KEYS[3] 'meta'
KEYS[4] 'id'
KEYS[5] 'completed'
KEYS[6] 'delayed'
KEYS[7] 'active'
KEYS[8] events stream key
KEYS[9] marker key
ARGV[1] msgpacked arguments array
[1] key prefix,
[2] custom id (will not generate one automatically)
[3] name
[4] timestamp
[5] parentKey?
[6] parent dependencies key.
[7] parent? {id, queueKey}
[8] repeat job key
[9] deduplication key
ARGV[2] Json stringified job data
ARGV[3] msgpacked options
Output:
jobId - OK
-5 - Missing parent key
]]
local eventsKey = KEYS[8]
local jobId
local jobIdKey
local rcall = redis.call
-- args: see the ARGV[1] layout documented in the header comment above.
local args = cmsgpack.unpack(ARGV[1])
local data = ARGV[2]
local opts = cmsgpack.unpack(ARGV[3])
local parentKey = args[5]
local parent = args[7]
local repeatJobKey = args[8]
local deduplicationKey = args[9]
local parentData

-- Includes
--- @include "includes/addJobInTargetList"
--- @include "includes/deduplicateJob"
--- @include "includes/getOrSetMaxEvents"
--- @include "includes/getTargetQueueList"
--- @include "includes/handleDuplicatedJob"
--- @include "includes/storeJob"

-- A parent was referenced: it must exist, otherwise fail with -5.
if parentKey ~= nil then
    if rcall("EXISTS", parentKey) ~= 1 then return -5 end
    parentData = cjson.encode(parent)
end

-- The id counter (KEYS[4]) is always incremented, even with a custom id.
local jobCounter = rcall("INCR", KEYS[4])
local metaKey = KEYS[3]
local maxEvents = getOrSetMaxEvents(metaKey)
local parentDependenciesKey = args[6]
local timestamp = args[4]

if args[2] == "" then
    -- No custom id given: use the auto-generated counter value.
    jobId = jobCounter
    jobIdKey = args[1] .. jobId
else
    jobId = args[2]
    jobIdKey = args[1] .. jobId
    -- Custom id already exists: delegate to the duplicate handler
    -- (KEYS[5] is the 'completed' key).
    if rcall("EXISTS", jobIdKey) == 1 then
        return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
            parentData, parentDependenciesKey, KEYS[5], eventsKey,
            maxEvents, timestamp)
    end
end

-- Deduplication (opts['de']): KEYS[6] is the 'delayed' key.
local deduplicationJobId = deduplicateJob(opts['de'], jobId, KEYS[6],
    deduplicationKey, eventsKey, maxEvents, args[1])
if deduplicationJobId then
    return deduplicationJobId
end

-- Store the job.
storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2], opts, timestamp,
    parentKey, parentData, repeatJobKey)

-- Choose wait vs paused list (KEYS[7]='active', KEYS[1]='wait', KEYS[2]='paused').
local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[7], KEYS[1], KEYS[2])

-- LIFO or FIFO
local pushCmd = opts['lifo'] and 'RPUSH' or 'LPUSH'
addJobInTargetList(target, KEYS[9], pushCmd, isPausedOrMaxed, jobId)

-- Emit waiting event
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "waiting",
    "jobId", jobId)

-- Check if this job is a child of another job, if so add it to the parents dependencies
if parentDependenciesKey ~= nil then
    rcall("SADD", parentDependenciesKey, jobIdKey)
end

return jobId .. "" -- convert to string

View File

@@ -0,0 +1,55 @@
--[[
Change job delay when it is in delayed set.
Input:
KEYS[1] delayed key
KEYS[2] meta key
KEYS[3] marker key
KEYS[4] events stream
ARGV[1] delay
ARGV[2] timestamp
ARGV[3] the id of the job
ARGV[4] job key
Output:
0 - OK
-1 - Missing job.
-3 - Job not in delayed set.
Events:
- delayed key.
]]
local rcall = redis.call

-- Includes
--- @include "includes/addDelayMarkerIfNeeded"
--- @include "includes/getDelayedScore"
--- @include "includes/getOrSetMaxEvents"

-- ARGV[4] is the job key: the job hash must exist, otherwise -1.
if rcall("EXISTS", ARGV[4]) == 1 then
    local jobId = ARGV[3]
    local delay = tonumber(ARGV[1])
    -- Compute the new delayed zset score from the timestamp and delay.
    local score, delayedTimestamp = getDelayedScore(KEYS[1], ARGV[2], delay)
    -- The job must currently be in the delayed zset, otherwise -3.
    local numRemovedElements = rcall("ZREM", KEYS[1], jobId)
    if numRemovedElements < 1 then
        return -3
    end
    rcall("HSET", ARGV[4], "delay", delay)
    rcall("ZADD", KEYS[1], score, jobId)
    local maxEvents = getOrSetMaxEvents(KEYS[2])
    rcall("XADD", KEYS[4], "MAXLEN", "~", maxEvents, "*", "event", "delayed",
        "jobId", jobId, "delay", delayedTimestamp)
    -- mark that a delayed job is available
    addDelayMarkerIfNeeded(KEYS[3], KEYS[1])
    return 0
else
    return -1
end

View File

@@ -0,0 +1,68 @@
--[[
Change job priority
Input:
KEYS[1] 'wait',
KEYS[2] 'paused'
KEYS[3] 'meta'
KEYS[4] 'prioritized'
KEYS[5] 'active'
KEYS[6] 'pc' priority counter
KEYS[7] 'marker'
ARGV[1] priority value
ARGV[2] prefix key
ARGV[3] job id
ARGV[4] lifo
Output:
0 - OK
-1 - Missing job
]]
local jobId = ARGV[3]
local jobKey = ARGV[2] .. jobId
local priority = tonumber(ARGV[1])
local rcall = redis.call

-- Includes
--- @include "includes/addJobInTargetList"
--- @include "includes/addJobWithPriority"
--- @include "includes/getTargetQueueList"
--- @include "includes/pushBackJobWithPriority"

-- Re-queues the job under its new priority: priority 0 goes to the plain
-- wait/paused list, any other priority goes to the prioritized zset.
local function reAddJobWithNewPriority( prioritizedKey, markerKey, targetKey,
    priorityCounter, lifo, priority, jobId, isPausedOrMaxed)
    if priority == 0 then
        local pushCmd = lifo and 'RPUSH' or 'LPUSH'
        addJobInTargetList(targetKey, markerKey, pushCmd, isPausedOrMaxed, jobId)
    else
        if lifo then
            pushBackJobWithPriority(prioritizedKey, priority, jobId)
        else
            addJobWithPriority(markerKey, prioritizedKey, priority, jobId,
                priorityCounter, isPausedOrMaxed)
        end
    end
end

if rcall("EXISTS", jobKey) == 1 then
    local metaKey = KEYS[3]
    local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[5], KEYS[1], KEYS[2])
    local prioritizedKey = KEYS[4]
    local priorityCounterKey = KEYS[6]
    local markerKey = KEYS[7]
    -- Re-add with the new priority
    -- The job is either in the prioritized zset or in the wait/paused list;
    -- whichever removal succeeds determines where it was.
    if rcall("ZREM", prioritizedKey, jobId) > 0 then
        reAddJobWithNewPriority( prioritizedKey, markerKey, target,
            priorityCounterKey, ARGV[4] == '1', priority, jobId, isPausedOrMaxed)
    elseif rcall("LREM", target, -1, jobId) > 0 then
        reAddJobWithNewPriority( prioritizedKey, markerKey, target,
            priorityCounterKey, ARGV[4] == '1', priority, jobId, isPausedOrMaxed)
    end
    rcall("HSET", jobKey, "priority", priority)
    return 0
else
    return -1
end

View File

@@ -0,0 +1,59 @@
--[[
Remove jobs from the specific set.
Input:
KEYS[1] set key,
KEYS[2] events stream key
KEYS[3] repeat key
ARGV[1] jobKey prefix
ARGV[2] timestamp
ARGV[3] limit the number of jobs to be removed. 0 is unlimited
ARGV[4] set name, can be any of 'wait', 'active', 'paused', 'delayed', 'completed', or 'failed'
]]
local rcall = redis.call
local repeatKey = KEYS[3]
local rangeStart = 0
local rangeEnd = -1
local limit = tonumber(ARGV[3])

-- If we're only deleting _n_ items, avoid retrieving all items
-- for faster performance
--
-- Start from the tail of the list, since that's where oldest elements
-- are generally added for FIFO lists
if limit > 0 then
    rangeStart = -1 - limit + 1
    rangeEnd = -1
end

-- Includes
--- @include "includes/cleanList"
--- @include "includes/cleanSet"

-- Both helpers return a pair: result[2] is emitted as the cleaned count,
-- result[1] is handed back to the caller.
local result
if ARGV[4] == "active" then
    result = cleanList(KEYS[1], ARGV[1], rangeStart, rangeEnd, ARGV[2], false --[[ hasFinished ]],
        repeatKey)
elseif ARGV[4] == "delayed" then
    rangeEnd = "+inf"
    result = cleanSet(KEYS[1], ARGV[1], rangeEnd, ARGV[2], limit,
        {"processedOn", "timestamp"}, false --[[ hasFinished ]], repeatKey)
elseif ARGV[4] == "prioritized" then
    rangeEnd = "+inf"
    result = cleanSet(KEYS[1], ARGV[1], rangeEnd, ARGV[2], limit,
        {"timestamp"}, false --[[ hasFinished ]], repeatKey)
elseif ARGV[4] == "wait" or ARGV[4] == "paused" then
    result = cleanList(KEYS[1], ARGV[1], rangeStart, rangeEnd, ARGV[2], true --[[ hasFinished ]],
        repeatKey)
else
    -- Finished sets (completed/failed): clean by finishedOn up to the
    -- provided timestamp.
    rangeEnd = ARGV[2]
    -- No need to pass repeat key as in that moment job won't be related to a job scheduler
    result = cleanSet(KEYS[1], ARGV[1], rangeEnd, ARGV[2], limit,
        {"finishedOn"}, true --[[ hasFinished ]])
end
rcall("XADD", KEYS[2], "*", "event", "cleaned", "count", result[2])
return result[1]

Some files were not shown because too many files have changed in this diff Show More