Projektstart
This commit is contained in:
107
backend/node_modules/bullmq/dist/cjs/commands/addDelayedJob-6.lua
generated
vendored
Normal file
107
backend/node_modules/bullmq/dist/cjs/commands/addDelayedJob-6.lua
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
--[[
|
||||
Adds a delayed job to the queue by doing the following:
|
||||
- Increases the job counter if needed.
|
||||
- Creates a new job key with the job data.
|
||||
|
||||
- computes timestamp.
|
||||
- adds to delayed zset.
|
||||
- Emits a global event 'delayed' if the job is delayed.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'marker',
|
||||
KEYS[2] 'meta'
|
||||
KEYS[3] 'id'
|
||||
KEYS[4] 'delayed'
|
||||
KEYS[5] 'completed'
|
||||
KEYS[6] events stream key
|
||||
|
||||
ARGV[1] msgpacked arguments array
|
||||
[1] key prefix,
|
||||
[2] custom id (use custom instead of one generated automatically)
|
||||
[3] name
|
||||
[4] timestamp
|
||||
[5] parentKey?
|
||||
[6] parent dependencies key.
|
||||
[7] parent? {id, queueKey}
|
||||
[8] repeat job key
|
||||
[9] deduplication key
|
||||
|
||||
ARGV[2] Json stringified job data
|
||||
ARGV[3] msgpacked options
|
||||
|
||||
Output:
|
||||
jobId - OK
|
||||
-5 - Missing parent key
|
||||
]]
|
||||
local metaKey = KEYS[2]
|
||||
local idKey = KEYS[3]
|
||||
local delayedKey = KEYS[4]
|
||||
|
||||
local completedKey = KEYS[5]
|
||||
local eventsKey = KEYS[6]
|
||||
|
||||
local jobId
|
||||
local jobIdKey
|
||||
local rcall = redis.call
|
||||
|
||||
local args = cmsgpack.unpack(ARGV[1])
|
||||
|
||||
local data = ARGV[2]
|
||||
|
||||
local parentKey = args[5]
|
||||
local parent = args[7]
|
||||
local repeatJobKey = args[8]
|
||||
local deduplicationKey = args[9]
|
||||
local parentData
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addDelayedJob"
|
||||
--- @include "includes/deduplicateJob"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/handleDuplicatedJob"
|
||||
--- @include "includes/storeJob"
|
||||
|
||||
if parentKey ~= nil then
|
||||
if rcall("EXISTS", parentKey) ~= 1 then return -5 end
|
||||
|
||||
parentData = cjson.encode(parent)
|
||||
end
|
||||
|
||||
local jobCounter = rcall("INCR", idKey)
|
||||
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
local opts = cmsgpack.unpack(ARGV[3])
|
||||
|
||||
local parentDependenciesKey = args[6]
|
||||
local timestamp = args[4]
|
||||
|
||||
if args[2] == "" then
|
||||
jobId = jobCounter
|
||||
jobIdKey = args[1] .. jobId
|
||||
else
|
||||
jobId = args[2]
|
||||
jobIdKey = args[1] .. jobId
|
||||
if rcall("EXISTS", jobIdKey) == 1 then
|
||||
return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
|
||||
parentData, parentDependenciesKey, completedKey, eventsKey,
|
||||
maxEvents, timestamp)
|
||||
end
|
||||
end
|
||||
|
||||
local deduplicationJobId = deduplicateJob(opts['de'], jobId, delayedKey, deduplicationKey,
|
||||
eventsKey, maxEvents, args[1])
|
||||
if deduplicationJobId then
|
||||
return deduplicationJobId
|
||||
end
|
||||
|
||||
local delay, priority = storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2],
|
||||
opts, timestamp, parentKey, parentData, repeatJobKey)
|
||||
|
||||
addDelayedJob(jobId, delayedKey, eventsKey, timestamp, maxEvents, KEYS[1], delay)
|
||||
|
||||
-- Check if this job is a child of another job, if so add it to the parents dependencies
|
||||
if parentDependenciesKey ~= nil then
|
||||
rcall("SADD", parentDependenciesKey, jobIdKey)
|
||||
end
|
||||
|
||||
return jobId .. "" -- convert to string
|
||||
198
backend/node_modules/bullmq/dist/cjs/commands/addJobScheduler-11.lua
generated
vendored
Normal file
198
backend/node_modules/bullmq/dist/cjs/commands/addJobScheduler-11.lua
generated
vendored
Normal file
@@ -0,0 +1,198 @@
|
||||
--[[
|
||||
Adds a job scheduler, i.e. a job factory that creates jobs based on a given schedule (repeat options).
|
||||
|
||||
Input:
|
||||
KEYS[1] 'repeat' key
|
||||
KEYS[2] 'delayed' key
|
||||
KEYS[3] 'wait' key
|
||||
KEYS[4] 'paused' key
|
||||
KEYS[5] 'meta' key
|
||||
KEYS[6] 'prioritized' key
|
||||
KEYS[7] 'marker' key
|
||||
KEYS[8] 'id' key
|
||||
KEYS[9] 'events' key
|
||||
KEYS[10] 'pc' priority counter
|
||||
KEYS[11] 'active' key
|
||||
|
||||
ARGV[1] next milliseconds
|
||||
ARGV[2] msgpacked options
|
||||
[1] name
|
||||
[2] tz?
|
||||
[3] pattern?
|
||||
[4] endDate?
|
||||
[5] every?
|
||||
ARGV[3] jobs scheduler id
|
||||
ARGV[4] Json stringified template data
|
||||
ARGV[5] mspacked template opts
|
||||
ARGV[6] msgpacked delayed opts
|
||||
ARGV[7] timestamp
|
||||
ARGV[8] prefix key
|
||||
ARGV[9] producer key
|
||||
|
||||
Output:
|
||||
repeatableKey - OK
|
||||
]] local rcall = redis.call
|
||||
local repeatKey = KEYS[1]
|
||||
local delayedKey = KEYS[2]
|
||||
local waitKey = KEYS[3]
|
||||
local pausedKey = KEYS[4]
|
||||
local metaKey = KEYS[5]
|
||||
local prioritizedKey = KEYS[6]
|
||||
local eventsKey = KEYS[9]
|
||||
|
||||
local nextMillis = ARGV[1]
|
||||
local jobSchedulerId = ARGV[3]
|
||||
local templateOpts = cmsgpack.unpack(ARGV[5])
|
||||
local now = tonumber(ARGV[7])
|
||||
local prefixKey = ARGV[8]
|
||||
local jobOpts = cmsgpack.unpack(ARGV[6])
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addJobFromScheduler"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/isQueuePaused"
|
||||
--- @include "includes/removeJob"
|
||||
--- @include "includes/storeJobScheduler"
|
||||
--- @include "includes/getJobSchedulerEveryNextMillis"
|
||||
|
||||
-- If we are overriding a repeatable job we must delete the delayed job for
|
||||
-- the next iteration.
|
||||
local schedulerKey = repeatKey .. ":" .. jobSchedulerId
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
|
||||
local templateData = ARGV[4]
|
||||
|
||||
local prevMillis = rcall("ZSCORE", repeatKey, jobSchedulerId)
|
||||
if prevMillis then
|
||||
prevMillis = tonumber(prevMillis)
|
||||
end
|
||||
local schedulerOpts = cmsgpack.unpack(ARGV[2])
|
||||
|
||||
local every = schedulerOpts['every']
|
||||
|
||||
-- For backwards compatibility we also check the offset from the job itself.
|
||||
-- could be removed in future major versions.
|
||||
local jobOffset = jobOpts['repeat'] and jobOpts['repeat']['offset'] or 0
|
||||
local offset = schedulerOpts['offset'] or jobOffset or 0
|
||||
local newOffset = offset
|
||||
|
||||
local updatedEvery = false
|
||||
if every then
|
||||
-- if we changed the 'every' value we need to reset millis to nil
|
||||
local millis = prevMillis
|
||||
if prevMillis then
|
||||
local prevEvery = tonumber(rcall("HGET", schedulerKey, "every"))
|
||||
if prevEvery ~= every then
|
||||
millis = nil
|
||||
updatedEvery = true
|
||||
end
|
||||
end
|
||||
|
||||
local startDate = schedulerOpts['startDate']
|
||||
nextMillis, newOffset = getJobSchedulerEveryNextMillis(millis, every, now, offset, startDate)
|
||||
end
|
||||
|
||||
local function removeJobFromScheduler(prefixKey, delayedKey, prioritizedKey, waitKey, pausedKey, jobId, metaKey,
|
||||
eventsKey)
|
||||
if rcall("ZSCORE", delayedKey, jobId) then
|
||||
removeJob(jobId, true, prefixKey, true --[[remove debounce key]] )
|
||||
rcall("ZREM", delayedKey, jobId)
|
||||
return true
|
||||
elseif rcall("ZSCORE", prioritizedKey, jobId) then
|
||||
removeJob(jobId, true, prefixKey, true --[[remove debounce key]] )
|
||||
rcall("ZREM", prioritizedKey, jobId)
|
||||
return true
|
||||
else
|
||||
local pausedOrWaitKey = waitKey
|
||||
if isQueuePaused(metaKey) then
|
||||
pausedOrWaitKey = pausedKey
|
||||
end
|
||||
|
||||
if rcall("LREM", pausedOrWaitKey, 1, jobId) > 0 then
|
||||
removeJob(jobId, true, prefixKey, true --[[remove debounce key]] )
|
||||
return true
|
||||
end
|
||||
end
|
||||
|
||||
return false
|
||||
end
|
||||
|
||||
local removedPrevJob = false
|
||||
if prevMillis then
|
||||
local currentJobId = "repeat:" .. jobSchedulerId .. ":" .. prevMillis
|
||||
local currentJobKey = schedulerKey .. ":" .. prevMillis
|
||||
|
||||
-- In theory it should always exist the currentJobKey if there is a prevMillis unless something has
|
||||
-- gone really wrong.
|
||||
if rcall("EXISTS", currentJobKey) == 1 then
|
||||
removedPrevJob = removeJobFromScheduler(prefixKey, delayedKey, prioritizedKey, waitKey, pausedKey, currentJobId,
|
||||
metaKey, eventsKey)
|
||||
end
|
||||
end
|
||||
|
||||
if removedPrevJob then
|
||||
-- The jobs has been removed and we want to replace it, so lets use the same millis.
|
||||
if every and not updatedEvery then
|
||||
nextMillis = prevMillis
|
||||
end
|
||||
else
|
||||
-- Special case where no job was removed, and we need to add the next iteration.
|
||||
schedulerOpts['offset'] = newOffset
|
||||
end
|
||||
|
||||
-- Check for job ID collision with existing jobs (in any state)
|
||||
local jobId = "repeat:" .. jobSchedulerId .. ":" .. nextMillis
|
||||
local jobKey = prefixKey .. jobId
|
||||
|
||||
-- If there's already a job with this ID, in a state
|
||||
-- that is not updatable (active, completed, failed) we must
|
||||
-- handle the collision
|
||||
local hasCollision = false
|
||||
if rcall("EXISTS", jobKey) == 1 then
|
||||
if every then
|
||||
-- For 'every' case: try next time slot to avoid collision
|
||||
local nextSlotMillis = nextMillis + every
|
||||
local nextSlotJobId = "repeat:" .. jobSchedulerId .. ":" .. nextSlotMillis
|
||||
local nextSlotJobKey = prefixKey .. nextSlotJobId
|
||||
|
||||
if rcall("EXISTS", nextSlotJobKey) == 0 then
|
||||
-- Next slot is free, use it
|
||||
nextMillis = nextSlotMillis
|
||||
jobId = nextSlotJobId
|
||||
else
|
||||
-- Next slot also has a job, return error code
|
||||
return -11 -- SchedulerJobSlotsBusy
|
||||
end
|
||||
else
|
||||
hasCollision = true
|
||||
end
|
||||
end
|
||||
|
||||
local delay = nextMillis - now
|
||||
|
||||
-- Fast Clamp delay to minimum of 0
|
||||
if delay < 0 then
|
||||
delay = 0
|
||||
end
|
||||
|
||||
local nextJobKey = schedulerKey .. ":" .. nextMillis
|
||||
|
||||
if not hasCollision or removedPrevJob then
|
||||
-- jobId already calculated above during collision check
|
||||
|
||||
storeJobScheduler(jobSchedulerId, schedulerKey, repeatKey, nextMillis, schedulerOpts, templateData, templateOpts)
|
||||
|
||||
rcall("INCR", KEYS[8])
|
||||
|
||||
addJobFromScheduler(nextJobKey, jobId, jobOpts, waitKey, pausedKey, KEYS[11], metaKey, prioritizedKey, KEYS[10],
|
||||
delayedKey, KEYS[7], eventsKey, schedulerOpts['name'], maxEvents, now, templateData, jobSchedulerId, delay)
|
||||
elseif hasCollision then
|
||||
-- For 'pattern' case: return error code
|
||||
return -10 -- SchedulerJobIdCollision
|
||||
end
|
||||
|
||||
if ARGV[9] ~= "" then
|
||||
rcall("HSET", ARGV[9], "nrjid", jobId)
|
||||
end
|
||||
|
||||
return {jobId .. "", delay}
|
||||
30
backend/node_modules/bullmq/dist/cjs/commands/addLog-2.lua
generated
vendored
Normal file
30
backend/node_modules/bullmq/dist/cjs/commands/addLog-2.lua
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
--[[
|
||||
Add job log
|
||||
|
||||
Input:
|
||||
KEYS[1] job id key
|
||||
KEYS[2] job logs key
|
||||
|
||||
ARGV[1] id
|
||||
ARGV[2] log
|
||||
ARGV[3] keepLogs
|
||||
|
||||
Output:
|
||||
-1 - Missing job.
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
if rcall("EXISTS", KEYS[1]) == 1 then -- // Make sure job exists
|
||||
local logCount = rcall("RPUSH", KEYS[2], ARGV[2])
|
||||
|
||||
if ARGV[3] ~= '' then
|
||||
local keepLogs = tonumber(ARGV[3])
|
||||
rcall("LTRIM", KEYS[2], -keepLogs, -1)
|
||||
|
||||
return math.min(keepLogs, logCount)
|
||||
end
|
||||
|
||||
return logCount
|
||||
else
|
||||
return -1
|
||||
end
|
||||
98
backend/node_modules/bullmq/dist/cjs/commands/addParentJob-6.lua
generated
vendored
Normal file
98
backend/node_modules/bullmq/dist/cjs/commands/addParentJob-6.lua
generated
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
--[[
|
||||
Adds a parent job to the queue by doing the following:
|
||||
- Increases the job counter if needed.
|
||||
- Creates a new job key with the job data.
|
||||
- adds the job to the waiting-children zset
|
||||
|
||||
Input:
|
||||
KEYS[1] 'meta'
|
||||
KEYS[2] 'id'
|
||||
KEYS[3] 'delayed'
|
||||
KEYS[4] 'waiting-children'
|
||||
KEYS[5] 'completed'
|
||||
KEYS[6] events stream key
|
||||
|
||||
ARGV[1] msgpacked arguments array
|
||||
[1] key prefix,
|
||||
[2] custom id (will not generate one automatically)
|
||||
[3] name
|
||||
[4] timestamp
|
||||
[5] parentKey?
|
||||
[6] parent dependencies key.
|
||||
[7] parent? {id, queueKey}
|
||||
[8] repeat job key
|
||||
[9] deduplication key
|
||||
|
||||
ARGV[2] Json stringified job data
|
||||
ARGV[3] msgpacked options
|
||||
|
||||
Output:
|
||||
jobId - OK
|
||||
-5 - Missing parent key
|
||||
]]
|
||||
local metaKey = KEYS[1]
|
||||
local idKey = KEYS[2]
|
||||
|
||||
local completedKey = KEYS[5]
|
||||
local eventsKey = KEYS[6]
|
||||
|
||||
local jobId
|
||||
local jobIdKey
|
||||
local rcall = redis.call
|
||||
|
||||
local args = cmsgpack.unpack(ARGV[1])
|
||||
|
||||
local data = ARGV[2]
|
||||
local opts = cmsgpack.unpack(ARGV[3])
|
||||
|
||||
local parentKey = args[5]
|
||||
local parent = args[7]
|
||||
local repeatJobKey = args[8]
|
||||
local deduplicationKey = args[9]
|
||||
local parentData
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/handleDuplicatedJob"
|
||||
--- @include "includes/storeJob"
|
||||
|
||||
if parentKey ~= nil then
|
||||
if rcall("EXISTS", parentKey) ~= 1 then return -5 end
|
||||
|
||||
parentData = cjson.encode(parent)
|
||||
end
|
||||
|
||||
local jobCounter = rcall("INCR", idKey)
|
||||
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
|
||||
local parentDependenciesKey = args[6]
|
||||
local timestamp = args[4]
|
||||
if args[2] == "" then
|
||||
jobId = jobCounter
|
||||
jobIdKey = args[1] .. jobId
|
||||
else
|
||||
jobId = args[2]
|
||||
jobIdKey = args[1] .. jobId
|
||||
if rcall("EXISTS", jobIdKey) == 1 then
|
||||
return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
|
||||
parentData, parentDependenciesKey, completedKey, eventsKey,
|
||||
maxEvents, timestamp)
|
||||
end
|
||||
end
|
||||
|
||||
-- Store the job.
|
||||
storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2], opts, timestamp,
|
||||
parentKey, parentData, repeatJobKey)
|
||||
|
||||
local waitChildrenKey = KEYS[4]
|
||||
rcall("ZADD", waitChildrenKey, timestamp, jobId)
|
||||
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event",
|
||||
"waiting-children", "jobId", jobId)
|
||||
|
||||
-- Check if this job is a child of another job, if so add it to the parents dependencies
|
||||
if parentDependenciesKey ~= nil then
|
||||
rcall("SADD", parentDependenciesKey, jobIdKey)
|
||||
end
|
||||
|
||||
return jobId .. "" -- convert to string
|
||||
117
backend/node_modules/bullmq/dist/cjs/commands/addPrioritizedJob-9.lua
generated
vendored
Normal file
117
backend/node_modules/bullmq/dist/cjs/commands/addPrioritizedJob-9.lua
generated
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
--[[
|
||||
Adds a priotitized job to the queue by doing the following:
|
||||
- Increases the job counter if needed.
|
||||
- Creates a new job key with the job data.
|
||||
- Adds the job to the "added" list so that workers gets notified.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'marker',
|
||||
KEYS[2] 'meta'
|
||||
KEYS[3] 'id'
|
||||
KEYS[4] 'prioritized'
|
||||
KEYS[5] 'delayed'
|
||||
KEYS[6] 'completed'
|
||||
KEYS[7] 'active'
|
||||
KEYS[8] events stream key
|
||||
KEYS[9] 'pc' priority counter
|
||||
|
||||
ARGV[1] msgpacked arguments array
|
||||
[1] key prefix,
|
||||
[2] custom id (will not generate one automatically)
|
||||
[3] name
|
||||
[4] timestamp
|
||||
[5] parentKey?
|
||||
[6] parent dependencies key.
|
||||
[7] parent? {id, queueKey}
|
||||
[8] repeat job key
|
||||
[9] deduplication key
|
||||
|
||||
ARGV[2] Json stringified job data
|
||||
ARGV[3] msgpacked options
|
||||
|
||||
Output:
|
||||
jobId - OK
|
||||
-5 - Missing parent key
|
||||
]]
|
||||
local metaKey = KEYS[2]
|
||||
local idKey = KEYS[3]
|
||||
local priorityKey = KEYS[4]
|
||||
|
||||
local completedKey = KEYS[6]
|
||||
local activeKey = KEYS[7]
|
||||
local eventsKey = KEYS[8]
|
||||
local priorityCounterKey = KEYS[9]
|
||||
|
||||
local jobId
|
||||
local jobIdKey
|
||||
local rcall = redis.call
|
||||
|
||||
local args = cmsgpack.unpack(ARGV[1])
|
||||
|
||||
local data = ARGV[2]
|
||||
local opts = cmsgpack.unpack(ARGV[3])
|
||||
|
||||
local parentKey = args[5]
|
||||
local parent = args[7]
|
||||
local repeatJobKey = args[8]
|
||||
local deduplicationKey = args[9]
|
||||
local parentData
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addJobWithPriority"
|
||||
--- @include "includes/deduplicateJob"
|
||||
--- @include "includes/storeJob"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/handleDuplicatedJob"
|
||||
--- @include "includes/isQueuePausedOrMaxed"
|
||||
|
||||
if parentKey ~= nil then
|
||||
if rcall("EXISTS", parentKey) ~= 1 then return -5 end
|
||||
|
||||
parentData = cjson.encode(parent)
|
||||
end
|
||||
|
||||
local jobCounter = rcall("INCR", idKey)
|
||||
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
|
||||
local parentDependenciesKey = args[6]
|
||||
local timestamp = args[4]
|
||||
if args[2] == "" then
|
||||
jobId = jobCounter
|
||||
jobIdKey = args[1] .. jobId
|
||||
else
|
||||
jobId = args[2]
|
||||
jobIdKey = args[1] .. jobId
|
||||
if rcall("EXISTS", jobIdKey) == 1 then
|
||||
return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
|
||||
parentData, parentDependenciesKey, completedKey, eventsKey,
|
||||
maxEvents, timestamp)
|
||||
end
|
||||
end
|
||||
|
||||
local deduplicationJobId = deduplicateJob(opts['de'], jobId, KEYS[5],
|
||||
deduplicationKey, eventsKey, maxEvents, args[1])
|
||||
if deduplicationJobId then
|
||||
return deduplicationJobId
|
||||
end
|
||||
|
||||
-- Store the job.
|
||||
local delay, priority = storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2],
|
||||
opts, timestamp, parentKey, parentData,
|
||||
repeatJobKey)
|
||||
|
||||
-- Add the job to the prioritized set
|
||||
local isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, activeKey)
|
||||
addJobWithPriority( KEYS[1], priorityKey, priority, jobId, priorityCounterKey, isPausedOrMaxed)
|
||||
|
||||
-- Emit waiting event
|
||||
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "waiting",
|
||||
"jobId", jobId)
|
||||
|
||||
-- Check if this job is a child of another job, if so add it to the parents dependencies
|
||||
if parentDependenciesKey ~= nil then
|
||||
rcall("SADD", parentDependenciesKey, jobIdKey)
|
||||
end
|
||||
|
||||
return jobId .. "" -- convert to string
|
||||
84
backend/node_modules/bullmq/dist/cjs/commands/addRepeatableJob-2.lua
generated
vendored
Normal file
84
backend/node_modules/bullmq/dist/cjs/commands/addRepeatableJob-2.lua
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
--[[
|
||||
Adds a repeatable job
|
||||
|
||||
Input:
|
||||
KEYS[1] 'repeat' key
|
||||
KEYS[2] 'delayed' key
|
||||
|
||||
ARGV[1] next milliseconds
|
||||
ARGV[2] msgpacked options
|
||||
[1] name
|
||||
[2] tz?
|
||||
[3] pattern?
|
||||
[4] endDate?
|
||||
[5] every?
|
||||
ARGV[3] legacy custom key TODO: remove this logic in next breaking change
|
||||
ARGV[4] custom key
|
||||
ARGV[5] prefix key
|
||||
|
||||
Output:
|
||||
repeatableKey - OK
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local repeatKey = KEYS[1]
|
||||
local delayedKey = KEYS[2]
|
||||
|
||||
local nextMillis = ARGV[1]
|
||||
local legacyCustomKey = ARGV[3]
|
||||
local customKey = ARGV[4]
|
||||
local prefixKey = ARGV[5]
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/removeJob"
|
||||
|
||||
local function storeRepeatableJob(repeatKey, customKey, nextMillis, rawOpts)
|
||||
rcall("ZADD", repeatKey, nextMillis, customKey)
|
||||
local opts = cmsgpack.unpack(rawOpts)
|
||||
|
||||
local optionalValues = {}
|
||||
if opts['tz'] then
|
||||
table.insert(optionalValues, "tz")
|
||||
table.insert(optionalValues, opts['tz'])
|
||||
end
|
||||
|
||||
if opts['pattern'] then
|
||||
table.insert(optionalValues, "pattern")
|
||||
table.insert(optionalValues, opts['pattern'])
|
||||
end
|
||||
|
||||
if opts['endDate'] then
|
||||
table.insert(optionalValues, "endDate")
|
||||
table.insert(optionalValues, opts['endDate'])
|
||||
end
|
||||
|
||||
if opts['every'] then
|
||||
table.insert(optionalValues, "every")
|
||||
table.insert(optionalValues, opts['every'])
|
||||
end
|
||||
|
||||
rcall("HMSET", repeatKey .. ":" .. customKey, "name", opts['name'],
|
||||
unpack(optionalValues))
|
||||
|
||||
return customKey
|
||||
end
|
||||
|
||||
-- If we are overriding a repeatable job we must delete the delayed job for
|
||||
-- the next iteration.
|
||||
local prevMillis = rcall("ZSCORE", repeatKey, customKey)
|
||||
if prevMillis then
|
||||
local delayedJobId = "repeat:" .. customKey .. ":" .. prevMillis
|
||||
local nextDelayedJobId = repeatKey .. ":" .. customKey .. ":" .. nextMillis
|
||||
|
||||
if rcall("ZSCORE", delayedKey, delayedJobId)
|
||||
and rcall("EXISTS", nextDelayedJobId) ~= 1 then
|
||||
removeJob(delayedJobId, true, prefixKey, true --[[remove debounce key]])
|
||||
rcall("ZREM", delayedKey, delayedJobId)
|
||||
end
|
||||
end
|
||||
|
||||
-- Keep backwards compatibility with old repeatable jobs (<= 3.0.0)
|
||||
if rcall("ZSCORE", repeatKey, legacyCustomKey) ~= false then
|
||||
return storeRepeatableJob(repeatKey, legacyCustomKey, nextMillis, ARGV[2])
|
||||
end
|
||||
|
||||
return storeRepeatableJob(repeatKey, customKey, nextMillis, ARGV[2])
|
||||
122
backend/node_modules/bullmq/dist/cjs/commands/addStandardJob-9.lua
generated
vendored
Normal file
122
backend/node_modules/bullmq/dist/cjs/commands/addStandardJob-9.lua
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
--[[
|
||||
Adds a job to the queue by doing the following:
|
||||
- Increases the job counter if needed.
|
||||
- Creates a new job key with the job data.
|
||||
|
||||
- if delayed:
|
||||
- computes timestamp.
|
||||
- adds to delayed zset.
|
||||
- Emits a global event 'delayed' if the job is delayed.
|
||||
- if not delayed
|
||||
- Adds the jobId to the wait/paused list in one of three ways:
|
||||
- LIFO
|
||||
- FIFO
|
||||
- prioritized.
|
||||
- Adds the job to the "added" list so that workers gets notified.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'wait',
|
||||
KEYS[2] 'paused'
|
||||
KEYS[3] 'meta'
|
||||
KEYS[4] 'id'
|
||||
KEYS[5] 'completed'
|
||||
KEYS[6] 'delayed'
|
||||
KEYS[7] 'active'
|
||||
KEYS[8] events stream key
|
||||
KEYS[9] marker key
|
||||
|
||||
ARGV[1] msgpacked arguments array
|
||||
[1] key prefix,
|
||||
[2] custom id (will not generate one automatically)
|
||||
[3] name
|
||||
[4] timestamp
|
||||
[5] parentKey?
|
||||
[6] parent dependencies key.
|
||||
[7] parent? {id, queueKey}
|
||||
[8] repeat job key
|
||||
[9] deduplication key
|
||||
|
||||
ARGV[2] Json stringified job data
|
||||
ARGV[3] msgpacked options
|
||||
|
||||
Output:
|
||||
jobId - OK
|
||||
-5 - Missing parent key
|
||||
]]
|
||||
local eventsKey = KEYS[8]
|
||||
|
||||
local jobId
|
||||
local jobIdKey
|
||||
local rcall = redis.call
|
||||
|
||||
local args = cmsgpack.unpack(ARGV[1])
|
||||
|
||||
local data = ARGV[2]
|
||||
local opts = cmsgpack.unpack(ARGV[3])
|
||||
|
||||
local parentKey = args[5]
|
||||
local parent = args[7]
|
||||
local repeatJobKey = args[8]
|
||||
local deduplicationKey = args[9]
|
||||
local parentData
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addJobInTargetList"
|
||||
--- @include "includes/deduplicateJob"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/getTargetQueueList"
|
||||
--- @include "includes/handleDuplicatedJob"
|
||||
--- @include "includes/storeJob"
|
||||
|
||||
if parentKey ~= nil then
|
||||
if rcall("EXISTS", parentKey) ~= 1 then return -5 end
|
||||
|
||||
parentData = cjson.encode(parent)
|
||||
end
|
||||
|
||||
local jobCounter = rcall("INCR", KEYS[4])
|
||||
|
||||
local metaKey = KEYS[3]
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
|
||||
local parentDependenciesKey = args[6]
|
||||
local timestamp = args[4]
|
||||
if args[2] == "" then
|
||||
jobId = jobCounter
|
||||
jobIdKey = args[1] .. jobId
|
||||
else
|
||||
jobId = args[2]
|
||||
jobIdKey = args[1] .. jobId
|
||||
if rcall("EXISTS", jobIdKey) == 1 then
|
||||
return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent,
|
||||
parentData, parentDependenciesKey, KEYS[5], eventsKey,
|
||||
maxEvents, timestamp)
|
||||
end
|
||||
end
|
||||
|
||||
local deduplicationJobId = deduplicateJob(opts['de'], jobId, KEYS[6],
|
||||
deduplicationKey, eventsKey, maxEvents, args[1])
|
||||
if deduplicationJobId then
|
||||
return deduplicationJobId
|
||||
end
|
||||
|
||||
-- Store the job.
|
||||
storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2], opts, timestamp,
|
||||
parentKey, parentData, repeatJobKey)
|
||||
|
||||
local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[7], KEYS[1], KEYS[2])
|
||||
|
||||
-- LIFO or FIFO
|
||||
local pushCmd = opts['lifo'] and 'RPUSH' or 'LPUSH'
|
||||
addJobInTargetList(target, KEYS[9], pushCmd, isPausedOrMaxed, jobId)
|
||||
|
||||
-- Emit waiting event
|
||||
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "waiting",
|
||||
"jobId", jobId)
|
||||
|
||||
-- Check if this job is a child of another job, if so add it to the parents dependencies
|
||||
if parentDependenciesKey ~= nil then
|
||||
rcall("SADD", parentDependenciesKey, jobIdKey)
|
||||
end
|
||||
|
||||
return jobId .. "" -- convert to string
|
||||
55
backend/node_modules/bullmq/dist/cjs/commands/changeDelay-4.lua
generated
vendored
Normal file
55
backend/node_modules/bullmq/dist/cjs/commands/changeDelay-4.lua
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
--[[
|
||||
Change job delay when it is in delayed set.
|
||||
Input:
|
||||
KEYS[1] delayed key
|
||||
KEYS[2] meta key
|
||||
KEYS[3] marker key
|
||||
KEYS[4] events stream
|
||||
|
||||
ARGV[1] delay
|
||||
ARGV[2] timestamp
|
||||
ARGV[3] the id of the job
|
||||
ARGV[4] job key
|
||||
|
||||
Output:
|
||||
0 - OK
|
||||
-1 - Missing job.
|
||||
-3 - Job not in delayed set.
|
||||
|
||||
Events:
|
||||
- delayed key.
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addDelayMarkerIfNeeded"
|
||||
--- @include "includes/getDelayedScore"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
|
||||
if rcall("EXISTS", ARGV[4]) == 1 then
|
||||
local jobId = ARGV[3]
|
||||
|
||||
local delay = tonumber(ARGV[1])
|
||||
local score, delayedTimestamp = getDelayedScore(KEYS[1], ARGV[2], delay)
|
||||
|
||||
local numRemovedElements = rcall("ZREM", KEYS[1], jobId)
|
||||
|
||||
if numRemovedElements < 1 then
|
||||
return -3
|
||||
end
|
||||
|
||||
rcall("HSET", ARGV[4], "delay", delay)
|
||||
rcall("ZADD", KEYS[1], score, jobId)
|
||||
|
||||
local maxEvents = getOrSetMaxEvents(KEYS[2])
|
||||
|
||||
rcall("XADD", KEYS[4], "MAXLEN", "~", maxEvents, "*", "event", "delayed",
|
||||
"jobId", jobId, "delay", delayedTimestamp)
|
||||
|
||||
-- mark that a delayed job is available
|
||||
addDelayMarkerIfNeeded(KEYS[3], KEYS[1])
|
||||
|
||||
return 0
|
||||
else
|
||||
return -1
|
||||
end
|
||||
68
backend/node_modules/bullmq/dist/cjs/commands/changePriority-7.lua
generated
vendored
Normal file
68
backend/node_modules/bullmq/dist/cjs/commands/changePriority-7.lua
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
--[[
|
||||
Change job priority
|
||||
Input:
|
||||
KEYS[1] 'wait',
|
||||
KEYS[2] 'paused'
|
||||
KEYS[3] 'meta'
|
||||
KEYS[4] 'prioritized'
|
||||
KEYS[5] 'active'
|
||||
KEYS[6] 'pc' priority counter
|
||||
KEYS[7] 'marker'
|
||||
|
||||
ARGV[1] priority value
|
||||
ARGV[2] prefix key
|
||||
ARGV[3] job id
|
||||
ARGV[4] lifo
|
||||
|
||||
Output:
|
||||
0 - OK
|
||||
-1 - Missing job
|
||||
]]
|
||||
local jobId = ARGV[3]
|
||||
local jobKey = ARGV[2] .. jobId
|
||||
local priority = tonumber(ARGV[1])
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addJobInTargetList"
|
||||
--- @include "includes/addJobWithPriority"
|
||||
--- @include "includes/getTargetQueueList"
|
||||
--- @include "includes/pushBackJobWithPriority"
|
||||
|
||||
local function reAddJobWithNewPriority( prioritizedKey, markerKey, targetKey,
|
||||
priorityCounter, lifo, priority, jobId, isPausedOrMaxed)
|
||||
if priority == 0 then
|
||||
local pushCmd = lifo and 'RPUSH' or 'LPUSH'
|
||||
addJobInTargetList(targetKey, markerKey, pushCmd, isPausedOrMaxed, jobId)
|
||||
else
|
||||
if lifo then
|
||||
pushBackJobWithPriority(prioritizedKey, priority, jobId)
|
||||
else
|
||||
addJobWithPriority(markerKey, prioritizedKey, priority, jobId,
|
||||
priorityCounter, isPausedOrMaxed)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if rcall("EXISTS", jobKey) == 1 then
|
||||
local metaKey = KEYS[3]
|
||||
local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[5], KEYS[1], KEYS[2])
|
||||
local prioritizedKey = KEYS[4]
|
||||
local priorityCounterKey = KEYS[6]
|
||||
local markerKey = KEYS[7]
|
||||
|
||||
-- Re-add with the new priority
|
||||
if rcall("ZREM", prioritizedKey, jobId) > 0 then
|
||||
reAddJobWithNewPriority( prioritizedKey, markerKey, target,
|
||||
priorityCounterKey, ARGV[4] == '1', priority, jobId, isPausedOrMaxed)
|
||||
elseif rcall("LREM", target, -1, jobId) > 0 then
|
||||
reAddJobWithNewPriority( prioritizedKey, markerKey, target,
|
||||
priorityCounterKey, ARGV[4] == '1', priority, jobId, isPausedOrMaxed)
|
||||
end
|
||||
|
||||
rcall("HSET", jobKey, "priority", priority)
|
||||
|
||||
return 0
|
||||
else
|
||||
return -1
|
||||
end
|
||||
59
backend/node_modules/bullmq/dist/cjs/commands/cleanJobsInSet-3.lua
generated
vendored
Normal file
59
backend/node_modules/bullmq/dist/cjs/commands/cleanJobsInSet-3.lua
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
--[[
|
||||
Remove jobs from the specific set.
|
||||
|
||||
Input:
|
||||
KEYS[1] set key,
|
||||
KEYS[2] events stream key
|
||||
KEYS[3] repeat key
|
||||
|
||||
ARGV[1] jobKey prefix
|
||||
ARGV[2] timestamp
|
||||
ARGV[3] limit the number of jobs to be removed. 0 is unlimited
|
||||
ARGV[4] set name, can be any of 'wait', 'active', 'paused', 'delayed', 'completed', or 'failed'
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local repeatKey = KEYS[3]
|
||||
local rangeStart = 0
|
||||
local rangeEnd = -1
|
||||
|
||||
local limit = tonumber(ARGV[3])
|
||||
|
||||
-- If we're only deleting _n_ items, avoid retrieving all items
|
||||
-- for faster performance
|
||||
--
|
||||
-- Start from the tail of the list, since that's where oldest elements
|
||||
-- are generally added for FIFO lists
|
||||
if limit > 0 then
|
||||
rangeStart = -1 - limit + 1
|
||||
rangeEnd = -1
|
||||
end
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/cleanList"
|
||||
--- @include "includes/cleanSet"
|
||||
|
||||
local result
|
||||
if ARGV[4] == "active" then
|
||||
result = cleanList(KEYS[1], ARGV[1], rangeStart, rangeEnd, ARGV[2], false --[[ hasFinished ]],
|
||||
repeatKey)
|
||||
elseif ARGV[4] == "delayed" then
|
||||
rangeEnd = "+inf"
|
||||
result = cleanSet(KEYS[1], ARGV[1], rangeEnd, ARGV[2], limit,
|
||||
{"processedOn", "timestamp"}, false --[[ hasFinished ]], repeatKey)
|
||||
elseif ARGV[4] == "prioritized" then
|
||||
rangeEnd = "+inf"
|
||||
result = cleanSet(KEYS[1], ARGV[1], rangeEnd, ARGV[2], limit,
|
||||
{"timestamp"}, false --[[ hasFinished ]], repeatKey)
|
||||
elseif ARGV[4] == "wait" or ARGV[4] == "paused" then
|
||||
result = cleanList(KEYS[1], ARGV[1], rangeStart, rangeEnd, ARGV[2], true --[[ hasFinished ]],
|
||||
repeatKey)
|
||||
else
|
||||
rangeEnd = ARGV[2]
|
||||
-- No need to pass repeat key as in that moment job won't be related to a job scheduler
|
||||
result = cleanSet(KEYS[1], ARGV[1], rangeEnd, ARGV[2], limit,
|
||||
{"finishedOn"}, true --[[ hasFinished ]])
|
||||
end
|
||||
|
||||
rcall("XADD", KEYS[2], "*", "event", "cleaned", "count", result[2])
|
||||
|
||||
return result[1]
|
||||
41
backend/node_modules/bullmq/dist/cjs/commands/drain-5.lua
generated
vendored
Normal file
41
backend/node_modules/bullmq/dist/cjs/commands/drain-5.lua
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
--[[
|
||||
Drains the queue, removes all jobs that are waiting
|
||||
or delayed, but not active, completed or failed
|
||||
|
||||
Input:
|
||||
KEYS[1] 'wait',
|
||||
KEYS[2] 'paused'
|
||||
KEYS[3] 'delayed'
|
||||
KEYS[4] 'prioritized'
|
||||
KEYS[5] 'jobschedulers' (repeat)
|
||||
|
||||
ARGV[1] queue key prefix
|
||||
ARGV[2] should clean delayed jobs
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local queueBaseKey = ARGV[1]
|
||||
|
||||
--- @include "includes/removeListJobs"
|
||||
--- @include "includes/removeZSetJobs"
|
||||
|
||||
-- We must not remove delayed jobs if they are associated to a job scheduler.
|
||||
local scheduledJobs = {}
|
||||
local jobSchedulers = rcall("ZRANGE", KEYS[5], 0, -1, "WITHSCORES")
|
||||
|
||||
-- For every job scheduler, get the current delayed job id.
|
||||
for i = 1, #jobSchedulers, 2 do
|
||||
local jobSchedulerId = jobSchedulers[i]
|
||||
local jobSchedulerMillis = jobSchedulers[i + 1]
|
||||
|
||||
local delayedJobId = "repeat:" .. jobSchedulerId .. ":" .. jobSchedulerMillis
|
||||
scheduledJobs[delayedJobId] = true
|
||||
end
|
||||
|
||||
removeListJobs(KEYS[1], true, queueBaseKey, 0, scheduledJobs) -- wait
|
||||
removeListJobs(KEYS[2], true, queueBaseKey, 0, scheduledJobs) -- paused
|
||||
|
||||
if ARGV[2] == "1" then
|
||||
removeZSetJobs(KEYS[3], true, queueBaseKey, 0, scheduledJobs) -- delayed
|
||||
end
|
||||
|
||||
removeZSetJobs(KEYS[4], true, queueBaseKey, 0, scheduledJobs) -- prioritized
|
||||
23
backend/node_modules/bullmq/dist/cjs/commands/extendLock-2.lua
generated
vendored
Normal file
23
backend/node_modules/bullmq/dist/cjs/commands/extendLock-2.lua
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
--[[
|
||||
Extend lock and removes the job from the stalled set.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'lock',
|
||||
KEYS[2] 'stalled'
|
||||
|
||||
ARGV[1] token
|
||||
ARGV[2] lock duration in milliseconds
|
||||
ARGV[3] jobid
|
||||
|
||||
Output:
|
||||
"1" if lock extented succesfully.
|
||||
]]
|
||||
local rcall = redis.call
|
||||
if rcall("GET", KEYS[1]) == ARGV[1] then
|
||||
-- if rcall("SET", KEYS[1], ARGV[1], "PX", ARGV[2], "XX") then
|
||||
if rcall("SET", KEYS[1], ARGV[1], "PX", ARGV[2]) then
|
||||
rcall("SREM", KEYS[2], ARGV[3])
|
||||
return 1
|
||||
end
|
||||
end
|
||||
return 0
|
||||
48
backend/node_modules/bullmq/dist/cjs/commands/extendLocks-1.lua
generated
vendored
Normal file
48
backend/node_modules/bullmq/dist/cjs/commands/extendLocks-1.lua
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
--[[
|
||||
Extend locks for multiple jobs and remove them from the stalled set if successful.
|
||||
Return the list of job IDs for which the operation failed.
|
||||
|
||||
KEYS[1] = stalled key
|
||||
|
||||
ARGV[1] = baseKey
|
||||
ARGV[2] = tokens
|
||||
ARGV[3] = jobIds
|
||||
ARGV[4] = lockDuration (ms)
|
||||
|
||||
Output:
|
||||
An array of failed job IDs. If empty, all succeeded.
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
local stalledKey = KEYS[1]
|
||||
local baseKey = ARGV[1]
|
||||
local tokens = cmsgpack.unpack(ARGV[2])
|
||||
local jobIds = cmsgpack.unpack(ARGV[3])
|
||||
local lockDuration = ARGV[4]
|
||||
|
||||
local jobCount = #jobIds
|
||||
local failedJobs = {}
|
||||
|
||||
for i = 1, jobCount, 1 do
|
||||
local lockKey = baseKey .. jobIds[i] .. ':lock'
|
||||
local jobId = jobIds[i]
|
||||
local token = tokens[i]
|
||||
|
||||
local currentToken = rcall("GET", lockKey)
|
||||
if currentToken then
|
||||
if currentToken == token then
|
||||
local setResult = rcall("SET", lockKey, token, "PX", lockDuration)
|
||||
if setResult then
|
||||
rcall("SREM", stalledKey, jobId)
|
||||
else
|
||||
table.insert(failedJobs, jobId)
|
||||
end
|
||||
else
|
||||
table.insert(failedJobs, jobId)
|
||||
end
|
||||
else
|
||||
table.insert(failedJobs, jobId)
|
||||
end
|
||||
end
|
||||
|
||||
return failedJobs
|
||||
36
backend/node_modules/bullmq/dist/cjs/commands/getCounts-1.lua
generated
vendored
Normal file
36
backend/node_modules/bullmq/dist/cjs/commands/getCounts-1.lua
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
--[[
|
||||
Get counts per provided states
|
||||
|
||||
Input:
|
||||
KEYS[1] 'prefix'
|
||||
|
||||
ARGV[1...] types
|
||||
]]
|
||||
local rcall = redis.call;
|
||||
local prefix = KEYS[1]
|
||||
local results = {}
|
||||
|
||||
for i = 1, #ARGV do
|
||||
local stateKey = prefix .. ARGV[i]
|
||||
if ARGV[i] == "wait" or ARGV[i] == "paused" then
|
||||
-- Markers in waitlist DEPRECATED in v5: Remove in v6.
|
||||
local marker = rcall("LINDEX", stateKey, -1)
|
||||
if marker and string.sub(marker, 1, 2) == "0:" then
|
||||
local count = rcall("LLEN", stateKey)
|
||||
if count > 1 then
|
||||
rcall("RPOP", stateKey)
|
||||
results[#results+1] = count-1
|
||||
else
|
||||
results[#results+1] = 0
|
||||
end
|
||||
else
|
||||
results[#results+1] = rcall("LLEN", stateKey)
|
||||
end
|
||||
elseif ARGV[i] == "active" then
|
||||
results[#results+1] = rcall("LLEN", stateKey)
|
||||
else
|
||||
results[#results+1] = rcall("ZCARD", stateKey)
|
||||
end
|
||||
end
|
||||
|
||||
return results
|
||||
35
backend/node_modules/bullmq/dist/cjs/commands/getCountsPerPriority-4.lua
generated
vendored
Normal file
35
backend/node_modules/bullmq/dist/cjs/commands/getCountsPerPriority-4.lua
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
--[[
|
||||
Get counts per provided states
|
||||
|
||||
Input:
|
||||
KEYS[1] wait key
|
||||
KEYS[2] paused key
|
||||
KEYS[3] meta key
|
||||
KEYS[4] prioritized key
|
||||
|
||||
ARGV[1...] priorities
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local results = {}
|
||||
local waitKey = KEYS[1]
|
||||
local pausedKey = KEYS[2]
|
||||
local prioritizedKey = KEYS[4]
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/isQueuePaused"
|
||||
|
||||
for i = 1, #ARGV do
|
||||
local priority = tonumber(ARGV[i])
|
||||
if priority == 0 then
|
||||
if isQueuePaused(KEYS[3]) then
|
||||
results[#results+1] = rcall("LLEN", pausedKey)
|
||||
else
|
||||
results[#results+1] = rcall("LLEN", waitKey)
|
||||
end
|
||||
else
|
||||
results[#results+1] = rcall("ZCOUNT", prioritizedKey,
|
||||
priority * 0x100000000, (priority + 1) * 0x100000000 - 1)
|
||||
end
|
||||
end
|
||||
|
||||
return results
|
||||
31
backend/node_modules/bullmq/dist/cjs/commands/getDependencyCounts-4.lua
generated
vendored
Normal file
31
backend/node_modules/bullmq/dist/cjs/commands/getDependencyCounts-4.lua
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
--[[
|
||||
Get counts per child states
|
||||
|
||||
Input:
|
||||
KEYS[1] processed key
|
||||
KEYS[2] unprocessed key
|
||||
KEYS[3] ignored key
|
||||
KEYS[4] failed key
|
||||
|
||||
ARGV[1...] types
|
||||
]]
|
||||
local rcall = redis.call;
|
||||
local processedKey = KEYS[1]
|
||||
local unprocessedKey = KEYS[2]
|
||||
local ignoredKey = KEYS[3]
|
||||
local failedKey = KEYS[4]
|
||||
local results = {}
|
||||
|
||||
for i = 1, #ARGV do
|
||||
if ARGV[i] == "processed" then
|
||||
results[#results+1] = rcall("HLEN", processedKey)
|
||||
elseif ARGV[i] == "unprocessed" then
|
||||
results[#results+1] = rcall("SCARD", unprocessedKey)
|
||||
elseif ARGV[i] == "ignored" then
|
||||
results[#results+1] = rcall("HLEN", ignoredKey)
|
||||
else
|
||||
results[#results+1] = rcall("ZCARD", failedKey)
|
||||
end
|
||||
end
|
||||
|
||||
return results
|
||||
19
backend/node_modules/bullmq/dist/cjs/commands/getJobScheduler-1.lua
generated
vendored
Normal file
19
backend/node_modules/bullmq/dist/cjs/commands/getJobScheduler-1.lua
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
--[[
|
||||
Get job scheduler record.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'repeat' key
|
||||
|
||||
ARGV[1] id
|
||||
]]
|
||||
|
||||
local rcall = redis.call
|
||||
local jobSchedulerKey = KEYS[1] .. ":" .. ARGV[1]
|
||||
|
||||
local score = rcall("ZSCORE", KEYS[1], ARGV[1])
|
||||
|
||||
if score then
|
||||
return {rcall("HGETALL", jobSchedulerKey), score} -- get job data
|
||||
end
|
||||
|
||||
return {nil, nil}
|
||||
19
backend/node_modules/bullmq/dist/cjs/commands/getMetrics-2.lua
generated
vendored
Normal file
19
backend/node_modules/bullmq/dist/cjs/commands/getMetrics-2.lua
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
--[[
|
||||
Get metrics
|
||||
|
||||
Input:
|
||||
KEYS[1] 'metrics' key
|
||||
KEYS[2] 'metrics data' key
|
||||
|
||||
ARGV[1] start index
|
||||
ARGV[2] end index
|
||||
]]
|
||||
local rcall = redis.call;
|
||||
local metricsKey = KEYS[1]
|
||||
local dataKey = KEYS[2]
|
||||
|
||||
local metrics = rcall("HMGET", metricsKey, "count", "prevTS", "prevCount")
|
||||
local data = rcall("LRANGE", dataKey, tonumber(ARGV[1]), tonumber(ARGV[2]))
|
||||
local numPoints = rcall("LLEN", dataKey)
|
||||
|
||||
return {metrics, data, numPoints}
|
||||
70
backend/node_modules/bullmq/dist/cjs/commands/getRanges-1.lua
generated
vendored
Normal file
70
backend/node_modules/bullmq/dist/cjs/commands/getRanges-1.lua
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
--[[
|
||||
Get job ids per provided states
|
||||
|
||||
Input:
|
||||
KEYS[1] 'prefix'
|
||||
|
||||
ARGV[1] start
|
||||
ARGV[2] end
|
||||
ARGV[3] asc
|
||||
ARGV[4...] types
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local prefix = KEYS[1]
|
||||
local rangeStart = tonumber(ARGV[1])
|
||||
local rangeEnd = tonumber(ARGV[2])
|
||||
local asc = ARGV[3]
|
||||
local results = {}
|
||||
|
||||
local function getRangeInList(listKey, asc, rangeStart, rangeEnd, results)
|
||||
if asc == "1" then
|
||||
local modifiedRangeStart
|
||||
local modifiedRangeEnd
|
||||
if rangeStart == -1 then
|
||||
modifiedRangeStart = 0
|
||||
else
|
||||
modifiedRangeStart = -(rangeStart + 1)
|
||||
end
|
||||
|
||||
if rangeEnd == -1 then
|
||||
modifiedRangeEnd = 0
|
||||
else
|
||||
modifiedRangeEnd = -(rangeEnd + 1)
|
||||
end
|
||||
|
||||
results[#results+1] = rcall("LRANGE", listKey,
|
||||
modifiedRangeEnd,
|
||||
modifiedRangeStart)
|
||||
else
|
||||
results[#results+1] = rcall("LRANGE", listKey, rangeStart, rangeEnd)
|
||||
end
|
||||
end
|
||||
|
||||
for i = 4, #ARGV do
|
||||
local stateKey = prefix .. ARGV[i]
|
||||
if ARGV[i] == "wait" or ARGV[i] == "paused" then
|
||||
-- Markers in waitlist DEPRECATED in v5: Remove in v6.
|
||||
local marker = rcall("LINDEX", stateKey, -1)
|
||||
if marker and string.sub(marker, 1, 2) == "0:" then
|
||||
local count = rcall("LLEN", stateKey)
|
||||
if count > 1 then
|
||||
rcall("RPOP", stateKey)
|
||||
getRangeInList(stateKey, asc, rangeStart, rangeEnd, results)
|
||||
else
|
||||
results[#results+1] = {}
|
||||
end
|
||||
else
|
||||
getRangeInList(stateKey, asc, rangeStart, rangeEnd, results)
|
||||
end
|
||||
elseif ARGV[i] == "active" then
|
||||
getRangeInList(stateKey, asc, rangeStart, rangeEnd, results)
|
||||
else
|
||||
if asc == "1" then
|
||||
results[#results+1] = rcall("ZRANGE", stateKey, rangeStart, rangeEnd)
|
||||
else
|
||||
results[#results+1] = rcall("ZREVRANGE", stateKey, rangeStart, rangeEnd)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return results
|
||||
26
backend/node_modules/bullmq/dist/cjs/commands/getRateLimitTtl-2.lua
generated
vendored
Normal file
26
backend/node_modules/bullmq/dist/cjs/commands/getRateLimitTtl-2.lua
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
--[[
|
||||
Get rate limit ttl
|
||||
|
||||
Input:
|
||||
KEYS[1] 'limiter'
|
||||
KEYS[2] 'meta'
|
||||
|
||||
ARGV[1] maxJobs
|
||||
]]
|
||||
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/getRateLimitTTL"
|
||||
|
||||
local rateLimiterKey = KEYS[1]
|
||||
if ARGV[1] ~= "0" then
|
||||
return getRateLimitTTL(tonumber(ARGV[1]), rateLimiterKey)
|
||||
else
|
||||
local rateLimitMax = rcall("HGET", KEYS[2], "max")
|
||||
if rateLimitMax then
|
||||
return getRateLimitTTL(tonumber(rateLimitMax), rateLimiterKey)
|
||||
end
|
||||
|
||||
return rcall("PTTL", rateLimiterKey)
|
||||
end
|
||||
65
backend/node_modules/bullmq/dist/cjs/commands/getState-8.lua
generated
vendored
Normal file
65
backend/node_modules/bullmq/dist/cjs/commands/getState-8.lua
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
--[[
|
||||
Get a job state
|
||||
|
||||
Input:
|
||||
KEYS[1] 'completed' key,
|
||||
KEYS[2] 'failed' key
|
||||
KEYS[3] 'delayed' key
|
||||
KEYS[4] 'active' key
|
||||
KEYS[5] 'wait' key
|
||||
KEYS[6] 'paused' key
|
||||
KEYS[7] 'waiting-children' key
|
||||
KEYS[8] 'prioritized' key
|
||||
|
||||
ARGV[1] job id
|
||||
Output:
|
||||
'completed'
|
||||
'failed'
|
||||
'delayed'
|
||||
'active'
|
||||
'prioritized'
|
||||
'waiting'
|
||||
'waiting-children'
|
||||
'unknown'
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
if rcall("ZSCORE", KEYS[1], ARGV[1]) then
|
||||
return "completed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[2], ARGV[1]) then
|
||||
return "failed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[3], ARGV[1]) then
|
||||
return "delayed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[8], ARGV[1]) then
|
||||
return "prioritized"
|
||||
end
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/checkItemInList"
|
||||
|
||||
local active_items = rcall("LRANGE", KEYS[4] , 0, -1)
|
||||
if checkItemInList(active_items, ARGV[1]) ~= nil then
|
||||
return "active"
|
||||
end
|
||||
|
||||
local wait_items = rcall("LRANGE", KEYS[5] , 0, -1)
|
||||
if checkItemInList(wait_items, ARGV[1]) ~= nil then
|
||||
return "waiting"
|
||||
end
|
||||
|
||||
local paused_items = rcall("LRANGE", KEYS[6] , 0, -1)
|
||||
if checkItemInList(paused_items, ARGV[1]) ~= nil then
|
||||
return "waiting"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[7], ARGV[1]) then
|
||||
return "waiting-children"
|
||||
end
|
||||
|
||||
return "unknown"
|
||||
58
backend/node_modules/bullmq/dist/cjs/commands/getStateV2-8.lua
generated
vendored
Normal file
58
backend/node_modules/bullmq/dist/cjs/commands/getStateV2-8.lua
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
--[[
|
||||
Get a job state
|
||||
|
||||
Input:
|
||||
KEYS[1] 'completed' key,
|
||||
KEYS[2] 'failed' key
|
||||
KEYS[3] 'delayed' key
|
||||
KEYS[4] 'active' key
|
||||
KEYS[5] 'wait' key
|
||||
KEYS[6] 'paused' key
|
||||
KEYS[7] 'waiting-children' key
|
||||
KEYS[8] 'prioritized' key
|
||||
|
||||
ARGV[1] job id
|
||||
Output:
|
||||
'completed'
|
||||
'failed'
|
||||
'delayed'
|
||||
'active'
|
||||
'waiting'
|
||||
'waiting-children'
|
||||
'unknown'
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
if rcall("ZSCORE", KEYS[1], ARGV[1]) then
|
||||
return "completed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[2], ARGV[1]) then
|
||||
return "failed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[3], ARGV[1]) then
|
||||
return "delayed"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[8], ARGV[1]) then
|
||||
return "prioritized"
|
||||
end
|
||||
|
||||
if rcall("LPOS", KEYS[4] , ARGV[1]) then
|
||||
return "active"
|
||||
end
|
||||
|
||||
if rcall("LPOS", KEYS[5] , ARGV[1]) then
|
||||
return "waiting"
|
||||
end
|
||||
|
||||
if rcall("LPOS", KEYS[6] , ARGV[1]) then
|
||||
return "waiting"
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[7] , ARGV[1]) then
|
||||
return "waiting-children"
|
||||
end
|
||||
|
||||
return "unknown"
|
||||
9
backend/node_modules/bullmq/dist/cjs/commands/includes/addBaseMarkerIfNeeded.lua
generated
vendored
Normal file
9
backend/node_modules/bullmq/dist/cjs/commands/includes/addBaseMarkerIfNeeded.lua
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
--[[
|
||||
Add marker if needed when a job is available.
|
||||
]]
|
||||
|
||||
local function addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed)
|
||||
if not isPausedOrMaxed then
|
||||
rcall("ZADD", markerKey, 0, "0")
|
||||
end
|
||||
end
|
||||
15
backend/node_modules/bullmq/dist/cjs/commands/includes/addDelayMarkerIfNeeded.lua
generated
vendored
Normal file
15
backend/node_modules/bullmq/dist/cjs/commands/includes/addDelayMarkerIfNeeded.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
--[[
|
||||
Add delay marker if needed.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "getNextDelayedTimestamp"
|
||||
|
||||
local function addDelayMarkerIfNeeded(markerKey, delayedKey)
|
||||
local nextTimestamp = getNextDelayedTimestamp(delayedKey)
|
||||
if nextTimestamp ~= nil then
|
||||
-- Replace the score of the marker with the newest known
|
||||
-- next timestamp.
|
||||
rcall("ZADD", markerKey, nextTimestamp, "1")
|
||||
end
|
||||
end
|
||||
23
backend/node_modules/bullmq/dist/cjs/commands/includes/addDelayedJob.lua
generated
vendored
Normal file
23
backend/node_modules/bullmq/dist/cjs/commands/includes/addDelayedJob.lua
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
--[[
|
||||
Adds a delayed job to the queue by doing the following:
|
||||
- Creates a new job key with the job data.
|
||||
- adds to delayed zset.
|
||||
- Emits a global event 'delayed' if the job is delayed.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "addDelayMarkerIfNeeded"
|
||||
--- @include "getDelayedScore"
|
||||
|
||||
local function addDelayedJob(jobId, delayedKey, eventsKey, timestamp,
|
||||
maxEvents, markerKey, delay)
|
||||
|
||||
local score, delayedTimestamp = getDelayedScore(delayedKey, timestamp, tonumber(delay))
|
||||
|
||||
rcall("ZADD", delayedKey, score, jobId)
|
||||
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "delayed",
|
||||
"jobId", jobId, "delay", delayedTimestamp)
|
||||
|
||||
-- mark that a delayed job is available
|
||||
addDelayMarkerIfNeeded(markerKey, delayedKey)
|
||||
end
|
||||
39
backend/node_modules/bullmq/dist/cjs/commands/includes/addJobFromScheduler.lua
generated
vendored
Normal file
39
backend/node_modules/bullmq/dist/cjs/commands/includes/addJobFromScheduler.lua
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
--[[
|
||||
Add delay marker if needed.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "addDelayedJob"
|
||||
--- @include "addJobWithPriority"
|
||||
--- @include "isQueuePaused"
|
||||
--- @include "storeJob"
|
||||
--- @include "getTargetQueueList"
|
||||
--- @include "addJobInTargetList"
|
||||
|
||||
local function addJobFromScheduler(jobKey, jobId, opts, waitKey, pausedKey, activeKey, metaKey,
|
||||
prioritizedKey, priorityCounter, delayedKey, markerKey, eventsKey, name, maxEvents, timestamp,
|
||||
data, jobSchedulerId, repeatDelay)
|
||||
|
||||
opts['delay'] = repeatDelay
|
||||
opts['jobId'] = jobId
|
||||
|
||||
local delay, priority = storeJob(eventsKey, jobKey, jobId, name, data,
|
||||
opts, timestamp, nil, nil, jobSchedulerId)
|
||||
|
||||
if delay ~= 0 then
|
||||
addDelayedJob(jobId, delayedKey, eventsKey, timestamp, maxEvents, markerKey, delay)
|
||||
else
|
||||
local target, isPausedOrMaxed = getTargetQueueList(metaKey, activeKey, waitKey, pausedKey)
|
||||
|
||||
-- Standard or priority add
|
||||
if priority == 0 then
|
||||
local pushCmd = opts['lifo'] and 'RPUSH' or 'LPUSH'
|
||||
addJobInTargetList(target, markerKey, pushCmd, isPausedOrMaxed, jobId)
|
||||
else
|
||||
-- Priority add
|
||||
addJobWithPriority(markerKey, prioritizedKey, priority, jobId, priorityCounter, isPausedOrMaxed)
|
||||
end
|
||||
-- Emit waiting event
|
||||
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "waiting", "jobId", jobId)
|
||||
end
|
||||
end
|
||||
11
backend/node_modules/bullmq/dist/cjs/commands/includes/addJobInTargetList.lua
generated
vendored
Normal file
11
backend/node_modules/bullmq/dist/cjs/commands/includes/addJobInTargetList.lua
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
--[[
|
||||
Function to add job in target list and add marker if needed.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "addBaseMarkerIfNeeded"
|
||||
|
||||
local function addJobInTargetList(targetKey, markerKey, pushCmd, isPausedOrMaxed, jobId)
|
||||
rcall(pushCmd, targetKey, jobId)
|
||||
addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed)
|
||||
end
|
||||
14
backend/node_modules/bullmq/dist/cjs/commands/includes/addJobWithPriority.lua
generated
vendored
Normal file
14
backend/node_modules/bullmq/dist/cjs/commands/includes/addJobWithPriority.lua
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
--[[
|
||||
Function to add job considering priority.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "addBaseMarkerIfNeeded"
|
||||
--- @include "getPriorityScore"
|
||||
|
||||
local function addJobWithPriority(markerKey, prioritizedKey, priority, jobId, priorityCounterKey,
|
||||
isPausedOrMaxed)
|
||||
local score = getPriorityScore(priority, priorityCounterKey)
|
||||
rcall("ZADD", prioritizedKey, score, jobId)
|
||||
addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed)
|
||||
end
|
||||
18
backend/node_modules/bullmq/dist/cjs/commands/includes/batches.lua
generated
vendored
Normal file
18
backend/node_modules/bullmq/dist/cjs/commands/includes/batches.lua
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
--[[
|
||||
Function to loop in batches.
|
||||
Just a bit of warning, some commands as ZREM
|
||||
could receive a maximum of 7000 parameters per call.
|
||||
]]
|
||||
|
||||
local function batches(n, batchSize)
|
||||
local i = 0
|
||||
|
||||
return function()
|
||||
local from = i * batchSize + 1
|
||||
i = i + 1
|
||||
if (from <= n) then
|
||||
local to = math.min(from + batchSize - 1, n)
|
||||
return from, to
|
||||
end
|
||||
end
|
||||
end
|
||||
12
backend/node_modules/bullmq/dist/cjs/commands/includes/checkItemInList.lua
generated
vendored
Normal file
12
backend/node_modules/bullmq/dist/cjs/commands/includes/checkItemInList.lua
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
--[[
|
||||
Functions to check if a item belongs to a list.
|
||||
]]
|
||||
|
||||
local function checkItemInList(list, item)
|
||||
for _, v in pairs(list) do
|
||||
if v == item then
|
||||
return 1
|
||||
end
|
||||
end
|
||||
return nil
|
||||
end
|
||||
49
backend/node_modules/bullmq/dist/cjs/commands/includes/cleanList.lua
generated
vendored
Normal file
49
backend/node_modules/bullmq/dist/cjs/commands/includes/cleanList.lua
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
--[[
|
||||
Function to clean job list.
|
||||
Returns jobIds and deleted count number.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "getTimestamp"
|
||||
--- @include "isJobSchedulerJob"
|
||||
--- @include "removeJob"
|
||||
|
||||
local function cleanList(listKey, jobKeyPrefix, rangeStart, rangeEnd,
|
||||
timestamp, isWaiting, jobSchedulersKey)
|
||||
local jobs = rcall("LRANGE", listKey, rangeStart, rangeEnd)
|
||||
local deleted = {}
|
||||
local deletedCount = 0
|
||||
local jobTS
|
||||
local deletionMarker = ''
|
||||
local jobIdsLen = #jobs
|
||||
for i, job in ipairs(jobs) do
|
||||
if limit > 0 and deletedCount >= limit then
|
||||
break
|
||||
end
|
||||
|
||||
local jobKey = jobKeyPrefix .. job
|
||||
if (isWaiting or rcall("EXISTS", jobKey .. ":lock") == 0) and
|
||||
not isJobSchedulerJob(job, jobKey, jobSchedulersKey) then
|
||||
-- Find the right timestamp of the job to compare to maxTimestamp:
|
||||
-- * finishedOn says when the job was completed, but it isn't set unless the job has actually completed
|
||||
-- * processedOn represents when the job was last attempted, but it doesn't get populated until
|
||||
-- the job is first tried
|
||||
-- * timestamp is the original job submission time
|
||||
-- Fetch all three of these (in that order) and use the first one that is set so that we'll leave jobs
|
||||
-- that have been active within the grace period:
|
||||
jobTS = getTimestamp(jobKey, {"finishedOn", "processedOn", "timestamp"})
|
||||
if (not jobTS or jobTS <= timestamp) then
|
||||
-- replace the entry with a deletion marker; the actual deletion will
|
||||
-- occur at the end of the script
|
||||
rcall("LSET", listKey, rangeEnd - jobIdsLen + i, deletionMarker)
|
||||
removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]])
|
||||
deletedCount = deletedCount + 1
|
||||
table.insert(deleted, job)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
rcall("LREM", listKey, 0, deletionMarker)
|
||||
|
||||
return {deleted, deletedCount}
|
||||
end
|
||||
58
backend/node_modules/bullmq/dist/cjs/commands/includes/cleanSet.lua
generated
vendored
Normal file
58
backend/node_modules/bullmq/dist/cjs/commands/includes/cleanSet.lua
generated
vendored
Normal file
@@ -0,0 +1,58 @@
--[[
Function to clean job set.
Returns jobIds and deleted count number.
]]

-- Includes
--- @include "batches"
--- @include "getJobsInZset"
--- @include "getTimestamp"
--- @include "isJobSchedulerJob"
--- @include "removeJob"

local function cleanSet(
setKey,
jobKeyPrefix,
rangeEnd,
timestamp,
limit,
attributes,
isFinished,
jobSchedulersKey)
local jobs = getJobsInZset(setKey, rangeEnd, limit)
local deleted = {}
local deletedCount = 0
local jobTS
for i, job in ipairs(jobs) do
if limit > 0 and deletedCount >= limit then
break
end

local jobKey = jobKeyPrefix .. job
-- Extract a Job Scheduler Id from jobId ("repeat:job-scheduler-id:millis")
-- and check if it is in the scheduled jobs
if not (jobSchedulersKey and isJobSchedulerJob(job, jobKey, jobSchedulersKey)) then
if isFinished then
removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]] )
deletedCount = deletedCount + 1
table.insert(deleted, job)
else
-- * finishedOn says when the job was completed, but it isn't set unless the job has actually completed
jobTS = getTimestamp(jobKey, attributes)
if (not jobTS or jobTS <= timestamp) then
removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]] )
deletedCount = deletedCount + 1
table.insert(deleted, job)
end
end
end
end

if (#deleted > 0) then
for from, to in batches(#deleted, 7000) do
rcall("ZREM", setKey, unpack(deleted, from, to))
end
end

return {deleted, deletedCount}
end
46
backend/node_modules/bullmq/dist/cjs/commands/includes/collectMetrics.lua
generated
vendored
Normal file
46
backend/node_modules/bullmq/dist/cjs/commands/includes/collectMetrics.lua
generated
vendored
Normal file
@@ -0,0 +1,46 @@
--[[
Functions to collect metrics based on a current and previous count of jobs.
Granularity is fixed at 1 minute.
]]
--- @include "batches"
local function collectMetrics(metaKey, dataPointsList, maxDataPoints,
timestamp)
-- Increment current count
local count = rcall("HINCRBY", metaKey, "count", 1) - 1

-- Compute how many data points we need to add to the list, N.
local prevTS = rcall("HGET", metaKey, "prevTS")

if not prevTS then
-- If prevTS is nil, set it to the current timestamp
rcall("HSET", metaKey, "prevTS", timestamp, "prevCount", 0)
return
end

local N = math.min(math.floor(timestamp / 60000) - math.floor(prevTS / 60000), tonumber(maxDataPoints))

if N > 0 then
local delta = count - rcall("HGET", metaKey, "prevCount")
-- If N > 1, add N-1 zeros to the list
if N > 1 then
local points = {}
points[1] = delta
for i = 2, N do
points[i] = 0
end

for from, to in batches(#points, 7000) do
rcall("LPUSH", dataPointsList, unpack(points, from, to))
end
else
-- LPUSH delta to the list
rcall("LPUSH", dataPointsList, delta)
end

-- LTRIM to keep list to its max size
rcall("LTRIM", dataPointsList, 0, maxDataPoints - 1)

-- update prev count with current count
rcall("HSET", metaKey, "prevCount", count, "prevTS", timestamp)
end
end
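A hedged worked example of the bucket arithmetic above (values are invented): with prevTS = 540000 (minute 9) and timestamp = 720000 (minute 12), three one-minute buckets have elapsed, so the delta is pushed followed by two zero-filled minutes before the list is trimmed to maxDataPoints entries.

-- illustrative arithmetic only; each bucket is 60000 ms wide
local N = math.min(math.floor(720000 / 60000) - math.floor(540000 / 60000), 60) -- 3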
102
backend/node_modules/bullmq/dist/cjs/commands/includes/deduplicateJob.lua
generated
vendored
Normal file
102
backend/node_modules/bullmq/dist/cjs/commands/includes/deduplicateJob.lua
generated
vendored
Normal file
@@ -0,0 +1,102 @@
--[[
Function to debounce a job.
]]
-- Includes
--- @include "removeJobKeys"

local function removeDelayedJob(delayedKey, deduplicationKey, eventsKey, maxEvents, currentDeduplicatedJobId,
jobId, deduplicationId, prefix)
if rcall("ZREM", delayedKey, currentDeduplicatedJobId) > 0 then
removeJobKeys(prefix .. currentDeduplicatedJobId)
rcall("XADD", eventsKey, "*", "event", "removed", "jobId", currentDeduplicatedJobId,
"prev", "delayed")

-- TODO remove debounced event in next breaking change
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "debounced", "jobId",
jobId, "debounceId", deduplicationId)
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "deduplicated", "jobId",
jobId, "deduplicationId", deduplicationId, "deduplicatedJobId", currentDeduplicatedJobId)

return true
end

return false
end

local function deduplicateJob(deduplicationOpts, jobId, delayedKey, deduplicationKey, eventsKey, maxEvents,
prefix)
local deduplicationId = deduplicationOpts and deduplicationOpts['id']
if deduplicationId then
local ttl = deduplicationOpts['ttl']
if deduplicationOpts['replace'] then
if ttl and ttl > 0 then
local currentDebounceJobId = rcall('GET', deduplicationKey)
if currentDebounceJobId then
local isRemoved = removeDelayedJob(delayedKey, deduplicationKey, eventsKey, maxEvents,
currentDebounceJobId, jobId, deduplicationId, prefix)
if isRemoved then
if deduplicationOpts['extend'] then
rcall('SET', deduplicationKey, jobId, 'PX', ttl)
else
rcall('SET', deduplicationKey, jobId, 'KEEPTTL')
end
return
else
return currentDebounceJobId
end
else
rcall('SET', deduplicationKey, jobId, 'PX', ttl)
return
end
else
local currentDebounceJobId = rcall('GET', deduplicationKey)
if currentDebounceJobId then
local isRemoved = removeDelayedJob(delayedKey, deduplicationKey, eventsKey, maxEvents,
currentDebounceJobId, jobId, deduplicationId, prefix)

if isRemoved then
rcall('SET', deduplicationKey, jobId)
return
else
return currentDebounceJobId
end
else
rcall('SET', deduplicationKey, jobId)
return
end
end
else
local deduplicationKeyExists
if ttl and ttl > 0 then
if deduplicationOpts['extend'] then
local currentDebounceJobId = rcall('GET', deduplicationKey)
if currentDebounceJobId then
rcall('SET', deduplicationKey, currentDebounceJobId, 'PX', ttl)
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "debounced",
"jobId", currentDebounceJobId, "debounceId", deduplicationId)
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "deduplicated", "jobId",
currentDebounceJobId, "deduplicationId", deduplicationId, "deduplicatedJobId", jobId)
return currentDebounceJobId
else
rcall('SET', deduplicationKey, jobId, 'PX', ttl)
return
end
else
deduplicationKeyExists = not rcall('SET', deduplicationKey, jobId, 'PX', ttl, 'NX')
end
else
deduplicationKeyExists = not rcall('SET', deduplicationKey, jobId, 'NX')
end

if deduplicationKeyExists then
local currentDebounceJobId = rcall('GET', deduplicationKey)
-- TODO remove debounced event in next breaking change
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "debounced", "jobId",
currentDebounceJobId, "debounceId", deduplicationId)
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "deduplicated", "jobId",
currentDebounceJobId, "deduplicationId", deduplicationId, "deduplicatedJobId", jobId)
return currentDebounceJobId
end
end
end
end
12
backend/node_modules/bullmq/dist/cjs/commands/includes/destructureJobKey.lua
generated
vendored
Normal file
12
backend/node_modules/bullmq/dist/cjs/commands/includes/destructureJobKey.lua
generated
vendored
Normal file
@@ -0,0 +1,12 @@
--[[
Functions to destructure job key.
Just a bit of warning, these functions may be a bit slow and affect performance significantly.
]]

local getJobIdFromKey = function (jobKey)
return string.match(jobKey, ".*:(.*)")
end

local getJobKeyPrefix = function (jobKey, jobId)
return string.sub(jobKey, 0, #jobKey - #jobId)
end
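Worked example with a hypothetical key (actual queue prefixes depend on configuration):

local jobKey = "bull:myqueue:42" -- hypothetical key
local jobId = getJobIdFromKey(jobKey) -- "42"
local prefix = getJobKeyPrefix(jobKey, jobId) -- "bull:myqueue:"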
14
backend/node_modules/bullmq/dist/cjs/commands/includes/filterOutJobsToIgnore.lua
generated
vendored
Normal file
14
backend/node_modules/bullmq/dist/cjs/commands/includes/filterOutJobsToIgnore.lua
generated
vendored
Normal file
@@ -0,0 +1,14 @@
--[[
Function to filter out jobs to ignore from a table.
]]

local function filterOutJobsToIgnore(jobs, jobsToIgnore)
local filteredJobs = {}
for i = 1, #jobs do
if not jobsToIgnore[jobs[i]] then
table.insert(filteredJobs, jobs[i])
end
end
return filteredJobs
end

70
backend/node_modules/bullmq/dist/cjs/commands/includes/findPage.lua
generated
vendored
Normal file
70
backend/node_modules/bullmq/dist/cjs/commands/includes/findPage.lua
generated
vendored
Normal file
@@ -0,0 +1,70 @@
--[[
Function to achieve pagination for a set or hash.
This function simulates pagination in the most efficient way possible
for a set using sscan or hscan.

The main limitation is that sets are not order preserving, so the
pagination is not stable. This means that if the set is modified
between pages, the same element may appear in different pages.
]] -- Maximum number of elements to be returned by sscan per iteration.
local maxCount = 100

-- Finds the cursor, and returns the first elements available for the requested page.
local function findPage(key, command, pageStart, pageSize, cursor, offset,
maxIterations, fetchJobs)
local items = {}
local jobs = {}
local iterations = 0

repeat
-- Iterate over the set using sscan/hscan.
local result = rcall(command, key, cursor, "COUNT", maxCount)

cursor = result[1]
local members = result[2]
local step = 1
if command == "HSCAN" then
step = 2
end

if #members == 0 then
-- If the result is empty, we can return the result.
return cursor, offset, items, jobs
end

local chunkStart = offset
local chunkEnd = offset + #members / step

local pageEnd = pageStart + pageSize

if chunkEnd < pageStart then
-- If the chunk is before the page, we can skip it.
offset = chunkEnd
elseif chunkStart > pageEnd then
-- If the chunk is after the page, we can return the result.
return cursor, offset, items, jobs
else
-- If the chunk is overlapping the page, we need to add the elements to the result.
for i = 1, #members, step do
if offset >= pageEnd then
return cursor, offset, items, jobs
end
if offset >= pageStart then
local index = #items + 1
if fetchJobs ~= nil then
jobs[#jobs+1] = rcall("HGETALL", members[i])
end
if step == 2 then
items[index] = {members[i], members[i + 1]}
else
items[index] = members[i]
end
end
offset = offset + 1
end
end
iterations = iterations + 1
until cursor == "0" or iterations >= maxIterations

return cursor, offset, items, jobs
end
25
backend/node_modules/bullmq/dist/cjs/commands/includes/getDelayedScore.lua
generated
vendored
Normal file
25
backend/node_modules/bullmq/dist/cjs/commands/includes/getDelayedScore.lua
generated
vendored
Normal file
@@ -0,0 +1,25 @@
--[[
Bake in the job id first 12 bits into the timestamp
to guarantee correct execution order of delayed jobs
(up to 4096 jobs per given timestamp or 4096 jobs apart per timestamp)
WARNING: Jobs that are so far apart that they wrap around will cause FIFO to fail
]]
local function getDelayedScore(delayedKey, timestamp, delay)
local delayedTimestamp = (delay > 0 and (tonumber(timestamp) + delay)) or tonumber(timestamp)
local minScore = delayedTimestamp * 0x1000
local maxScore = (delayedTimestamp + 1 ) * 0x1000 - 1

local result = rcall("ZREVRANGEBYSCORE", delayedKey, maxScore,
minScore, "WITHSCORES","LIMIT", 0, 1)
if #result then
local currentMaxScore = tonumber(result[2])
if currentMaxScore ~= nil then
if currentMaxScore >= maxScore then
return maxScore, delayedTimestamp
else
return currentMaxScore + 1, delayedTimestamp
end
end
end
return minScore, delayedTimestamp
end
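A worked example of the packing above, with made-up values: for timestamp = 1700000000000 and delay = 5000, the delayed timestamp is 1700000005000, so the low 12 bits of the score act as a per-millisecond counter and at most 4096 jobs per millisecond keep strict FIFO order.

local delayedTimestamp = 1700000000000 + 5000
local minScore = delayedTimestamp * 0x1000 -- 6963200020480000
local maxScore = (delayedTimestamp + 1) * 0x1000 - 1 -- minScore + 4095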
28
backend/node_modules/bullmq/dist/cjs/commands/includes/getJobSchedulerEveryNextMillis.lua
generated
vendored
Normal file
28
backend/node_modules/bullmq/dist/cjs/commands/includes/getJobSchedulerEveryNextMillis.lua
generated
vendored
Normal file
@@ -0,0 +1,28 @@


local function getJobSchedulerEveryNextMillis(prevMillis, every, now, offset, startDate)
local nextMillis
if not prevMillis then
if startDate then
-- Assuming startDate is passed as milliseconds from JavaScript
nextMillis = tonumber(startDate)
nextMillis = nextMillis > now and nextMillis or now
else
nextMillis = now
end
else
nextMillis = prevMillis + every
-- check if we may have missed some iterations
if nextMillis < now then
nextMillis = math.floor(now / every) * every + every + (offset or 0)
end
end

if not offset or offset == 0 then
local timeSlot = math.floor(nextMillis / every) * every;
offset = nextMillis - timeSlot;
end

-- Return a tuple nextMillis, offset
return math.floor(nextMillis), math.floor(offset)
end
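A hedged worked example of the catch-up branch (numbers invented): with prevMillis = 10000, every = 5000 and now = 23000, prevMillis + every = 15000 is already in the past, so the missed slots are skipped and the next run lands on the slot after "now"; with no explicit offset the returned offset is 0.

local nextMillis = math.floor(23000 / 5000) * 5000 + 5000 -- 25000, skipping the missed 15000 and 20000 slots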
13
backend/node_modules/bullmq/dist/cjs/commands/includes/getJobsInZset.lua
generated
vendored
Normal file
13
backend/node_modules/bullmq/dist/cjs/commands/includes/getJobsInZset.lua
generated
vendored
Normal file
@@ -0,0 +1,13 @@
--[[
We use ZRANGEBYSCORE to make the case where we're deleting a limited number
of items in a sorted set only run a single iteration. If we simply used
ZRANGE, we may take a long time traversing through jobs that are within the
grace period.
]]
local function getJobsInZset(zsetKey, rangeEnd, limit)
if limit > 0 then
return rcall("ZRANGEBYSCORE", zsetKey, 0, rangeEnd, "LIMIT", 0, limit)
else
return rcall("ZRANGEBYSCORE", zsetKey, 0, rangeEnd)
end
end
12
backend/node_modules/bullmq/dist/cjs/commands/includes/getNextDelayedTimestamp.lua
generated
vendored
Normal file
12
backend/node_modules/bullmq/dist/cjs/commands/includes/getNextDelayedTimestamp.lua
generated
vendored
Normal file
@@ -0,0 +1,12 @@
--[[
Function to return the next delayed job timestamp.
]]
local function getNextDelayedTimestamp(delayedKey)
local result = rcall("ZRANGE", delayedKey, 0, 0, "WITHSCORES")
if #result then
local nextTimestamp = tonumber(result[2])
if nextTimestamp ~= nil then
return nextTimestamp / 0x1000
end
end
end
11
backend/node_modules/bullmq/dist/cjs/commands/includes/getOrSetMaxEvents.lua
generated
vendored
Normal file
11
backend/node_modules/bullmq/dist/cjs/commands/includes/getOrSetMaxEvents.lua
generated
vendored
Normal file
@@ -0,0 +1,11 @@
--[[
Function to get max events value or set by default 10000.
]]
local function getOrSetMaxEvents(metaKey)
local maxEvents = rcall("HGET", metaKey, "opts.maxLenEvents")
if not maxEvents then
maxEvents = 10000
rcall("HSET", metaKey, "opts.maxLenEvents", maxEvents)
end
return maxEvents
end
8
backend/node_modules/bullmq/dist/cjs/commands/includes/getPriorityScore.lua
generated
vendored
Normal file
8
backend/node_modules/bullmq/dist/cjs/commands/includes/getPriorityScore.lua
generated
vendored
Normal file
@@ -0,0 +1,8 @@
--[[
Function to get priority score.
]]

local function getPriorityScore(priority, priorityCounterKey)
local prioCounter = rcall("INCR", priorityCounterKey)
return priority * 0x100000000 + prioCounter % 0x100000000
end
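Worked example of the score layout (illustrative numbers): the priority occupies the bits above 2^32 and the counter the bits below, so jobs compare by priority first and by insertion order second.

local score = 3 * 0x100000000 + 42 % 0x100000000 -- priority 3, counter 42 -> 12884901930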
17
backend/node_modules/bullmq/dist/cjs/commands/includes/getRateLimitTTL.lua
generated
vendored
Normal file
17
backend/node_modules/bullmq/dist/cjs/commands/includes/getRateLimitTTL.lua
generated
vendored
Normal file
@@ -0,0 +1,17 @@
--[[
Function to get current rate limit ttl.
]]
local function getRateLimitTTL(maxJobs, rateLimiterKey)
if maxJobs and maxJobs <= tonumber(rcall("GET", rateLimiterKey) or 0) then
local pttl = rcall("PTTL", rateLimiterKey)

if pttl == 0 then
rcall("DEL", rateLimiterKey)
end

if pttl > 0 then
return pttl
end
end
return 0
end
22
backend/node_modules/bullmq/dist/cjs/commands/includes/getTargetQueueList.lua
generated
vendored
Normal file
22
backend/node_modules/bullmq/dist/cjs/commands/includes/getTargetQueueList.lua
generated
vendored
Normal file
@@ -0,0 +1,22 @@
--[[
Function to check for the meta.paused key to decide if we are paused or not
(since an empty list and !EXISTS are not really the same).
]]

local function getTargetQueueList(queueMetaKey, activeKey, waitKey, pausedKey)
local queueAttributes = rcall("HMGET", queueMetaKey, "paused", "concurrency", "max", "duration")

if queueAttributes[1] then
return pausedKey, true, queueAttributes[3], queueAttributes[4]
else
if queueAttributes[2] then
local activeCount = rcall("LLEN", activeKey)
if activeCount >= tonumber(queueAttributes[2]) then
return waitKey, true, queueAttributes[3], queueAttributes[4]
else
return waitKey, false, queueAttributes[3], queueAttributes[4]
end
end
end
return waitKey, false, queueAttributes[3], queueAttributes[4]
end
19
backend/node_modules/bullmq/dist/cjs/commands/includes/getTimestamp.lua
generated
vendored
Normal file
19
backend/node_modules/bullmq/dist/cjs/commands/includes/getTimestamp.lua
generated
vendored
Normal file
@@ -0,0 +1,19 @@
--[[
Function to get the latest saved timestamp.
]]

local function getTimestamp(jobKey, attributes)
if #attributes == 1 then
return rcall("HGET", jobKey, attributes[1])
end

local jobTs
for _, ts in ipairs(rcall("HMGET", jobKey, unpack(attributes))) do
if (ts) then
jobTs = ts
break
end
end

return jobTs
end
7
backend/node_modules/bullmq/dist/cjs/commands/includes/getZSetItems.lua
generated
vendored
Normal file
7
backend/node_modules/bullmq/dist/cjs/commands/includes/getZSetItems.lua
generated
vendored
Normal file
@@ -0,0 +1,7 @@
--[[
Function to get ZSet items.
]]

local function getZSetItems(keyName, max)
return rcall('ZRANGE', keyName, 0, max - 1)
end
26
backend/node_modules/bullmq/dist/cjs/commands/includes/handleDuplicatedJob.lua
generated
vendored
Normal file
26
backend/node_modules/bullmq/dist/cjs/commands/includes/handleDuplicatedJob.lua
generated
vendored
Normal file
@@ -0,0 +1,26 @@
--[[
Function to handle the case when job is duplicated.
]]

-- Includes
--- @include "updateExistingJobsParent"

local function handleDuplicatedJob(jobKey, jobId, currentParentKey, currentParent,
parentData, parentDependenciesKey, completedKey, eventsKey, maxEvents, timestamp)
local existedParentKey = rcall("HGET", jobKey, "parentKey")

if not existedParentKey or existedParentKey == currentParentKey then
updateExistingJobsParent(currentParentKey, currentParent, parentData,
parentDependenciesKey, completedKey, jobKey,
jobId, timestamp)
else
if currentParentKey ~= nil and currentParentKey ~= existedParentKey
and (rcall("EXISTS", existedParentKey) == 1) then
return -7
end
end
rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event",
"duplicated", "jobId", jobId)

return jobId .. "" -- convert to string
end
15
backend/node_modules/bullmq/dist/cjs/commands/includes/isJobSchedulerJob.lua
generated
vendored
Normal file
15
backend/node_modules/bullmq/dist/cjs/commands/includes/isJobSchedulerJob.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
--[[
Function to check if the job belongs to a job scheduler and
current delayed job matches with jobId
]]
local function isJobSchedulerJob(jobId, jobKey, jobSchedulersKey)
local repeatJobKey = rcall("HGET", jobKey, "rjk")
if repeatJobKey then
local prevMillis = rcall("ZSCORE", jobSchedulersKey, repeatJobKey)
if prevMillis then
local currentDelayedJobId = "repeat:" .. repeatJobKey .. ":" .. prevMillis
return jobId == currentDelayedJobId
end
end
return false
end
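For illustration, with a hypothetical scheduler: if a job's rjk field is "my-scheduler" and the schedulers zset stores 1700000000000 as its score, the only delayed job treated as belonging to that scheduler is the one whose id matches the reconstructed value.

local currentDelayedJobId = "repeat:" .. "my-scheduler" .. ":" .. "1700000000000" -- "repeat:my-scheduler:1700000000000"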
34
backend/node_modules/bullmq/dist/cjs/commands/includes/isLocked.lua
generated
vendored
Normal file
34
backend/node_modules/bullmq/dist/cjs/commands/includes/isLocked.lua
generated
vendored
Normal file
@@ -0,0 +1,34 @@
--[[
Function to recursively check if there are no locks
on the jobs to be removed.

returns:
boolean
]]
--- @include "destructureJobKey"

local function isLocked( prefix, jobId, removeChildren)
local jobKey = prefix .. jobId;

-- Check if this job is locked
local lockKey = jobKey .. ':lock'
local lock = rcall("GET", lockKey)
if not lock then
if removeChildren == "1" then
local dependencies = rcall("SMEMBERS", jobKey .. ":dependencies")
if (#dependencies > 0) then
for i, childJobKey in ipairs(dependencies) do
-- We need to get the jobId for this job.
local childJobId = getJobIdFromKey(childJobKey)
local childJobPrefix = getJobKeyPrefix(childJobKey, childJobId)
local result = isLocked( childJobPrefix, childJobId, removeChildren )
if result then
return true
end
end
end
end
return false
end
return true
end
15
backend/node_modules/bullmq/dist/cjs/commands/includes/isQueueMaxed.lua
generated
vendored
Normal file
15
backend/node_modules/bullmq/dist/cjs/commands/includes/isQueueMaxed.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
--[[
Function to check if queue is maxed or not.
]]
local function isQueueMaxed(queueMetaKey, activeKey)
local maxConcurrency = rcall("HGET", queueMetaKey, "concurrency")

if maxConcurrency then
local activeCount = rcall("LLEN", activeKey)
if activeCount >= tonumber(maxConcurrency) then
return true
end
end

return false
end
7
backend/node_modules/bullmq/dist/cjs/commands/includes/isQueuePaused.lua
generated
vendored
Normal file
7
backend/node_modules/bullmq/dist/cjs/commands/includes/isQueuePaused.lua
generated
vendored
Normal file
@@ -0,0 +1,7 @@
--[[
Function to check for the meta.paused key to decide if we are paused or not
(since an empty list and !EXISTS are not really the same).
]]
local function isQueuePaused(queueMetaKey)
return rcall("HEXISTS", queueMetaKey, "paused") == 1
end
18
backend/node_modules/bullmq/dist/cjs/commands/includes/isQueuePausedOrMaxed.lua
generated
vendored
Normal file
18
backend/node_modules/bullmq/dist/cjs/commands/includes/isQueuePausedOrMaxed.lua
generated
vendored
Normal file
@@ -0,0 +1,18 @@
--[[
Function to check if queue is paused or maxed
(since an empty list and !EXISTS are not really the same).
]]

local function isQueuePausedOrMaxed(queueMetaKey, activeKey)
local queueAttributes = rcall("HMGET", queueMetaKey, "paused", "concurrency")

if queueAttributes[1] then
return true
else
if queueAttributes[2] then
local activeCount = rcall("LLEN", activeKey)
return activeCount >= tonumber(queueAttributes[2])
end
end
return false
end
70
backend/node_modules/bullmq/dist/cjs/commands/includes/moveChildFromDependenciesIfNeeded.lua
generated
vendored
Normal file
70
backend/node_modules/bullmq/dist/cjs/commands/includes/moveChildFromDependenciesIfNeeded.lua
generated
vendored
Normal file
@@ -0,0 +1,70 @@
--[[
Function to recursively move from waitingChildren to failed.
]]

-- Includes
--- @include "moveParentToWaitIfNoPendingDependencies"
--- @include "moveParentToWaitIfNeeded"
--- @include "moveParentToWait"

local handleChildFailureAndMoveParentToWait = function (parentQueueKey, parentKey, parentId, jobIdKey, timestamp)
if rcall("EXISTS", parentKey) == 1 then
local parentWaitingChildrenKey = parentQueueKey .. ":waiting-children"
local parentDelayedKey = parentQueueKey .. ":delayed"
local parentWaitingChildrenOrDelayedKey
if rcall("ZSCORE", parentWaitingChildrenKey, parentId) then
parentWaitingChildrenOrDelayedKey = parentWaitingChildrenKey
elseif rcall("ZSCORE", parentDelayedKey, parentId) then
parentWaitingChildrenOrDelayedKey = parentDelayedKey
rcall("HSET", parentKey, "delay", 0)
end

if parentWaitingChildrenOrDelayedKey then
rcall("ZREM", parentWaitingChildrenOrDelayedKey, parentId)
local deferredFailure = "child " .. jobIdKey .. " failed"
rcall("HSET", parentKey, "defa", deferredFailure)
moveParentToWait(parentQueueKey, parentKey, parentId, timestamp)
else
if not rcall("ZSCORE", parentQueueKey .. ":failed", parentId) then
local deferredFailure = "child " .. jobIdKey .. " failed"
rcall("HSET", parentKey, "defa", deferredFailure)
end
end
end
end

local moveChildFromDependenciesIfNeeded = function (rawParentData, childKey, failedReason, timestamp)
if rawParentData then
local parentData = cjson.decode(rawParentData)
local parentKey = parentData['queueKey'] .. ':' .. parentData['id']
local parentDependenciesChildrenKey = parentKey .. ":dependencies"
if parentData['fpof'] then
if rcall("SREM", parentDependenciesChildrenKey, childKey) == 1 then
local parentUnsuccessfulChildrenKey = parentKey .. ":unsuccessful"
rcall("ZADD", parentUnsuccessfulChildrenKey, timestamp, childKey)
handleChildFailureAndMoveParentToWait(
parentData['queueKey'],
parentKey,
parentData['id'],
childKey,
timestamp
)
end
elseif parentData['cpof'] then
if rcall("SREM", parentDependenciesChildrenKey, childKey) == 1 then
local parentFailedChildrenKey = parentKey .. ":failed"
rcall("HSET", parentFailedChildrenKey, childKey, failedReason)
moveParentToWaitIfNeeded(parentData['queueKey'], parentKey, parentData['id'], timestamp)
end
elseif parentData['idof'] or parentData['rdof'] then
if rcall("SREM", parentDependenciesChildrenKey, childKey) == 1 then
moveParentToWaitIfNoPendingDependencies(parentData['queueKey'], parentDependenciesChildrenKey,
parentKey, parentData['id'], timestamp)
if parentData['idof'] then
local parentFailedChildrenKey = parentKey .. ":failed"
rcall("HSET", parentFailedChildrenKey, childKey, failedReason)
end
end
end
end
end
13
backend/node_modules/bullmq/dist/cjs/commands/includes/moveJobFromPrioritizedToActive.lua
generated
vendored
Normal file
13
backend/node_modules/bullmq/dist/cjs/commands/includes/moveJobFromPrioritizedToActive.lua
generated
vendored
Normal file
@@ -0,0 +1,13 @@
--[[
Function to move job from prioritized state to active.
]]

local function moveJobFromPrioritizedToActive(priorityKey, activeKey, priorityCounterKey)
local prioritizedJob = rcall("ZPOPMIN", priorityKey)
if #prioritizedJob > 0 then
rcall("LPUSH", activeKey, prioritizedJob[1])
return prioritizedJob[1]
else
rcall("DEL", priorityCounterKey)
end
end
15
backend/node_modules/bullmq/dist/cjs/commands/includes/moveJobToWait.lua
generated
vendored
Normal file
15
backend/node_modules/bullmq/dist/cjs/commands/includes/moveJobToWait.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
--[[
Function to move job to wait to be picked up by a waiting worker.
]]

-- Includes
--- @include "addJobInTargetList"
--- @include "getTargetQueueList"

local function moveJobToWait(metaKey, activeKey, waitKey, pausedKey, markerKey, eventStreamKey,
jobId, pushCmd)
local target, isPausedOrMaxed = getTargetQueueList(metaKey, activeKey, waitKey, pausedKey)
addJobInTargetList(target, markerKey, pushCmd, isPausedOrMaxed, jobId)

rcall("XADD", eventStreamKey, "*", "event", "waiting", "jobId", jobId, 'prev', 'active')
end
45
backend/node_modules/bullmq/dist/cjs/commands/includes/moveParentToWait.lua
generated
vendored
Normal file
45
backend/node_modules/bullmq/dist/cjs/commands/includes/moveParentToWait.lua
generated
vendored
Normal file
@@ -0,0 +1,45 @@
--[[
Move parent to a wait status (wait, prioritized or delayed)
]]

-- Includes
--- @include "addDelayMarkerIfNeeded"
--- @include "addJobInTargetList"
--- @include "addJobWithPriority"
--- @include "isQueuePausedOrMaxed"
--- @include "getTargetQueueList"
local function moveParentToWait(parentQueueKey, parentKey, parentId, timestamp)
local parentWaitKey = parentQueueKey .. ":wait"
local parentPausedKey = parentQueueKey .. ":paused"
local parentActiveKey = parentQueueKey .. ":active"
local parentMetaKey = parentQueueKey .. ":meta"

local parentMarkerKey = parentQueueKey .. ":marker"
local jobAttributes = rcall("HMGET", parentKey, "priority", "delay")
local priority = tonumber(jobAttributes[1]) or 0
local delay = tonumber(jobAttributes[2]) or 0

if delay > 0 then
local delayedTimestamp = tonumber(timestamp) + delay
local score = delayedTimestamp * 0x1000
local parentDelayedKey = parentQueueKey .. ":delayed"
rcall("ZADD", parentDelayedKey, score, parentId)
rcall("XADD", parentQueueKey .. ":events", "*", "event", "delayed", "jobId", parentId, "delay",
delayedTimestamp)

addDelayMarkerIfNeeded(parentMarkerKey, parentDelayedKey)
else
if priority == 0 then
local parentTarget, isParentPausedOrMaxed = getTargetQueueList(parentMetaKey, parentActiveKey,
parentWaitKey, parentPausedKey)
addJobInTargetList(parentTarget, parentMarkerKey, "RPUSH", isParentPausedOrMaxed, parentId)
else
local isPausedOrMaxed = isQueuePausedOrMaxed(parentMetaKey, parentActiveKey)
addJobWithPriority(parentMarkerKey, parentQueueKey .. ":prioritized", priority, parentId,
parentQueueKey .. ":pc", isPausedOrMaxed)
end

rcall("XADD", parentQueueKey .. ":events", "*", "event", "waiting", "jobId", parentId, "prev",
"waiting-children")
end
end
14
backend/node_modules/bullmq/dist/cjs/commands/includes/moveParentToWaitIfNeeded.lua
generated
vendored
Normal file
14
backend/node_modules/bullmq/dist/cjs/commands/includes/moveParentToWaitIfNeeded.lua
generated
vendored
Normal file
@@ -0,0 +1,14 @@
--[[
Validate and move parent to a wait status (waiting, delayed or prioritized) if needed.
]]
-- Includes
--- @include "moveParentToWait"
local function moveParentToWaitIfNeeded(parentQueueKey, parentKey, parentId, timestamp)
if rcall("EXISTS", parentKey) == 1 then
local parentWaitingChildrenKey = parentQueueKey .. ":waiting-children"
if rcall("ZSCORE", parentWaitingChildrenKey, parentId) then
rcall("ZREM", parentWaitingChildrenKey, parentId)
moveParentToWait(parentQueueKey, parentKey, parentId, timestamp)
end
end
end
13
backend/node_modules/bullmq/dist/cjs/commands/includes/moveParentToWaitIfNoPendingDependencies.lua
generated
vendored
Normal file
13
backend/node_modules/bullmq/dist/cjs/commands/includes/moveParentToWaitIfNoPendingDependencies.lua
generated
vendored
Normal file
@@ -0,0 +1,13 @@
--[[
Validate and move parent to a wait status (waiting, delayed or prioritized)
if no pending dependencies.
]]
-- Includes
--- @include "moveParentToWaitIfNeeded"
local function moveParentToWaitIfNoPendingDependencies(parentQueueKey, parentDependenciesKey, parentKey,
parentId, timestamp)
local doNotHavePendingDependencies = rcall("SCARD", parentDependenciesKey) == 0
if doNotHavePendingDependencies then
moveParentToWaitIfNeeded(parentQueueKey, parentKey, parentId, timestamp)
end
end
49
backend/node_modules/bullmq/dist/cjs/commands/includes/prepareJobForProcessing.lua
generated
vendored
Normal file
49
backend/node_modules/bullmq/dist/cjs/commands/includes/prepareJobForProcessing.lua
generated
vendored
Normal file
@@ -0,0 +1,49 @@
--[[
Function to move job from wait state to active.
Input:
opts - token - lock token
opts - lockDuration
opts - limiter
]]

-- Includes
--- @include "addBaseMarkerIfNeeded"

local function prepareJobForProcessing(keyPrefix, rateLimiterKey, eventStreamKey,
jobId, processedOn, maxJobs, limiterDuration, markerKey, opts)
local jobKey = keyPrefix .. jobId

-- Check if we need to perform rate limiting.
if maxJobs then
local jobCounter = tonumber(rcall("INCR", rateLimiterKey))

if jobCounter == 1 then
local integerDuration = math.floor(math.abs(limiterDuration))
rcall("PEXPIRE", rateLimiterKey, integerDuration)
end
end

-- get a lock
if opts['token'] ~= "0" then
local lockKey = jobKey .. ':lock'
rcall("SET", lockKey, opts['token'], "PX", opts['lockDuration'])
end

local optionalValues = {}

if opts['name'] then
-- Set "processedBy" field to the worker name
table.insert(optionalValues, "pb")
table.insert(optionalValues, opts['name'])
end

rcall("XADD", eventStreamKey, "*", "event", "active", "jobId", jobId, "prev", "waiting")
rcall("HMSET", jobKey, "processedOn", processedOn, unpack(optionalValues))
rcall("HINCRBY", jobKey, "ats", 1)

addBaseMarkerIfNeeded(markerKey, false)

-- rate limit delay must be 0 in this case to prevent adding more delay
-- when job that is moved to active needs to be processed
return {rcall("HGETALL", jobKey), jobId, 0, 0} -- get job data
end
44
backend/node_modules/bullmq/dist/cjs/commands/includes/promoteDelayedJobs.lua
generated
vendored
Normal file
44
backend/node_modules/bullmq/dist/cjs/commands/includes/promoteDelayedJobs.lua
generated
vendored
Normal file
@@ -0,0 +1,44 @@
--[[
Updates the delay set, by moving delayed jobs that should
be processed now to "wait".

Events:
'waiting'
]]

-- Includes
--- @include "addBaseMarkerIfNeeded"
--- @include "addJobInTargetList"
--- @include "addJobWithPriority"
--- @include "getPriorityScore"

-- Try to get as many as 1000 jobs at once
local function promoteDelayedJobs(delayedKey, markerKey, targetKey, prioritizedKey,
eventStreamKey, prefix, timestamp, priorityCounterKey, isPaused)
local jobs = rcall("ZRANGEBYSCORE", delayedKey, 0, (timestamp + 1) * 0x1000 - 1, "LIMIT", 0, 1000)

if (#jobs > 0) then
rcall("ZREM", delayedKey, unpack(jobs))

for _, jobId in ipairs(jobs) do
local jobKey = prefix .. jobId
local priority =
tonumber(rcall("HGET", jobKey, "priority")) or 0

if priority == 0 then
-- LIFO or FIFO
rcall("LPUSH", targetKey, jobId)
else
local score = getPriorityScore(priority, priorityCounterKey)
rcall("ZADD", prioritizedKey, score, jobId)
end

-- Emit waiting event
rcall("XADD", eventStreamKey, "*", "event", "waiting", "jobId",
jobId, "prev", "delayed")
rcall("HSET", jobKey, "delay", 0)
end

addBaseMarkerIfNeeded(markerKey, isPaused)
end
end
9
backend/node_modules/bullmq/dist/cjs/commands/includes/pushBackJobWithPriority.lua
generated
vendored
Normal file
9
backend/node_modules/bullmq/dist/cjs/commands/includes/pushBackJobWithPriority.lua
generated
vendored
Normal file
@@ -0,0 +1,9 @@
--[[
Function to push back job considering priority in front of same prioritized jobs.
]]
local function pushBackJobWithPriority(prioritizedKey, priority, jobId)
-- in order to put it at front of same prioritized jobs
-- we consider prioritized counter as 0
local score = priority * 0x100000000
rcall("ZADD", prioritizedKey, score, jobId)
end
23
backend/node_modules/bullmq/dist/cjs/commands/includes/removeDeduplicationKeyIfNeededOnFinalization.lua
generated
vendored
Normal file
23
backend/node_modules/bullmq/dist/cjs/commands/includes/removeDeduplicationKeyIfNeededOnFinalization.lua
generated
vendored
Normal file
@@ -0,0 +1,23 @@
--[[
Function to remove deduplication key if needed
when a job is moved to completed or failed states.
]]

local function removeDeduplicationKeyIfNeededOnFinalization(prefixKey,
deduplicationId, jobId)
if deduplicationId then
local deduplicationKey = prefixKey .. "de:" .. deduplicationId
local pttl = rcall("PTTL", deduplicationKey)

if pttl == 0 then
return rcall("DEL", deduplicationKey)
end

if pttl == -1 then
local currentJobId = rcall('GET', deduplicationKey)
if currentJobId and currentJobId == jobId then
return rcall("DEL", deduplicationKey)
end
end
end
end
15
backend/node_modules/bullmq/dist/cjs/commands/includes/removeDeduplicationKeyIfNeededOnRemoval.lua
generated
vendored
Normal file
15
backend/node_modules/bullmq/dist/cjs/commands/includes/removeDeduplicationKeyIfNeededOnRemoval.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
--[[
Function to remove deduplication key if needed
when a job is being removed.
]]

local function removeDeduplicationKeyIfNeededOnRemoval(prefixKey,
jobId, deduplicationId)
if deduplicationId then
local deduplicationKey = prefixKey .. "de:" .. deduplicationId
local currentJobId = rcall('GET', deduplicationKey)
if currentJobId and currentJobId == jobId then
return rcall("DEL", deduplicationKey)
end
end
end
18
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJob.lua
generated
vendored
Normal file
18
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJob.lua
generated
vendored
Normal file
@@ -0,0 +1,18 @@
--[[
Function to remove job.
]]

-- Includes
--- @include "removeDeduplicationKeyIfNeededOnRemoval"
--- @include "removeJobKeys"
--- @include "removeParentDependencyKey"

local function removeJob(jobId, hard, baseKey, shouldRemoveDeduplicationKey)
local jobKey = baseKey .. jobId
removeParentDependencyKey(jobKey, hard, nil, baseKey)
if shouldRemoveDeduplicationKey then
local deduplicationId = rcall("HGET", jobKey, "deid")
removeDeduplicationKeyIfNeededOnRemoval(baseKey, jobId, deduplicationId)
end
removeJobKeys(jobKey)
end
35
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobFromAnyState.lua
generated
vendored
Normal file
35
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobFromAnyState.lua
generated
vendored
Normal file
@@ -0,0 +1,35 @@
--[[
Function to remove from any state.

returns:
prev state
]]

local function removeJobFromAnyState( prefix, jobId)
-- We start with the ZSCORE checks, since they have O(1) complexity
if rcall("ZSCORE", prefix .. "completed", jobId) then
rcall("ZREM", prefix .. "completed", jobId)
return "completed"
elseif rcall("ZSCORE", prefix .. "waiting-children", jobId) then
rcall("ZREM", prefix .. "waiting-children", jobId)
return "waiting-children"
elseif rcall("ZSCORE", prefix .. "delayed", jobId) then
rcall("ZREM", prefix .. "delayed", jobId)
return "delayed"
elseif rcall("ZSCORE", prefix .. "failed", jobId) then
rcall("ZREM", prefix .. "failed", jobId)
return "failed"
elseif rcall("ZSCORE", prefix .. "prioritized", jobId) then
rcall("ZREM", prefix .. "prioritized", jobId)
return "prioritized"
-- We remove only 1 element from the list, since we assume they are not added multiple times
elseif rcall("LREM", prefix .. "wait", 1, jobId) == 1 then
return "wait"
elseif rcall("LREM", prefix .. "paused", 1, jobId) == 1 then
return "paused"
elseif rcall("LREM", prefix .. "active", 1, jobId) == 1 then
return "active"
end

return "unknown"
end
8
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobKeys.lua
generated
vendored
Normal file
8
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobKeys.lua
generated
vendored
Normal file
@@ -0,0 +1,8 @@
--[[
Function to remove job keys.
]]

local function removeJobKeys(jobKey)
return rcall("DEL", jobKey, jobKey .. ':logs', jobKey .. ':dependencies',
jobKey .. ':processed', jobKey .. ':failed', jobKey .. ':unsuccessful')
end
96
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobWithChildren.lua
generated
vendored
Normal file
96
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobWithChildren.lua
generated
vendored
Normal file
@@ -0,0 +1,96 @@
--[[
Remove a job from all the statuses it may be in as well as all its data,
including its children. Active children can be ignored.

Events:
'removed'
]]

local rcall = redis.call

-- Includes
--- @include "destructureJobKey"
--- @include "getOrSetMaxEvents"
--- @include "isJobSchedulerJob"
--- @include "removeDeduplicationKeyIfNeededOnRemoval"
--- @include "removeJobFromAnyState"
--- @include "removeJobKeys"
--- @include "removeParentDependencyKey"
--- @include "isLocked"

local removeJobChildren
local removeJobWithChildren

removeJobChildren = function(prefix, jobKey, options)
-- Check if this job has children
-- If so, we are going to try to remove the children recursively in a depth-first way
-- because if some job is locked, we must exit with an error.

if not options.ignoreProcessed then
local processed = rcall("HGETALL", jobKey .. ":processed")
if #processed > 0 then
for i = 1, #processed, 2 do
local childJobId = getJobIdFromKey(processed[i])
local childJobPrefix = getJobKeyPrefix(processed[i], childJobId)
removeJobWithChildren(childJobPrefix, childJobId, jobKey, options)
end
end

local failed = rcall("HGETALL", jobKey .. ":failed")
if #failed > 0 then
for i = 1, #failed, 2 do
local childJobId = getJobIdFromKey(failed[i])
local childJobPrefix = getJobKeyPrefix(failed[i], childJobId)
removeJobWithChildren(childJobPrefix, childJobId, jobKey, options)
end
end

local unsuccessful = rcall("ZRANGE", jobKey .. ":unsuccessful", 0, -1)
if #unsuccessful > 0 then
for i = 1, #unsuccessful, 1 do
local childJobId = getJobIdFromKey(unsuccessful[i])
local childJobPrefix = getJobKeyPrefix(unsuccessful[i], childJobId)
removeJobWithChildren(childJobPrefix, childJobId, jobKey, options)
end
end
end

local dependencies = rcall("SMEMBERS", jobKey .. ":dependencies")
if #dependencies > 0 then
for i, childJobKey in ipairs(dependencies) do
local childJobId = getJobIdFromKey(childJobKey)
local childJobPrefix = getJobKeyPrefix(childJobKey, childJobId)
removeJobWithChildren(childJobPrefix, childJobId, jobKey, options)
end
end
end

removeJobWithChildren = function(prefix, jobId, parentKey, options)
local jobKey = prefix .. jobId

if options.ignoreLocked then
if isLocked(prefix, jobId) then
return
end
end

-- Check if job is in the failed zset
local failedSet = prefix .. "failed"
if not (options.ignoreProcessed and rcall("ZSCORE", failedSet, jobId)) then
removeParentDependencyKey(jobKey, false, parentKey, nil)

if options.removeChildren then
removeJobChildren(prefix, jobKey, options)
end

local prev = removeJobFromAnyState(prefix, jobId)
local deduplicationId = rcall("HGET", jobKey, "deid")
removeDeduplicationKeyIfNeededOnRemoval(prefix, jobId, deduplicationId)
if removeJobKeys(jobKey) > 0 then
local metaKey = prefix .. "meta"
local maxEvents = getOrSetMaxEvents(metaKey)
rcall("XADD", prefix .. "events", "MAXLEN", "~", maxEvents, "*", "event", "removed",
"jobId", jobId, "prev", prev)
end
end
end
13
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobs.lua
generated
vendored
Normal file
13
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobs.lua
generated
vendored
Normal file
@@ -0,0 +1,13 @@
--[[
Functions to remove jobs.
]]

-- Includes
--- @include "removeJob"

local function removeJobs(keys, hard, baseKey, max)
for i, key in ipairs(keys) do
removeJob(key, hard, baseKey, true --[[remove debounce key]])
end
return max - #keys
end
15
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobsByMaxAge.lua
generated
vendored
Normal file
15
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobsByMaxAge.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
--[[
Functions to remove jobs by max age.
]]

-- Includes
--- @include "removeJob"

local function removeJobsByMaxAge(timestamp, maxAge, targetSet, prefix, maxLimit)
local start = timestamp - maxAge * 1000
local jobIds = rcall("ZREVRANGEBYSCORE", targetSet, start, "-inf", "LIMIT", 0, maxLimit)
for i, jobId in ipairs(jobIds) do
removeJob(jobId, false, prefix, false --[[remove debounce key]])
end
rcall("ZREMRANGEBYSCORE", targetSet, "-inf", start)
end
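A worked example of the cutoff arithmetic (made-up values): maxAge is given in seconds, so with timestamp = 1700003600000 and maxAge = 3600 the cutoff is 1700000000000, and entries at or below that score are targeted for removal.

local start = 1700003600000 - 3600 * 1000 -- 1700000000000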
15
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobsByMaxCount.lua
generated
vendored
Normal file
15
backend/node_modules/bullmq/dist/cjs/commands/includes/removeJobsByMaxCount.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
--[[
Functions to remove jobs by max count.
]]

-- Includes
--- @include "removeJob"

local function removeJobsByMaxCount(maxCount, targetSet, prefix)
local start = maxCount
local jobIds = rcall("ZREVRANGE", targetSet, start, -1)
for i, jobId in ipairs(jobIds) do
removeJob(jobId, false, prefix, false --[[remove debounce key]])
end
rcall("ZREMRANGEBYRANK", targetSet, 0, -(maxCount + 1))
end
23
backend/node_modules/bullmq/dist/cjs/commands/includes/removeListJobs.lua
generated
vendored
Normal file
23
backend/node_modules/bullmq/dist/cjs/commands/includes/removeListJobs.lua
generated
vendored
Normal file
@@ -0,0 +1,23 @@
--[[
Functions to remove jobs.
]]

-- Includes
--- @include "filterOutJobsToIgnore"
--- @include "removeJobs"

local function getListItems(keyName, max)
return rcall('LRANGE', keyName, 0, max - 1)
end

local function removeListJobs(keyName, hard, baseKey, max, jobsToIgnore)
local jobs = getListItems(keyName, max)

if jobsToIgnore then
jobs = filterOutJobsToIgnore(jobs, jobsToIgnore)
end

local count = removeJobs(jobs, hard, baseKey, max)
rcall("LTRIM", keyName, #jobs, -1)
return count
end
19
backend/node_modules/bullmq/dist/cjs/commands/includes/removeLock.lua
generated
vendored
Normal file
19
backend/node_modules/bullmq/dist/cjs/commands/includes/removeLock.lua
generated
vendored
Normal file
@@ -0,0 +1,19 @@
local function removeLock(jobKey, stalledKey, token, jobId)
if token ~= "0" then
local lockKey = jobKey .. ':lock'
local lockToken = rcall("GET", lockKey)
if lockToken == token then
rcall("DEL", lockKey)
rcall("SREM", stalledKey, jobId)
else
if lockToken then
-- Lock exists but token does not match
return -6
else
-- Lock is missing completely
return -2
end
end
end
return 0
end
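A small caller-side sketch (variable names are hypothetical): the function returns 0 on success, -6 when another token holds the lock and -2 when the lock is gone, so a calling script would typically propagate any negative value as its result.

local errorCode = removeLock(jobKey, stalledKey, token, jobId)
if errorCode < 0 then
  return errorCode
end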
90
backend/node_modules/bullmq/dist/cjs/commands/includes/removeParentDependencyKey.lua
generated
vendored
Normal file
90
backend/node_modules/bullmq/dist/cjs/commands/includes/removeParentDependencyKey.lua
generated
vendored
Normal file
@@ -0,0 +1,90 @@
--[[
Check if this job has a parent. If so we will just remove it from
the parent child list, but if it is the last child we should move the parent to "wait/paused"
which requires code from "moveToFinished"
]]

-- Includes
--- @include "addJobInTargetList"
--- @include "destructureJobKey"
--- @include "getTargetQueueList"
--- @include "removeJobKeys"

local function _moveParentToWait(parentPrefix, parentId, emitEvent)
local parentTarget, isPausedOrMaxed = getTargetQueueList(parentPrefix .. "meta", parentPrefix .. "active",
parentPrefix .. "wait", parentPrefix .. "paused")
addJobInTargetList(parentTarget, parentPrefix .. "marker", "RPUSH", isPausedOrMaxed, parentId)

if emitEvent then
local parentEventStream = parentPrefix .. "events"
rcall("XADD", parentEventStream, "*", "event", "waiting", "jobId", parentId, "prev", "waiting-children")
end
end

local function removeParentDependencyKey(jobKey, hard, parentKey, baseKey, debounceId)
if parentKey then
local parentDependenciesKey = parentKey .. ":dependencies"
local result = rcall("SREM", parentDependenciesKey, jobKey)
if result > 0 then
local pendingDependencies = rcall("SCARD", parentDependenciesKey)
if pendingDependencies == 0 then
local parentId = getJobIdFromKey(parentKey)
local parentPrefix = getJobKeyPrefix(parentKey, parentId)

local numRemovedElements = rcall("ZREM", parentPrefix .. "waiting-children", parentId)

if numRemovedElements == 1 then
if hard then -- remove parent in same queue
if parentPrefix == baseKey then
removeParentDependencyKey(parentKey, hard, nil, baseKey, nil)
removeJobKeys(parentKey)
if debounceId then
rcall("DEL", parentPrefix .. "de:" .. debounceId)
end
else
_moveParentToWait(parentPrefix, parentId)
end
else
_moveParentToWait(parentPrefix, parentId, true)
end
end
end
return true
end
else
local parentAttributes = rcall("HMGET", jobKey, "parentKey", "deid")
local missedParentKey = parentAttributes[1]
if( (type(missedParentKey) == "string") and missedParentKey ~= ""
and (rcall("EXISTS", missedParentKey) == 1)) then
local parentDependenciesKey = missedParentKey .. ":dependencies"
local result = rcall("SREM", parentDependenciesKey, jobKey)
if result > 0 then
local pendingDependencies = rcall("SCARD", parentDependenciesKey)
if pendingDependencies == 0 then
local parentId = getJobIdFromKey(missedParentKey)
local parentPrefix = getJobKeyPrefix(missedParentKey, parentId)

local numRemovedElements = rcall("ZREM", parentPrefix .. "waiting-children", parentId)

if numRemovedElements == 1 then
if hard then
if parentPrefix == baseKey then
removeParentDependencyKey(missedParentKey, hard, nil, baseKey, nil)
removeJobKeys(missedParentKey)
if parentAttributes[2] then
rcall("DEL", parentPrefix .. "de:" .. parentAttributes[2])
end
else
_moveParentToWait(parentPrefix, parentId)
end
else
_moveParentToWait(parentPrefix, parentId, true)
end
end
end
return true
end
end
end
return false
end
21
backend/node_modules/bullmq/dist/cjs/commands/includes/removeZSetJobs.lua
generated
vendored
Normal file
21
backend/node_modules/bullmq/dist/cjs/commands/includes/removeZSetJobs.lua
generated
vendored
Normal file
@@ -0,0 +1,21 @@
-- Includes
--- @include "batches"
--- @include "filterOutJobsToIgnore"
--- @include "getZSetItems"
--- @include "removeJobs"

local function removeZSetJobs(keyName, hard, baseKey, max, jobsToIgnore)
local jobs = getZSetItems(keyName, max)

if jobsToIgnore then
jobs = filterOutJobsToIgnore(jobs, jobsToIgnore)
end

local count = removeJobs(jobs, hard, baseKey, max)
if(#jobs > 0) then
for from, to in batches(#jobs, 7000) do
rcall("ZREM", keyName, unpack(jobs, from, to))
end
end
return count
end
36
backend/node_modules/bullmq/dist/cjs/commands/includes/storeJob.lua
generated
vendored
Normal file
36
backend/node_modules/bullmq/dist/cjs/commands/includes/storeJob.lua
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
--[[
|
||||
Function to store a job
|
||||
]]
|
||||
local function storeJob(eventsKey, jobIdKey, jobId, name, data, opts, timestamp,
|
||||
parentKey, parentData, repeatJobKey)
|
||||
local jsonOpts = cjson.encode(opts)
|
||||
local delay = opts['delay'] or 0
|
||||
local priority = opts['priority'] or 0
|
||||
local debounceId = opts['de'] and opts['de']['id']
|
||||
|
||||
local optionalValues = {}
|
||||
if parentKey ~= nil then
|
||||
table.insert(optionalValues, "parentKey")
|
||||
table.insert(optionalValues, parentKey)
|
||||
table.insert(optionalValues, "parent")
|
||||
table.insert(optionalValues, parentData)
|
||||
end
|
||||
|
||||
if repeatJobKey then
|
||||
table.insert(optionalValues, "rjk")
|
||||
table.insert(optionalValues, repeatJobKey)
|
||||
end
|
||||
|
||||
if debounceId then
|
||||
table.insert(optionalValues, "deid")
|
||||
table.insert(optionalValues, debounceId)
|
||||
end
|
||||
|
||||
rcall("HMSET", jobIdKey, "name", name, "data", data, "opts", jsonOpts,
|
||||
"timestamp", timestamp, "delay", delay, "priority", priority,
|
||||
unpack(optionalValues))
|
||||
|
||||
rcall("XADD", eventsKey, "*", "event", "added", "jobId", jobId, "name", name)
|
||||
|
||||
return delay, priority
|
||||
end
|
||||
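A hedged sketch of a call site for this include follows; every literal below is a made-up placeholder rather than a value taken from this diff.

-- Sketch: persisting a plain job hash and emitting the "added" event.
local delay, priority = storeJob(
    "bull:myQueue:events",         -- eventsKey
    "bull:myQueue:123",            -- jobIdKey
    "123",                         -- jobId
    "send-email",                  -- name
    '{"to":"user@example.com"}',   -- JSON stringified job data
    {delay = 5000, priority = 0},  -- opts, already decoded from msgpack by the caller
    1700000000000,                 -- timestamp
    nil, nil, nil)                 -- parentKey, parentData, repeatJobKey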
66
backend/node_modules/bullmq/dist/cjs/commands/includes/storeJobScheduler.lua
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
--[[
|
||||
Function to store a job scheduler
|
||||
]]
|
||||
local function storeJobScheduler(schedulerId, schedulerKey, repeatKey, nextMillis, opts,
|
||||
templateData, templateOpts)
|
||||
rcall("ZADD", repeatKey, nextMillis, schedulerId)
|
||||
|
||||
local optionalValues = {}
|
||||
if opts['tz'] then
|
||||
table.insert(optionalValues, "tz")
|
||||
table.insert(optionalValues, opts['tz'])
|
||||
end
|
||||
|
||||
if opts['limit'] then
|
||||
table.insert(optionalValues, "limit")
|
||||
table.insert(optionalValues, opts['limit'])
|
||||
end
|
||||
|
||||
if opts['pattern'] then
|
||||
table.insert(optionalValues, "pattern")
|
||||
table.insert(optionalValues, opts['pattern'])
|
||||
end
|
||||
|
||||
if opts['startDate'] then
|
||||
table.insert(optionalValues, "startDate")
|
||||
table.insert(optionalValues, opts['startDate'])
|
||||
end
|
||||
|
||||
if opts['endDate'] then
|
||||
table.insert(optionalValues, "endDate")
|
||||
table.insert(optionalValues, opts['endDate'])
|
||||
end
|
||||
|
||||
if opts['every'] then
|
||||
table.insert(optionalValues, "every")
|
||||
table.insert(optionalValues, opts['every'])
|
||||
end
|
||||
|
||||
if opts['offset'] then
|
||||
table.insert(optionalValues, "offset")
|
||||
table.insert(optionalValues, opts['offset'])
|
||||
else
|
||||
local offset = rcall("HGET", schedulerKey, "offset")
|
||||
if offset then
|
||||
table.insert(optionalValues, "offset")
|
||||
table.insert(optionalValues, tonumber(offset))
|
||||
end
|
||||
end
|
||||
|
||||
local jsonTemplateOpts = cjson.encode(templateOpts)
|
||||
if jsonTemplateOpts and jsonTemplateOpts ~= '{}' then
|
||||
table.insert(optionalValues, "opts")
|
||||
table.insert(optionalValues, jsonTemplateOpts)
|
||||
end
|
||||
|
||||
if templateData and templateData ~= '{}' then
|
||||
table.insert(optionalValues, "data")
|
||||
table.insert(optionalValues, templateData)
|
||||
end
|
||||
|
||||
table.insert(optionalValues, "ic")
|
||||
table.insert(optionalValues, rcall("HGET", schedulerKey, "ic") or 1)
|
||||
|
||||
rcall("DEL", schedulerKey) -- remove all attributes and then re-insert new ones
|
||||
rcall("HMSET", schedulerKey, "name", opts['name'], unpack(optionalValues))
|
||||
end
|
||||
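A minimal sketch of how this include might be called; the scheduler id, key names and cron pattern below are assumptions used only for illustration.

-- Sketch: register a scheduler entry in the repeat ZSET and rebuild its
-- hash of optional attributes (tz/pattern/every/...).
storeJobScheduler(
    "nightly-report",                          -- schedulerId
    "bull:myQueue:repeat:nightly-report",      -- schedulerKey (HASH)
    "bull:myQueue:repeat",                     -- repeatKey (ZSET scored by nextMillis)
    1700003600000,                             -- nextMillis
    {name = "report", pattern = "0 0 * * *", tz = "UTC"},
    '{"scope":"daily"}',                       -- templateData (JSON string)
    {attempts = 3})                            -- templateOpts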
15
backend/node_modules/bullmq/dist/cjs/commands/includes/trimEvents.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
--[[
Function to trim events, default 10000.
]]

-- Includes
--- @include "getOrSetMaxEvents"

local function trimEvents(metaKey, eventStreamKey)
local maxEvents = getOrSetMaxEvents(metaKey)
if maxEvents then
rcall("XTRIM", eventStreamKey, "MAXLEN", "~", maxEvents)
else
rcall("XTRIM", eventStreamKey, "MAXLEN", "~", 10000)
end
end
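Callers pass the queue's meta key and its events stream; a minimal sketch, assuming a hypothetical queue prefix:

-- Sketch: cap the events stream at the configured maximum, or at 10000
-- entries when the meta hash carries no maxEvents field.
trimEvents("bull:myQueue:meta", "bull:myQueue:events")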
27
backend/node_modules/bullmq/dist/cjs/commands/includes/updateExistingJobsParent.lua
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
--[[
|
||||
This function is used to update the parent's dependencies if the job
|
||||
is already completed and about to be ignored. The parent must get its
|
||||
dependencies updated to avoid the parent job being stuck forever in
|
||||
the waiting-children state.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "updateParentDepsIfNeeded"
|
||||
|
||||
local function updateExistingJobsParent(parentKey, parent, parentData,
|
||||
parentDependenciesKey, completedKey,
|
||||
jobIdKey, jobId, timestamp)
|
||||
if parentKey ~= nil then
|
||||
if rcall("ZSCORE", completedKey, jobId) then
|
||||
local returnvalue = rcall("HGET", jobIdKey, "returnvalue")
|
||||
updateParentDepsIfNeeded(parentKey, parent['queueKey'],
|
||||
parentDependenciesKey, parent['id'],
|
||||
jobIdKey, returnvalue, timestamp)
|
||||
else
|
||||
if parentDependenciesKey ~= nil then
|
||||
rcall("SADD", parentDependenciesKey, jobIdKey)
|
||||
end
|
||||
end
|
||||
rcall("HMSET", jobIdKey, "parentKey", parentKey, "parent", parentData)
|
||||
end
|
||||
end
|
||||
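A hedged sketch of a call site; all keys, ids and the timestamp below are hypothetical placeholders, not values from this diff.

-- Sketch only: re-attach an already existing job to its parent so the
-- parent's dependencies stay consistent.
local parent = {id = "42", queueKey = "bull:parentQueue"}
updateExistingJobsParent(
    "bull:parentQueue:42",                -- parentKey
    parent,                               -- parent {id, queueKey}
    cjson.encode(parent),                 -- parentData
    "bull:parentQueue:42:dependencies",   -- parentDependenciesKey
    "bull:myQueue:completed",             -- completedKey
    "bull:myQueue:123",                   -- jobIdKey
    "123",                                -- jobId
    1700000000000)                        -- timestamp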
11
backend/node_modules/bullmq/dist/cjs/commands/includes/updateJobFields.lua
generated
vendored
Normal file
@@ -0,0 +1,11 @@
--[[
Function to update a bunch of fields in a job.
]]
local function updateJobFields(jobKey, msgpackedFields)
if msgpackedFields and #msgpackedFields > 0 then
local fieldsToUpdate = cmsgpack.unpack(msgpackedFields)
if fieldsToUpdate then
rcall("HMSET", jobKey, unpack(fieldsToUpdate))
end
end
end
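The second argument is expected to be a msgpack-encoded flat array of alternating field names and values; a minimal sketch, with assumed field names:

-- Sketch: callers pack a single flat table of name/value pairs.
local msgpackedFields = cmsgpack.pack({"progress", "50", "failedReason", ""})
updateJobFields("bull:myQueue:123", msgpackedFields)
-- unpacks to {"progress", "50", "failedReason", ""} and results in
-- HMSET bull:myQueue:123 progress 50 failedReason ""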
13
backend/node_modules/bullmq/dist/cjs/commands/includes/updateParentDepsIfNeeded.lua
generated
vendored
Normal file
@@ -0,0 +1,13 @@
--[[
Validate and move or add dependencies to parent.
]]

-- Includes
--- @include "moveParentToWaitIfNoPendingDependencies"

local function updateParentDepsIfNeeded(parentKey, parentQueueKey, parentDependenciesKey,
parentId, jobIdKey, returnvalue, timestamp)
local processedSet = parentKey .. ":processed"
rcall("HSET", processedSet, jobIdKey, returnvalue)
moveParentToWaitIfNoPendingDependencies(parentQueueKey, parentDependenciesKey, parentKey, parentId, timestamp)
end
48
backend/node_modules/bullmq/dist/cjs/commands/isFinished-3.lua
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
--[[
|
||||
Checks if a job is finished (i.e. it is in the completed or failed set)
|
||||
|
||||
Input:
|
||||
KEYS[1] completed key
|
||||
KEYS[2] failed key
|
||||
KEYS[3] job key
|
||||
|
||||
ARGV[1] job id
|
||||
ARGV[2] return value?
|
||||
Output:
|
||||
0 - Not finished.
|
||||
1 - Completed.
|
||||
2 - Failed.
|
||||
-1 - Missing job.
|
||||
]]
|
||||
local rcall = redis.call
|
||||
if rcall("EXISTS", KEYS[3]) ~= 1 then
|
||||
if ARGV[2] == "1" then
|
||||
|
||||
return {-1,"Missing key for job " .. KEYS[3] .. ". isFinished"}
|
||||
end
|
||||
return -1
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[1], ARGV[1]) then
|
||||
if ARGV[2] == "1" then
|
||||
local returnValue = rcall("HGET", KEYS[3], "returnvalue")
|
||||
|
||||
return {1,returnValue}
|
||||
end
|
||||
return 1
|
||||
end
|
||||
|
||||
if rcall("ZSCORE", KEYS[2], ARGV[1]) then
|
||||
if ARGV[2] == "1" then
|
||||
local failedReason = rcall("HGET", KEYS[3], "failedReason")
|
||||
|
||||
return {2,failedReason}
|
||||
end
|
||||
return 2
|
||||
end
|
||||
|
||||
if ARGV[2] == "1" then
|
||||
return {0}
|
||||
end
|
||||
|
||||
return 0
|
||||
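A hedged sketch of a direct invocation and of how the documented return codes map onto replies; the key names and job id are assumptions:

-- EVAL <contents of isFinished-3.lua> 3 \
--     bull:myQueue:completed bull:myQueue:failed bull:myQueue:123 \
--     123 1
-- Possible replies with ARGV[2] == "1":
--   {1, <returnvalue>}   job completed
--   {2, <failedReason>}  job failed
--   {0}                  job not finished yet
--   {-1, <message>}      job key is missing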
16
backend/node_modules/bullmq/dist/cjs/commands/isJobInList-1.lua
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
--[[
|
||||
Checks if job is in a given list.
|
||||
|
||||
Input:
|
||||
KEYS[1]
|
||||
ARGV[1]
|
||||
|
||||
Output:
|
||||
1 if element found in the list.
|
||||
]]
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/checkItemInList"
|
||||
|
||||
local items = redis.call("LRANGE", KEYS[1] , 0, -1)
|
||||
return checkItemInList(items, ARGV[1])
|
||||
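A hedged invocation sketch; the list key and job id are assumptions:

-- EVAL <contents of isJobInList-1.lua with checkItemInList inlined> 1 \
--     bull:myQueue:wait 123
-- returns 1 when the job id is present in the list.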
17
backend/node_modules/bullmq/dist/cjs/commands/isMaxed-2.lua
generated
vendored
Normal file
@@ -0,0 +1,17 @@
--[[
Checks if queue is maxed.

Input:
KEYS[1] meta key
KEYS[2] active key

Output:
1 if the queue is maxed.
]]

local rcall = redis.call

-- Includes
--- @include "includes/isQueueMaxed"

return isQueueMaxed(KEYS[1], KEYS[2])
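A hedged invocation sketch; the key names are assumptions:

-- EVAL <contents of isMaxed-2.lua with isQueueMaxed inlined> 2 \
--     bull:myQueue:meta bull:myQueue:active
-- returns 1 when the queue is currently maxed.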
67
backend/node_modules/bullmq/dist/cjs/commands/moveJobFromActiveToWait-9.lua
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
--[[
|
||||
Function to move job from active state to wait.
|
||||
Input:
|
||||
KEYS[1] active key
|
||||
KEYS[2] wait key
|
||||
|
||||
KEYS[3] stalled key
|
||||
KEYS[4] paused key
|
||||
KEYS[5] meta key
|
||||
KEYS[6] limiter key
|
||||
KEYS[7] prioritized key
|
||||
KEYS[8] marker key
|
||||
KEYS[9] event key
|
||||
|
||||
ARGV[1] job id
|
||||
ARGV[2] lock token
|
||||
ARGV[3] job id key
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addJobInTargetList"
|
||||
--- @include "includes/pushBackJobWithPriority"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/getTargetQueueList"
|
||||
--- @include "includes/removeLock"
|
||||
|
||||
local jobId = ARGV[1]
|
||||
local token = ARGV[2]
|
||||
local jobKey = ARGV[3]
|
||||
|
||||
if rcall("EXISTS", jobKey) == 0 then
|
||||
return -1
|
||||
end
|
||||
|
||||
local errorCode = removeLock(jobKey, KEYS[3], token, jobId)
|
||||
if errorCode < 0 then
|
||||
return errorCode
|
||||
end
|
||||
|
||||
local metaKey = KEYS[5]
|
||||
local removed = rcall("LREM", KEYS[1], 1, jobId)
|
||||
if removed > 0 then
|
||||
local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[1], KEYS[2], KEYS[4])
|
||||
|
||||
local priority = tonumber(rcall("HGET", ARGV[3], "priority")) or 0
|
||||
|
||||
if priority > 0 then
|
||||
pushBackJobWithPriority(KEYS[7], priority, jobId)
|
||||
else
|
||||
addJobInTargetList(target, KEYS[8], "RPUSH", isPausedOrMaxed, jobId)
|
||||
end
|
||||
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
|
||||
-- Emit waiting event
|
||||
rcall("XADD", KEYS[9], "MAXLEN", "~", maxEvents, "*", "event", "waiting",
|
||||
"jobId", jobId, "prev", "active")
|
||||
end
|
||||
|
||||
local pttl = rcall("PTTL", KEYS[6])
|
||||
|
||||
if pttl > 0 then
|
||||
return pttl
|
||||
else
|
||||
return 0
|
||||
end
|
||||
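As a reading aid, the reply contract implemented above can be summarised as follows (not an official table, just what the code returns):

--   < 0  error: missing job key, or the lock is missing / not owned by this token
--   > 0  remaining TTL of the rate limiter key, in milliseconds
--   0    job pushed back to wait/paused (or prioritized) with no rate limit pending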
73
backend/node_modules/bullmq/dist/cjs/commands/moveJobsToWait-8.lua
generated
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
--[[
|
||||
Move completed, failed or delayed jobs to wait.
|
||||
|
||||
Note: Does not support jobs with priorities.
|
||||
|
||||
Input:
|
||||
KEYS[1] base key
|
||||
KEYS[2] events stream
|
||||
KEYS[3] state key (failed, completed, delayed)
|
||||
KEYS[4] 'wait'
|
||||
KEYS[5] 'paused'
|
||||
KEYS[6] 'meta'
|
||||
KEYS[7] 'active'
|
||||
KEYS[8] 'marker'
|
||||
|
||||
ARGV[1] count
|
||||
ARGV[2] timestamp
|
||||
ARGV[3] prev state
|
||||
|
||||
Output:
|
||||
1 means the operation is not completed
|
||||
0 means the operation is completed
|
||||
]]
|
||||
local maxCount = tonumber(ARGV[1])
|
||||
local timestamp = tonumber(ARGV[2])
|
||||
|
||||
local rcall = redis.call;
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addBaseMarkerIfNeeded"
|
||||
--- @include "includes/batches"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/getTargetQueueList"
|
||||
|
||||
local metaKey = KEYS[6]
|
||||
local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[7], KEYS[4], KEYS[5])
|
||||
|
||||
local jobs = rcall('ZRANGEBYSCORE', KEYS[3], 0, timestamp, 'LIMIT', 0, maxCount)
|
||||
if (#jobs > 0) then
|
||||
|
||||
if ARGV[3] == "failed" then
|
||||
for i, key in ipairs(jobs) do
|
||||
local jobKey = KEYS[1] .. key
|
||||
rcall("HDEL", jobKey, "finishedOn", "processedOn", "failedReason")
|
||||
end
|
||||
elseif ARGV[3] == "completed" then
|
||||
for i, key in ipairs(jobs) do
|
||||
local jobKey = KEYS[1] .. key
|
||||
rcall("HDEL", jobKey, "finishedOn", "processedOn", "returnvalue")
|
||||
end
|
||||
end
|
||||
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
|
||||
for i, key in ipairs(jobs) do
|
||||
-- Emit waiting event
|
||||
rcall("XADD", KEYS[2], "MAXLEN", "~", maxEvents, "*", "event",
|
||||
"waiting", "jobId", key, "prev", ARGV[3]);
|
||||
end
|
||||
|
||||
for from, to in batches(#jobs, 7000) do
|
||||
rcall("ZREM", KEYS[3], unpack(jobs, from, to))
|
||||
rcall("LPUSH", target, unpack(jobs, from, to))
|
||||
end
|
||||
|
||||
addBaseMarkerIfNeeded(KEYS[8], isPausedOrMaxed)
|
||||
end
|
||||
|
||||
maxCount = maxCount - #jobs
|
||||
|
||||
if (maxCount <= 0) then return 1 end
|
||||
|
||||
return 0
|
||||
113
backend/node_modules/bullmq/dist/cjs/commands/moveStalledJobsToWait-8.lua
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
--[[
|
||||
Move stalled jobs to wait.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'stalled' (SET)
|
||||
KEYS[2] 'wait', (LIST)
|
||||
KEYS[3] 'active', (LIST)
|
||||
KEYS[4] 'stalled-check', (KEY)
|
||||
KEYS[5] 'meta', (KEY)
|
||||
KEYS[6] 'paused', (LIST)
|
||||
KEYS[7] 'marker'
|
||||
KEYS[8] 'event stream' (STREAM)
|
||||
|
||||
ARGV[1] Max stalled job count
|
||||
ARGV[2] queue.toKey('')
|
||||
ARGV[3] timestamp
|
||||
ARGV[4] max check time
|
||||
|
||||
Events:
|
||||
'stalled' with stalled job id.
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addJobInTargetList"
|
||||
--- @include "includes/batches"
|
||||
--- @include "includes/moveJobToWait"
|
||||
--- @include "includes/trimEvents"
|
||||
|
||||
local stalledKey = KEYS[1]
|
||||
local waitKey = KEYS[2]
|
||||
local activeKey = KEYS[3]
|
||||
local stalledCheckKey = KEYS[4]
|
||||
local metaKey = KEYS[5]
|
||||
local pausedKey = KEYS[6]
|
||||
local markerKey = KEYS[7]
|
||||
local eventStreamKey = KEYS[8]
|
||||
local maxStalledJobCount = tonumber(ARGV[1])
|
||||
local queueKeyPrefix = ARGV[2]
|
||||
local timestamp = ARGV[3]
|
||||
local maxCheckTime = ARGV[4]
|
||||
|
||||
if rcall("EXISTS", stalledCheckKey) == 1 then
|
||||
return {}
|
||||
end
|
||||
|
||||
rcall("SET", stalledCheckKey, timestamp, "PX", maxCheckTime)
|
||||
|
||||
-- Trim events before emitting them to avoid trimming events emitted in this script
|
||||
trimEvents(metaKey, eventStreamKey)
|
||||
|
||||
-- Move all stalled jobs to wait
|
||||
local stalling = rcall('SMEMBERS', stalledKey)
|
||||
local stalled = {}
|
||||
if (#stalling > 0) then
|
||||
rcall('DEL', stalledKey)
|
||||
|
||||
-- Remove from active list
|
||||
for i, jobId in ipairs(stalling) do
|
||||
-- Markers in waitlist DEPRECATED in v5: Remove in v6.
|
||||
if string.sub(jobId, 1, 2) == "0:" then
|
||||
-- If the jobId is a delay marker ID we just remove it.
|
||||
rcall("LREM", activeKey, 1, jobId)
|
||||
else
|
||||
local jobKey = queueKeyPrefix .. jobId
|
||||
|
||||
-- Check that the lock is also missing, then we can handle this job as really stalled.
|
||||
if (rcall("EXISTS", jobKey .. ":lock") == 0) then
|
||||
-- Remove from the active queue.
|
||||
local removed = rcall("LREM", activeKey, 1, jobId)
|
||||
|
||||
if (removed > 0) then
|
||||
-- If this job has been stalled too many times, such as if it crashes the worker, then fail it.
|
||||
local stalledCount = rcall("HINCRBY", jobKey, "stc", 1)
|
||||
|
||||
-- Check if this is a repeatable job by looking at job options
|
||||
local jobOpts = rcall("HGET", jobKey, "opts")
|
||||
local isRepeatableJob = false
|
||||
if jobOpts then
|
||||
local opts = cjson.decode(jobOpts)
|
||||
if opts and opts["repeat"] then
|
||||
isRepeatableJob = true
|
||||
end
|
||||
end
|
||||
|
||||
-- Only fail job if it exceeds stall limit AND is not a repeatable job
|
||||
if stalledCount > maxStalledJobCount and not isRepeatableJob then
|
||||
local failedReason = "job stalled more than allowable limit"
|
||||
rcall("HSET", jobKey, "defa", failedReason)
|
||||
end
|
||||
|
||||
moveJobToWait(metaKey, activeKey, waitKey, pausedKey, markerKey, eventStreamKey, jobId,
|
||||
"RPUSH")
|
||||
|
||||
-- Emit the stalled event
|
||||
rcall("XADD", eventStreamKey, "*", "event", "stalled", "jobId", jobId)
|
||||
table.insert(stalled, jobId)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
-- Mark potentially stalled jobs
|
||||
local active = rcall('LRANGE', activeKey, 0, -1)
|
||||
|
||||
if (#active > 0) then
|
||||
for from, to in batches(#active, 7000) do
|
||||
rcall('SADD', stalledKey, unpack(active, from, to))
|
||||
end
|
||||
end
|
||||
|
||||
return stalled
|
||||
97
backend/node_modules/bullmq/dist/cjs/commands/moveToActive-11.lua
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
--[[
|
||||
Move next job to be processed to active, lock it and fetch its data. The job
|
||||
may be delayed, in that case we need to move it to the delayed set instead.
|
||||
|
||||
This operation guarantees that the worker owns the job during the lock
|
||||
expiration time. The worker is responsible for keeping the lock fresh
|
||||
so that no other worker picks this job again.
|
||||
|
||||
Input:
|
||||
KEYS[1] wait key
|
||||
KEYS[2] active key
|
||||
KEYS[3] prioritized key
|
||||
KEYS[4] stream events key
|
||||
KEYS[5] stalled key
|
||||
|
||||
-- Rate limiting
|
||||
KEYS[6] rate limiter key
|
||||
KEYS[7] delayed key
|
||||
|
||||
-- Delayed jobs
|
||||
KEYS[8] paused key
|
||||
KEYS[9] meta key
|
||||
KEYS[10] pc priority counter
|
||||
|
||||
-- Marker
|
||||
KEYS[11] marker key
|
||||
|
||||
-- Arguments
|
||||
ARGV[1] key prefix
|
||||
ARGV[2] timestamp
|
||||
ARGV[3] opts
|
||||
|
||||
opts - token - lock token
|
||||
opts - lockDuration
|
||||
opts - limiter
|
||||
opts - name - worker name
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local waitKey = KEYS[1]
|
||||
local activeKey = KEYS[2]
|
||||
local eventStreamKey = KEYS[4]
|
||||
local rateLimiterKey = KEYS[6]
|
||||
local delayedKey = KEYS[7]
|
||||
local opts = cmsgpack.unpack(ARGV[3])
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/getNextDelayedTimestamp"
|
||||
--- @include "includes/getRateLimitTTL"
|
||||
--- @include "includes/getTargetQueueList"
|
||||
--- @include "includes/moveJobFromPrioritizedToActive"
|
||||
--- @include "includes/prepareJobForProcessing"
|
||||
--- @include "includes/promoteDelayedJobs"
|
||||
|
||||
local target, isPausedOrMaxed, rateLimitMax, rateLimitDuration = getTargetQueueList(KEYS[9],
|
||||
activeKey, waitKey, KEYS[8])
|
||||
|
||||
-- Check if there are delayed jobs that we can move to wait.
|
||||
local markerKey = KEYS[11]
|
||||
promoteDelayedJobs(delayedKey, markerKey, target, KEYS[3], eventStreamKey, ARGV[1],
|
||||
ARGV[2], KEYS[10], isPausedOrMaxed)
|
||||
|
||||
local maxJobs = tonumber(rateLimitMax or (opts['limiter'] and opts['limiter']['max']))
|
||||
local expireTime = getRateLimitTTL(maxJobs, rateLimiterKey)
|
||||
|
||||
-- Check if we are rate limited first.
|
||||
if expireTime > 0 then return {0, 0, expireTime, 0} end
|
||||
|
||||
-- paused or maxed queue
|
||||
if isPausedOrMaxed then return {0, 0, 0, 0} end
|
||||
|
||||
local limiterDuration = (opts['limiter'] and opts['limiter']['duration']) or rateLimitDuration
|
||||
|
||||
-- no job ID, try non-blocking move from wait to active
|
||||
local jobId = rcall("RPOPLPUSH", waitKey, activeKey)
|
||||
|
||||
-- Markers in waitlist DEPRECATED in v5: Will be completely removed in v6.
|
||||
if jobId and string.sub(jobId, 1, 2) == "0:" then
|
||||
rcall("LREM", activeKey, 1, jobId)
|
||||
jobId = rcall("RPOPLPUSH", waitKey, activeKey)
|
||||
end
|
||||
|
||||
if jobId then
|
||||
return prepareJobForProcessing(ARGV[1], rateLimiterKey, eventStreamKey, jobId, ARGV[2],
|
||||
maxJobs, limiterDuration, markerKey, opts)
|
||||
else
|
||||
jobId = moveJobFromPrioritizedToActive(KEYS[3], activeKey, KEYS[10])
|
||||
if jobId then
|
||||
return prepareJobForProcessing(ARGV[1], rateLimiterKey, eventStreamKey, jobId, ARGV[2],
|
||||
maxJobs, limiterDuration, markerKey, opts)
|
||||
end
|
||||
end
|
||||
|
||||
-- Return the timestamp for the next delayed job if any.
|
||||
local nextTimestamp = getNextDelayedTimestamp(delayedKey)
|
||||
if nextTimestamp ~= nil then return {0, 0, 0, nextTimestamp} end
|
||||
|
||||
return {0, 0, 0, 0}
|
||||
78
backend/node_modules/bullmq/dist/cjs/commands/moveToDelayed-8.lua
generated
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
--[[
|
||||
Moves job from active to delayed set.
|
||||
|
||||
Input:
|
||||
KEYS[1] marker key
|
||||
KEYS[2] active key
|
||||
KEYS[3] prioritized key
|
||||
KEYS[4] delayed key
|
||||
KEYS[5] job key
|
||||
KEYS[6] events stream
|
||||
KEYS[7] meta key
|
||||
KEYS[8] stalled key
|
||||
|
||||
ARGV[1] key prefix
|
||||
ARGV[2] timestamp
|
||||
ARGV[3] the id of the job
|
||||
ARGV[4] queue token
|
||||
ARGV[5] delay value
|
||||
ARGV[6] skip attempt
|
||||
ARGV[7] optional job fields to update
|
||||
|
||||
Output:
|
||||
0 - OK
|
||||
-1 - Missing job.
|
||||
-3 - Job not in active set.
|
||||
|
||||
Events:
|
||||
- delayed key.
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addDelayMarkerIfNeeded"
|
||||
--- @include "includes/getDelayedScore"
|
||||
--- @include "includes/getOrSetMaxEvents"
|
||||
--- @include "includes/removeLock"
|
||||
--- @include "includes/updateJobFields"
|
||||
|
||||
local jobKey = KEYS[5]
|
||||
local metaKey = KEYS[7]
|
||||
local token = ARGV[4]
|
||||
if rcall("EXISTS", jobKey) == 1 then
|
||||
local errorCode = removeLock(jobKey, KEYS[8], token, ARGV[3])
|
||||
if errorCode < 0 then
|
||||
return errorCode
|
||||
end
|
||||
|
||||
updateJobFields(jobKey, ARGV[7])
|
||||
|
||||
local delayedKey = KEYS[4]
|
||||
local jobId = ARGV[3]
|
||||
local delay = tonumber(ARGV[5])
|
||||
|
||||
local numRemovedElements = rcall("LREM", KEYS[2], -1, jobId)
|
||||
if numRemovedElements < 1 then return -3 end
|
||||
|
||||
local score, delayedTimestamp = getDelayedScore(delayedKey, ARGV[2], delay)
|
||||
|
||||
if ARGV[6] == "0" then
|
||||
rcall("HINCRBY", jobKey, "atm", 1)
|
||||
end
|
||||
|
||||
rcall("HSET", jobKey, "delay", ARGV[5])
|
||||
|
||||
local maxEvents = getOrSetMaxEvents(metaKey)
|
||||
|
||||
rcall("ZADD", delayedKey, score, jobId)
|
||||
rcall("XADD", KEYS[6], "MAXLEN", "~", maxEvents, "*", "event", "delayed",
|
||||
"jobId", jobId, "delay", delayedTimestamp)
|
||||
|
||||
-- Check if we need to push a marker job to wake up sleeping workers.
|
||||
local markerKey = KEYS[1]
|
||||
addDelayMarkerIfNeeded(markerKey, delayedKey)
|
||||
|
||||
return 0
|
||||
else
|
||||
return -1
|
||||
end
|
||||
287
backend/node_modules/bullmq/dist/cjs/commands/moveToFinished-14.lua
generated
vendored
Normal file
@@ -0,0 +1,287 @@
|
||||
--[[
|
||||
Move job from active to a finished status (completed or failed)
|
||||
A job can only be moved to completed if it was active.
|
||||
The job must be locked before it can be moved to a finished status,
|
||||
and the lock must be released in this script.
|
||||
|
||||
Input:
|
||||
KEYS[1] wait key
|
||||
KEYS[2] active key
|
||||
KEYS[3] prioritized key
|
||||
KEYS[4] event stream key
|
||||
KEYS[5] stalled key
|
||||
|
||||
-- Rate limiting
|
||||
KEYS[6] rate limiter key
|
||||
KEYS[7] delayed key
|
||||
|
||||
KEYS[8] paused key
|
||||
KEYS[9] meta key
|
||||
KEYS[10] pc priority counter
|
||||
|
||||
KEYS[11] completed/failed key
|
||||
KEYS[12] jobId key
|
||||
KEYS[13] metrics key
|
||||
KEYS[14] marker key
|
||||
|
||||
ARGV[1] jobId
|
||||
ARGV[2] timestamp
|
||||
ARGV[3] msg property returnvalue / failedReason
|
||||
ARGV[4] return value / failed reason
|
||||
ARGV[5] target (completed/failed)
|
||||
ARGV[6] fetch next?
|
||||
ARGV[7] keys prefix
|
||||
ARGV[8] opts
|
||||
ARGV[9] job fields to update
|
||||
|
||||
opts - token - lock token
|
||||
opts - keepJobs
|
||||
opts - lockDuration - lock duration in milliseconds
|
||||
opts - attempts max attempts
|
||||
opts - maxMetricsSize
|
||||
opts - fpof - fail parent on fail
|
||||
opts - cpof - continue parent on fail
|
||||
opts - idof - ignore dependency on fail
|
||||
opts - rdof - remove dependency on fail
|
||||
opts - name - worker name
|
||||
|
||||
Output:
|
||||
0 OK
|
||||
-1 Missing key.
|
||||
-2 Missing lock.
|
||||
-3 Job not in active set
|
||||
-4 Job has pending children
|
||||
-6 Lock is not owned by this client
|
||||
-9 Job has failed children
|
||||
|
||||
Events:
|
||||
'completed/failed'
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
--- Includes
|
||||
--- @include "includes/collectMetrics"
|
||||
--- @include "includes/getNextDelayedTimestamp"
|
||||
--- @include "includes/getRateLimitTTL"
|
||||
--- @include "includes/getTargetQueueList"
|
||||
--- @include "includes/moveJobFromPrioritizedToActive"
|
||||
--- @include "includes/moveChildFromDependenciesIfNeeded"
|
||||
--- @include "includes/prepareJobForProcessing"
|
||||
--- @include "includes/promoteDelayedJobs"
|
||||
--- @include "includes/removeDeduplicationKeyIfNeededOnFinalization"
|
||||
--- @include "includes/removeJobKeys"
|
||||
--- @include "includes/removeJobsByMaxAge"
|
||||
--- @include "includes/removeJobsByMaxCount"
|
||||
--- @include "includes/removeLock"
|
||||
--- @include "includes/removeParentDependencyKey"
|
||||
--- @include "includes/trimEvents"
|
||||
--- @include "includes/updateParentDepsIfNeeded"
|
||||
--- @include "includes/updateJobFields"
|
||||
|
||||
local jobIdKey = KEYS[12]
|
||||
if rcall("EXISTS", jobIdKey) == 1 then -- Make sure job exists
|
||||
-- Make sure it does not have pending dependencies
|
||||
-- It must happen before removing lock
|
||||
if ARGV[5] == "completed" then
|
||||
if rcall("SCARD", jobIdKey .. ":dependencies") ~= 0 then
|
||||
return -4
|
||||
end
|
||||
|
||||
if rcall("ZCARD", jobIdKey .. ":unsuccessful") ~= 0 then
|
||||
return -9
|
||||
end
|
||||
end
|
||||
|
||||
local opts = cmsgpack.unpack(ARGV[8])
|
||||
|
||||
local token = opts['token']
|
||||
|
||||
local errorCode = removeLock(jobIdKey, KEYS[5], token, ARGV[1])
|
||||
if errorCode < 0 then
|
||||
return errorCode
|
||||
end
|
||||
|
||||
updateJobFields(jobIdKey, ARGV[9]);
|
||||
|
||||
local attempts = opts['attempts']
|
||||
local maxMetricsSize = opts['maxMetricsSize']
|
||||
local maxCount = opts['keepJobs']['count']
|
||||
local maxAge = opts['keepJobs']['age']
|
||||
local maxLimit = opts['keepJobs']['limit'] or 1000
|
||||
|
||||
local jobAttributes = rcall("HMGET", jobIdKey, "parentKey", "parent", "deid")
|
||||
local parentKey = jobAttributes[1] or ""
|
||||
local parentId = ""
|
||||
local parentQueueKey = ""
|
||||
if jobAttributes[2] then -- TODO: need to revisit this logic if it's still needed
|
||||
local jsonDecodedParent = cjson.decode(jobAttributes[2])
|
||||
parentId = jsonDecodedParent['id']
|
||||
parentQueueKey = jsonDecodedParent['queueKey']
|
||||
end
|
||||
|
||||
local jobId = ARGV[1]
|
||||
local timestamp = ARGV[2]
|
||||
|
||||
-- Remove from active list (if not active we shall return error)
|
||||
local numRemovedElements = rcall("LREM", KEYS[2], -1, jobId)
|
||||
|
||||
if (numRemovedElements < 1) then
|
||||
return -3
|
||||
end
|
||||
|
||||
local eventStreamKey = KEYS[4]
|
||||
local metaKey = KEYS[9]
|
||||
-- Trim events before emitting them to avoid trimming events emitted in this script
|
||||
trimEvents(metaKey, eventStreamKey)
|
||||
|
||||
local prefix = ARGV[7]
|
||||
|
||||
removeDeduplicationKeyIfNeededOnFinalization(prefix, jobAttributes[3], jobId)
|
||||
|
||||
-- If job has a parent we need to
|
||||
-- 1) remove this job id from parents dependencies
|
||||
-- 2) move the job Id to parent "processed" set
|
||||
-- 3) push the results into parent "results" list
|
||||
-- 4) if parent's dependencies is empty, then move parent to "wait/paused". Note it may be a different queue!.
|
||||
if parentId == "" and parentKey ~= "" then
|
||||
parentId = getJobIdFromKey(parentKey)
|
||||
parentQueueKey = getJobKeyPrefix(parentKey, ":" .. parentId)
|
||||
end
|
||||
|
||||
if parentId ~= "" then
|
||||
if ARGV[5] == "completed" then
|
||||
local dependenciesSet = parentKey .. ":dependencies"
|
||||
if rcall("SREM", dependenciesSet, jobIdKey) == 1 then
|
||||
updateParentDepsIfNeeded(parentKey, parentQueueKey, dependenciesSet, parentId, jobIdKey, ARGV[4],
|
||||
timestamp)
|
||||
end
|
||||
else
|
||||
moveChildFromDependenciesIfNeeded(jobAttributes[2], jobIdKey, ARGV[4], timestamp)
|
||||
end
|
||||
end
|
||||
|
||||
local attemptsMade = rcall("HINCRBY", jobIdKey, "atm", 1)
|
||||
|
||||
-- Remove job?
|
||||
if maxCount ~= 0 then
|
||||
local targetSet = KEYS[11]
|
||||
-- Add to complete/failed set
|
||||
rcall("ZADD", targetSet, timestamp, jobId)
|
||||
rcall("HSET", jobIdKey, ARGV[3], ARGV[4], "finishedOn", timestamp)
|
||||
-- "returnvalue" / "failedReason" and "finishedOn"
|
||||
|
||||
if ARGV[5] == "failed" then
|
||||
rcall("HDEL", jobIdKey, "defa")
|
||||
end
|
||||
|
||||
-- Remove old jobs?
|
||||
if maxAge ~= nil then
|
||||
removeJobsByMaxAge(timestamp, maxAge, targetSet, prefix, maxLimit)
|
||||
end
|
||||
|
||||
if maxCount ~= nil and maxCount > 0 then
|
||||
removeJobsByMaxCount(maxCount, targetSet, prefix)
|
||||
end
|
||||
else
|
||||
removeJobKeys(jobIdKey)
|
||||
if parentKey ~= "" then
|
||||
-- TODO: when a child is removed when finished, result or failure in parent
|
||||
-- must not be deleted, those value references should be deleted when the parent
|
||||
-- is deleted
|
||||
removeParentDependencyKey(jobIdKey, false, parentKey, jobAttributes[3])
|
||||
end
|
||||
end
|
||||
|
||||
rcall("XADD", eventStreamKey, "*", "event", ARGV[5], "jobId", jobId, ARGV[3], ARGV[4], "prev", "active")
|
||||
|
||||
if ARGV[5] == "failed" then
|
||||
if tonumber(attemptsMade) >= tonumber(attempts) then
|
||||
rcall("XADD", eventStreamKey, "*", "event", "retries-exhausted", "jobId", jobId, "attemptsMade",
|
||||
attemptsMade)
|
||||
end
|
||||
end
|
||||
|
||||
-- Collect metrics
|
||||
if maxMetricsSize ~= "" then
|
||||
collectMetrics(KEYS[13], KEYS[13] .. ':data', maxMetricsSize, timestamp)
|
||||
end
|
||||
|
||||
-- Try to get next job to avoid an extra roundtrip if the queue is not closing,
|
||||
-- and not rate limited.
|
||||
if (ARGV[6] == "1") then
|
||||
|
||||
local target, isPausedOrMaxed, rateLimitMax, rateLimitDuration = getTargetQueueList(metaKey, KEYS[2],
|
||||
KEYS[1], KEYS[8])
|
||||
|
||||
local markerKey = KEYS[14]
|
||||
-- Check if there are delayed jobs that can be promoted
|
||||
promoteDelayedJobs(KEYS[7], markerKey, target, KEYS[3], eventStreamKey, prefix, timestamp, KEYS[10],
|
||||
isPausedOrMaxed)
|
||||
|
||||
local maxJobs = tonumber(rateLimitMax or (opts['limiter'] and opts['limiter']['max']))
|
||||
-- Check if we are rate limited first.
|
||||
local expireTime = getRateLimitTTL(maxJobs, KEYS[6])
|
||||
|
||||
if expireTime > 0 then
|
||||
return {0, 0, expireTime, 0}
|
||||
end
|
||||
|
||||
-- paused or maxed queue
|
||||
if isPausedOrMaxed then
|
||||
return {0, 0, 0, 0}
|
||||
end
|
||||
|
||||
local limiterDuration = (opts['limiter'] and opts['limiter']['duration']) or rateLimitDuration
|
||||
|
||||
jobId = rcall("RPOPLPUSH", KEYS[1], KEYS[2])
|
||||
|
||||
if jobId then
|
||||
-- Markers in waitlist DEPRECATED in v5: Remove in v6.
|
||||
if string.sub(jobId, 1, 2) == "0:" then
|
||||
rcall("LREM", KEYS[2], 1, jobId)
|
||||
|
||||
-- If jobId is special ID 0:delay (delay greater than 0), then there is no job to process
|
||||
-- but if ID is 0:0, then there is at least 1 prioritized job to process
|
||||
if jobId == "0:0" then
|
||||
jobId = moveJobFromPrioritizedToActive(KEYS[3], KEYS[2], KEYS[10])
|
||||
return prepareJobForProcessing(prefix, KEYS[6], eventStreamKey, jobId, timestamp, maxJobs,
|
||||
limiterDuration, markerKey, opts)
|
||||
end
|
||||
else
|
||||
return prepareJobForProcessing(prefix, KEYS[6], eventStreamKey, jobId, timestamp, maxJobs,
|
||||
limiterDuration, markerKey, opts)
|
||||
end
|
||||
else
|
||||
jobId = moveJobFromPrioritizedToActive(KEYS[3], KEYS[2], KEYS[10])
|
||||
if jobId then
|
||||
return prepareJobForProcessing(prefix, KEYS[6], eventStreamKey, jobId, timestamp, maxJobs,
|
||||
limiterDuration, markerKey, opts)
|
||||
end
|
||||
end
|
||||
|
||||
-- Return the timestamp for the next delayed job if any.
|
||||
local nextTimestamp = getNextDelayedTimestamp(KEYS[7])
|
||||
if nextTimestamp ~= nil then
|
||||
-- The result is guaranteed to be positive, since the
|
||||
-- ZRANGEBYSCORE command would have returned a job otherwise.
|
||||
return {0, 0, 0, nextTimestamp}
|
||||
end
|
||||
end
|
||||
|
||||
local waitLen = rcall("LLEN", KEYS[1])
|
||||
if waitLen == 0 then
|
||||
local activeLen = rcall("LLEN", KEYS[2])
|
||||
|
||||
if activeLen == 0 then
|
||||
local prioritizedLen = rcall("ZCARD", KEYS[3])
|
||||
|
||||
if prioritizedLen == 0 then
|
||||
rcall("XADD", eventStreamKey, "*", "event", "drained")
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return 0
|
||||
else
|
||||
return -1
|
||||
end
|
||||
95
backend/node_modules/bullmq/dist/cjs/commands/moveToWaitingChildren-7.lua
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
--[[
|
||||
Moves job from active to waiting children set.
|
||||
|
||||
Input:
|
||||
KEYS[1] active key
|
||||
KEYS[2] wait-children key
|
||||
KEYS[3] job key
|
||||
KEYS[4] job dependencies key
|
||||
KEYS[5] job unsuccessful key
|
||||
KEYS[6] stalled key
|
||||
KEYS[7] events key
|
||||
|
||||
ARGV[1] token
|
||||
ARGV[2] child key
|
||||
ARGV[3] timestamp
|
||||
ARGV[4] jobId
|
||||
ARGV[5] prefix
|
||||
|
||||
Output:
|
||||
0 - OK
|
||||
1 - There are no pending dependencies.
|
||||
-1 - Missing job.
|
||||
-2 - Missing lock
|
||||
-3 - Job not in active set
|
||||
-9 - Job has failed children
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local activeKey = KEYS[1]
|
||||
local waitingChildrenKey = KEYS[2]
|
||||
local jobKey = KEYS[3]
|
||||
local jobDependenciesKey = KEYS[4]
|
||||
local jobUnsuccessfulKey = KEYS[5]
|
||||
local stalledKey = KEYS[6]
|
||||
local eventStreamKey = KEYS[7]
|
||||
local token = ARGV[1]
|
||||
local timestamp = ARGV[3]
|
||||
local jobId = ARGV[4]
|
||||
|
||||
--- Includes
|
||||
--- @include "includes/removeLock"
|
||||
|
||||
local function removeJobFromActive(activeKey, stalledKey, jobKey, jobId,
|
||||
token)
|
||||
local errorCode = removeLock(jobKey, stalledKey, token, jobId)
|
||||
if errorCode < 0 then
|
||||
return errorCode
|
||||
end
|
||||
|
||||
local numRemovedElements = rcall("LREM", activeKey, -1, jobId)
|
||||
|
||||
if numRemovedElements < 1 then
|
||||
return -3
|
||||
end
|
||||
|
||||
return 0
|
||||
end
|
||||
|
||||
local function moveToWaitingChildren(activeKey, waitingChildrenKey, stalledKey, eventStreamKey,
|
||||
jobKey, jobId, timestamp, token)
|
||||
local errorCode = removeJobFromActive(activeKey, stalledKey, jobKey, jobId, token)
|
||||
if errorCode < 0 then
|
||||
return errorCode
|
||||
end
|
||||
|
||||
local score = tonumber(timestamp)
|
||||
|
||||
rcall("ZADD", waitingChildrenKey, score, jobId)
|
||||
rcall("XADD", eventStreamKey, "*", "event", "waiting-children", "jobId", jobId, 'prev', 'active')
|
||||
|
||||
return 0
|
||||
end
|
||||
|
||||
if rcall("EXISTS", jobKey) == 1 then
|
||||
if rcall("ZCARD", jobUnsuccessfulKey) ~= 0 then
|
||||
return -9
|
||||
else
|
||||
if ARGV[2] ~= "" then
|
||||
if rcall("SISMEMBER", jobDependenciesKey, ARGV[2]) ~= 0 then
|
||||
return moveToWaitingChildren(activeKey, waitingChildrenKey, stalledKey, eventStreamKey,
|
||||
jobKey, jobId, timestamp, token)
|
||||
end
|
||||
|
||||
return 1
|
||||
else
|
||||
if rcall("SCARD", jobDependenciesKey) ~= 0 then
|
||||
return moveToWaitingChildren(activeKey, waitingChildrenKey, stalledKey, eventStreamKey,
|
||||
jobKey, jobId, timestamp, token)
|
||||
end
|
||||
|
||||
return 1
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return -1
|
||||
119
backend/node_modules/bullmq/dist/cjs/commands/obliterate-2.lua
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
--[[
|
||||
Completely obliterates a queue and all of its contents
|
||||
This command completely destroys a queue including all of its jobs, current or past
|
||||
leaving no trace of its existence. Since this script needs to iterate to find all the job
|
||||
keys, consider that this call may be slow for very large queues.
|
||||
|
||||
The queue needs to be "paused" or it will return an error
|
||||
If the queue currently has active jobs then the script will return an error by default,
however this behaviour can be overridden using the 'force' option.
|
||||
|
||||
Input:
|
||||
KEYS[1] meta
|
||||
KEYS[2] base
|
||||
|
||||
ARGV[1] count
|
||||
ARGV[2] force
|
||||
]]
|
||||
|
||||
local maxCount = tonumber(ARGV[1])
|
||||
local baseKey = KEYS[2]
|
||||
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/removeJobs"
|
||||
--- @include "includes/removeListJobs"
|
||||
--- @include "includes/removeZSetJobs"
|
||||
|
||||
local function removeLockKeys(keys)
|
||||
for i, key in ipairs(keys) do
|
||||
rcall("DEL", baseKey .. key .. ':lock')
|
||||
end
|
||||
end
|
||||
|
||||
-- 1) Check if paused, if not return with error.
|
||||
if rcall("HEXISTS", KEYS[1], "paused") ~= 1 then
|
||||
return -1 -- Error, NotPaused
|
||||
end
|
||||
|
||||
-- 2) Check if there are active jobs, if there are and not "force" return error.
|
||||
local activeKey = baseKey .. 'active'
|
||||
local activeJobs = getListItems(activeKey, maxCount)
|
||||
if (#activeJobs > 0) then
|
||||
if(ARGV[2] == "") then
|
||||
return -2 -- Error, ExistActiveJobs
|
||||
end
|
||||
end
|
||||
|
||||
removeLockKeys(activeJobs)
|
||||
maxCount = removeJobs(activeJobs, true, baseKey, maxCount)
|
||||
rcall("LTRIM", activeKey, #activeJobs, -1)
|
||||
if(maxCount <= 0) then
|
||||
return 1
|
||||
end
|
||||
|
||||
local delayedKey = baseKey .. 'delayed'
|
||||
maxCount = removeZSetJobs(delayedKey, true, baseKey, maxCount)
|
||||
if(maxCount <= 0) then
|
||||
return 1
|
||||
end
|
||||
|
||||
local repeatKey = baseKey .. 'repeat'
|
||||
local repeatJobsIds = getZSetItems(repeatKey, maxCount)
|
||||
for i, key in ipairs(repeatJobsIds) do
|
||||
local jobKey = repeatKey .. ":" .. key
|
||||
rcall("DEL", jobKey)
|
||||
end
|
||||
if(#repeatJobsIds > 0) then
|
||||
for from, to in batches(#repeatJobsIds, 7000) do
|
||||
rcall("ZREM", repeatKey, unpack(repeatJobsIds, from, to))
|
||||
end
|
||||
end
|
||||
maxCount = maxCount - #repeatJobsIds
|
||||
if(maxCount <= 0) then
|
||||
return 1
|
||||
end
|
||||
|
||||
local completedKey = baseKey .. 'completed'
|
||||
maxCount = removeZSetJobs(completedKey, true, baseKey, maxCount)
|
||||
if(maxCount <= 0) then
|
||||
return 1
|
||||
end
|
||||
|
||||
local waitKey = baseKey .. 'paused'
|
||||
maxCount = removeListJobs(waitKey, true, baseKey, maxCount)
|
||||
if(maxCount <= 0) then
|
||||
return 1
|
||||
end
|
||||
|
||||
local prioritizedKey = baseKey .. 'prioritized'
|
||||
maxCount = removeZSetJobs(prioritizedKey, true, baseKey, maxCount)
|
||||
if(maxCount <= 0) then
|
||||
return 1
|
||||
end
|
||||
|
||||
local failedKey = baseKey .. 'failed'
|
||||
maxCount = removeZSetJobs(failedKey, true, baseKey, maxCount)
|
||||
if(maxCount <= 0) then
|
||||
return 1
|
||||
end
|
||||
|
||||
if(maxCount > 0) then
|
||||
rcall("DEL",
|
||||
baseKey .. 'events',
|
||||
baseKey .. 'delay',
|
||||
baseKey .. 'stalled-check',
|
||||
baseKey .. 'stalled',
|
||||
baseKey .. 'id',
|
||||
baseKey .. 'pc',
|
||||
baseKey .. 'marker',
|
||||
baseKey .. 'meta',
|
||||
baseKey .. 'metrics:completed',
|
||||
baseKey .. 'metrics:completed:data',
|
||||
baseKey .. 'metrics:failed',
|
||||
baseKey .. 'metrics:failed:data')
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
end
|
||||
49
backend/node_modules/bullmq/dist/cjs/commands/paginate-1.lua
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
--[[
|
||||
Paginate a set or hash
|
||||
|
||||
Input:
|
||||
KEYS[1] key pointing to the set or hash to be paginated.
|
||||
|
||||
ARGV[1] page start offset
|
||||
ARGV[2] page end offset (-1 for all the elements)
|
||||
ARGV[3] cursor
|
||||
ARGV[4] offset
|
||||
ARGV[5] max iterations
|
||||
ARGV[6] fetch jobs?
|
||||
|
||||
Output:
|
||||
[cursor, offset, items, numItems, jobs]
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/findPage"
|
||||
|
||||
local key = KEYS[1]
|
||||
local scanCommand = "SSCAN"
|
||||
local countCommand = "SCARD"
|
||||
local type = rcall("TYPE", key)["ok"]
|
||||
|
||||
if type == "none" then
|
||||
return {0, 0, {}, 0}
|
||||
elseif type == "hash" then
|
||||
scanCommand = "HSCAN"
|
||||
countCommand = "HLEN"
|
||||
elseif type ~= "set" then
|
||||
return
|
||||
redis.error_reply("Pagination is only supported for sets and hashes.")
|
||||
end
|
||||
|
||||
local numItems = rcall(countCommand, key)
|
||||
local startOffset = tonumber(ARGV[1])
|
||||
local endOffset = tonumber(ARGV[2])
|
||||
if endOffset == -1 then
|
||||
endOffset = numItems
|
||||
end
|
||||
local pageSize = (endOffset - startOffset) + 1
|
||||
|
||||
local cursor, offset, items, jobs = findPage(key, scanCommand, startOffset,
|
||||
pageSize, ARGV[3], tonumber(ARGV[4]),
|
||||
tonumber(ARGV[5]), ARGV[6])
|
||||
|
||||
return {cursor, offset, items, numItems, jobs}
|
||||
42
backend/node_modules/bullmq/dist/cjs/commands/pause-7.lua
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
--[[
|
||||
Pauses or resumes a queue globally.
|
||||
|
||||
Input:
|
||||
KEYS[1] 'wait' or 'paused'
|
||||
KEYS[2] 'paused' or 'wait'
|
||||
KEYS[3] 'meta'
|
||||
KEYS[4] 'prioritized'
|
||||
KEYS[5] events stream key
|
||||
KEYS[6] 'delayed'
|
||||
KEYS[7] 'marker'
|
||||
|
||||
ARGV[1] 'paused' or 'resumed'
|
||||
|
||||
Event:
|
||||
publish paused or resumed event.
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addDelayMarkerIfNeeded"
|
||||
|
||||
local markerKey = KEYS[7]
|
||||
local hasJobs = rcall("EXISTS", KEYS[1]) == 1
|
||||
--TODO: check this logic to be reused when changing a delay
|
||||
if hasJobs then rcall("RENAME", KEYS[1], KEYS[2]) end
|
||||
|
||||
if ARGV[1] == "paused" then
|
||||
rcall("HSET", KEYS[3], "paused", 1)
|
||||
rcall("DEL", markerKey)
|
||||
else
|
||||
rcall("HDEL", KEYS[3], "paused")
|
||||
|
||||
if hasJobs or rcall("ZCARD", KEYS[4]) > 0 then
|
||||
-- Add marker if there are waiting or priority jobs
|
||||
rcall("ZADD", markerKey, 0, "0")
|
||||
else
|
||||
addDelayMarkerIfNeeded(markerKey, KEYS[6])
|
||||
end
|
||||
end
|
||||
|
||||
rcall("XADD", KEYS[5], "*", "event", ARGV[1]);
|
||||
61
backend/node_modules/bullmq/dist/cjs/commands/promote-9.lua
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
--[[
|
||||
Promotes a job that is currently "delayed" to the "waiting" state
|
||||
|
||||
Input:
|
||||
KEYS[1] 'delayed'
|
||||
KEYS[2] 'wait'
|
||||
KEYS[3] 'paused'
|
||||
KEYS[4] 'meta'
|
||||
KEYS[5] 'prioritized'
|
||||
KEYS[6] 'active'
|
||||
KEYS[7] 'pc' priority counter
|
||||
KEYS[8] 'event stream'
|
||||
KEYS[9] 'marker'
|
||||
|
||||
ARGV[1] queue.toKey('')
|
||||
ARGV[2] jobId
|
||||
|
||||
Output:
|
||||
0 - OK
|
||||
-3 - Job not in delayed zset.
|
||||
|
||||
Events:
|
||||
'waiting'
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local jobId = ARGV[2]
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/addJobInTargetList"
|
||||
--- @include "includes/addJobWithPriority"
|
||||
--- @include "includes/getTargetQueueList"
|
||||
|
||||
if rcall("ZREM", KEYS[1], jobId) == 1 then
|
||||
local jobKey = ARGV[1] .. jobId
|
||||
local priority = tonumber(rcall("HGET", jobKey, "priority")) or 0
|
||||
local metaKey = KEYS[4]
|
||||
local markerKey = KEYS[9]
|
||||
|
||||
-- Remove delayed "marker" from the wait list if there is any.
|
||||
-- Since we are adding a job we do not need the marker anymore.
|
||||
-- Markers in waitlist DEPRECATED in v5: Remove in v6.
|
||||
local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[6], KEYS[2], KEYS[3])
|
||||
local marker = rcall("LINDEX", target, 0)
|
||||
if marker and string.sub(marker, 1, 2) == "0:" then rcall("LPOP", target) end
|
||||
|
||||
if priority == 0 then
|
||||
-- LIFO or FIFO
|
||||
addJobInTargetList(target, markerKey, "LPUSH", isPausedOrMaxed, jobId)
|
||||
else
|
||||
addJobWithPriority(markerKey, KEYS[5], priority, jobId, KEYS[7], isPausedOrMaxed)
|
||||
end
|
||||
|
||||
rcall("XADD", KEYS[8], "*", "event", "waiting", "jobId", jobId, "prev",
|
||||
"delayed");
|
||||
|
||||
rcall("HSET", jobKey, "delay", 0)
|
||||
|
||||
return 0
|
||||
else
|
||||
return -3
|
||||
end
|
||||
19
backend/node_modules/bullmq/dist/cjs/commands/releaseLock-1.lua
generated
vendored
Normal file
@@ -0,0 +1,19 @@
--[[
Release lock

Input:
KEYS[1] 'lock',

ARGV[1] token
ARGV[2] lock duration in milliseconds

Output:
1 if the lock was released successfully, 0 otherwise.
]]
local rcall = redis.call

if rcall("GET", KEYS[1]) == ARGV[1] then
return rcall("DEL", KEYS[1])
else
return 0
end
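A hedged invocation sketch; the lock key and token below are assumptions:

-- EVAL <contents of releaseLock-1.lua> 1 bull:myQueue:123:lock my-worker-token
-- returns 1 when the stored token matches and the lock is deleted, 0 otherwise.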
34
backend/node_modules/bullmq/dist/cjs/commands/removeChildDependency-1.lua
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
--[[
|
||||
Break parent-child dependency by removing
|
||||
child reference from parent
|
||||
|
||||
Input:
|
||||
KEYS[1] 'key' prefix,
|
||||
|
||||
ARGV[1] job key
|
||||
ARGV[2] parent key
|
||||
|
||||
Output:
|
||||
0 - OK
|
||||
1 - There is no relationship.
|
||||
-1 - Missing job key
|
||||
-5 - Missing parent key
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local jobKey = ARGV[1]
|
||||
local parentKey = ARGV[2]
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/removeParentDependencyKey"
|
||||
|
||||
if rcall("EXISTS", jobKey) ~= 1 then return -1 end
|
||||
|
||||
if rcall("EXISTS", parentKey) ~= 1 then return -5 end
|
||||
|
||||
if removeParentDependencyKey(jobKey, false, parentKey, KEYS[1], nil) then
|
||||
rcall("HDEL", jobKey, "parentKey", "parent")
|
||||
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
end
|
||||
22
backend/node_modules/bullmq/dist/cjs/commands/removeDeduplicationKey-1.lua
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
--[[
|
||||
Remove deduplication key if it matches the job id.
|
||||
|
||||
Input:
|
||||
KEYS[1] deduplication key
|
||||
|
||||
ARGV[1] job id
|
||||
|
||||
Output:
|
||||
0 - false
|
||||
1 - true
|
||||
]]
|
||||
local rcall = redis.call
|
||||
local deduplicationKey = KEYS[1]
|
||||
local jobId = ARGV[1]
|
||||
|
||||
local currentJobId = rcall('GET', deduplicationKey)
|
||||
if currentJobId and currentJobId == jobId then
|
||||
return rcall("DEL", deduplicationKey)
|
||||
end
|
||||
|
||||
return 0
|
||||
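A hedged invocation sketch; the deduplication key layout and job id are assumptions used only for illustration:

-- EVAL <contents of removeDeduplicationKey-1.lua> 1 \
--     bull:myQueue:de:my-dedup-id 123
-- deletes the deduplication key only when it still points at job 123,
-- returning 1 on deletion and 0 otherwise.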
44
backend/node_modules/bullmq/dist/cjs/commands/removeJob-2.lua
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
--[[
|
||||
Remove a job from all the statuses it may be in as well as all its data.
|
||||
In order to be able to remove a job, it cannot be active.
|
||||
|
||||
Input:
|
||||
KEYS[1] jobKey
|
||||
KEYS[2] repeat key
|
||||
|
||||
ARGV[1] jobId
|
||||
ARGV[2] remove children
|
||||
ARGV[3] queue prefix
|
||||
|
||||
Events:
|
||||
'removed'
|
||||
]]
|
||||
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/isJobSchedulerJob"
|
||||
--- @include "includes/isLocked"
|
||||
--- @include "includes/removeJobWithChildren"
|
||||
|
||||
local jobId = ARGV[1]
|
||||
local shouldRemoveChildren = ARGV[2]
|
||||
local prefix = ARGV[3]
|
||||
local jobKey = KEYS[1]
|
||||
local repeatKey = KEYS[2]
|
||||
|
||||
if isJobSchedulerJob(jobId, jobKey, repeatKey) then
|
||||
return -8
|
||||
end
|
||||
|
||||
if not isLocked(prefix, jobId, shouldRemoveChildren) then
|
||||
local options = {
|
||||
removeChildren = shouldRemoveChildren == "1",
|
||||
ignoreProcessed = false,
|
||||
ignoreLocked = false
|
||||
}
|
||||
|
||||
removeJobWithChildren(prefix, jobId, nil, options)
|
||||
return 1
|
||||
end
|
||||
return 0
|
||||
43
backend/node_modules/bullmq/dist/cjs/commands/removeJobScheduler-3.lua
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
|
||||
--[[
|
||||
Removes a job scheduler and its next scheduled job.
|
||||
Input:
|
||||
KEYS[1] job schedulers key
|
||||
KEYS[2] delayed jobs key
|
||||
KEYS[3] events key
|
||||
|
||||
ARGV[1] job scheduler id
|
||||
ARGV[2] prefix key
|
||||
|
||||
Output:
|
||||
0 - OK
|
||||
1 - Missing repeat job
|
||||
|
||||
Events:
|
||||
'removed'
|
||||
]]
|
||||
local rcall = redis.call
|
||||
|
||||
-- Includes
|
||||
--- @include "includes/removeJobKeys"
|
||||
|
||||
local jobSchedulerId = ARGV[1]
|
||||
local prefix = ARGV[2]
|
||||
|
||||
local millis = rcall("ZSCORE", KEYS[1], jobSchedulerId)
|
||||
|
||||
if millis then
|
||||
-- Delete next programmed job.
|
||||
local delayedJobId = "repeat:" .. jobSchedulerId .. ":" .. millis
|
||||
if(rcall("ZREM", KEYS[2], delayedJobId) == 1) then
|
||||
removeJobKeys(prefix .. delayedJobId)
|
||||
rcall("XADD", KEYS[3], "*", "event", "removed", "jobId", delayedJobId, "prev", "delayed")
|
||||
end
|
||||
end
|
||||
|
||||
if(rcall("ZREM", KEYS[1], jobSchedulerId) == 1) then
|
||||
rcall("DEL", KEYS[1] .. ":" .. jobSchedulerId)
|
||||
return 0
|
||||
end
|
||||
|
||||
return 1
Some files were not shown because too many files have changed in this diff.