Project start

backend/node_modules/bullmq/dist/esm/commands/includes/addBaseMarkerIfNeeded.lua (generated, vendored, normal file, 9 additions)
@@ -0,0 +1,9 @@
--[[
  Add marker if needed when a job is available.
]]

local function addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed)
  if not isPausedOrMaxed then
    rcall("ZADD", markerKey, 0, "0")
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/addDelayMarkerIfNeeded.lua (generated, vendored, normal file, 15 additions)
@@ -0,0 +1,15 @@
--[[
  Add delay marker if needed.
]]

-- Includes
--- @include "getNextDelayedTimestamp"

local function addDelayMarkerIfNeeded(markerKey, delayedKey)
  local nextTimestamp = getNextDelayedTimestamp(delayedKey)
  if nextTimestamp ~= nil then
    -- Replace the score of the marker with the newest known
    -- next timestamp.
    rcall("ZADD", markerKey, nextTimestamp, "1")
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/addDelayedJob.lua (generated, vendored, normal file, 23 additions)
@@ -0,0 +1,23 @@
--[[
  Adds a delayed job to the queue by doing the following:
    - Creates a new job key with the job data.
    - adds to delayed zset.
    - Emits a global event 'delayed' if the job is delayed.
]]

-- Includes
--- @include "addDelayMarkerIfNeeded"
--- @include "getDelayedScore"

local function addDelayedJob(jobId, delayedKey, eventsKey, timestamp,
  maxEvents, markerKey, delay)

  local score, delayedTimestamp = getDelayedScore(delayedKey, timestamp, tonumber(delay))

  rcall("ZADD", delayedKey, score, jobId)
  rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "delayed",
    "jobId", jobId, "delay", delayedTimestamp)

  -- mark that a delayed job is available
  addDelayMarkerIfNeeded(markerKey, delayedKey)
end

backend/node_modules/bullmq/dist/esm/commands/includes/addJobFromScheduler.lua (generated, vendored, normal file, 39 additions)
@@ -0,0 +1,39 @@
--[[
  Add delay marker if needed.
]]

-- Includes
--- @include "addDelayedJob"
--- @include "addJobWithPriority"
--- @include "isQueuePaused"
--- @include "storeJob"
--- @include "getTargetQueueList"
--- @include "addJobInTargetList"

local function addJobFromScheduler(jobKey, jobId, opts, waitKey, pausedKey, activeKey, metaKey,
  prioritizedKey, priorityCounter, delayedKey, markerKey, eventsKey, name, maxEvents, timestamp,
  data, jobSchedulerId, repeatDelay)

  opts['delay'] = repeatDelay
  opts['jobId'] = jobId

  local delay, priority = storeJob(eventsKey, jobKey, jobId, name, data,
    opts, timestamp, nil, nil, jobSchedulerId)

  if delay ~= 0 then
    addDelayedJob(jobId, delayedKey, eventsKey, timestamp, maxEvents, markerKey, delay)
  else
    local target, isPausedOrMaxed = getTargetQueueList(metaKey, activeKey, waitKey, pausedKey)

    -- Standard or priority add
    if priority == 0 then
      local pushCmd = opts['lifo'] and 'RPUSH' or 'LPUSH'
      addJobInTargetList(target, markerKey, pushCmd, isPausedOrMaxed, jobId)
    else
      -- Priority add
      addJobWithPriority(markerKey, prioritizedKey, priority, jobId, priorityCounter, isPausedOrMaxed)
    end
    -- Emit waiting event
    rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "waiting", "jobId", jobId)
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/addJobInTargetList.lua (generated, vendored, normal file, 11 additions)
@@ -0,0 +1,11 @@
--[[
  Function to add job in target list and add marker if needed.
]]

-- Includes
--- @include "addBaseMarkerIfNeeded"

local function addJobInTargetList(targetKey, markerKey, pushCmd, isPausedOrMaxed, jobId)
  rcall(pushCmd, targetKey, jobId)
  addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed)
end

backend/node_modules/bullmq/dist/esm/commands/includes/addJobWithPriority.lua (generated, vendored, normal file, 14 additions)
@@ -0,0 +1,14 @@
--[[
  Function to add job considering priority.
]]

-- Includes
--- @include "addBaseMarkerIfNeeded"
--- @include "getPriorityScore"

local function addJobWithPriority(markerKey, prioritizedKey, priority, jobId, priorityCounterKey,
  isPausedOrMaxed)
  local score = getPriorityScore(priority, priorityCounterKey)
  rcall("ZADD", prioritizedKey, score, jobId)
  addBaseMarkerIfNeeded(markerKey, isPausedOrMaxed)
end

backend/node_modules/bullmq/dist/esm/commands/includes/batches.lua (generated, vendored, normal file, 18 additions)
@@ -0,0 +1,18 @@
--[[
  Function to loop in batches.
  Just a bit of warning, some commands such as ZREM
  can receive a maximum of 7000 parameters per call.
]]

local function batches(n, batchSize)
  local i = 0

  return function()
    local from = i * batchSize + 1
    i = i + 1
    if (from <= n) then
      local to = math.min(from + batchSize - 1, n)
      return from, to
    end
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/checkItemInList.lua (generated, vendored, normal file, 12 additions)
@@ -0,0 +1,12 @@
--[[
  Functions to check if an item belongs to a list.
]]

local function checkItemInList(list, item)
  for _, v in pairs(list) do
    if v == item then
      return 1
    end
  end
  return nil
end

backend/node_modules/bullmq/dist/esm/commands/includes/cleanList.lua (generated, vendored, normal file, 49 additions)
@@ -0,0 +1,49 @@
--[[
  Function to clean job list.
  Returns jobIds and deleted count number.
]]

-- Includes
--- @include "getTimestamp"
--- @include "isJobSchedulerJob"
--- @include "removeJob"

local function cleanList(listKey, jobKeyPrefix, rangeStart, rangeEnd,
  timestamp, isWaiting, jobSchedulersKey)
  local jobs = rcall("LRANGE", listKey, rangeStart, rangeEnd)
  local deleted = {}
  local deletedCount = 0
  local jobTS
  local deletionMarker = ''
  local jobIdsLen = #jobs
  for i, job in ipairs(jobs) do
    if limit > 0 and deletedCount >= limit then
      break
    end

    local jobKey = jobKeyPrefix .. job
    if (isWaiting or rcall("EXISTS", jobKey .. ":lock") == 0) and
      not isJobSchedulerJob(job, jobKey, jobSchedulersKey) then
      -- Find the right timestamp of the job to compare to maxTimestamp:
      -- * finishedOn says when the job was completed, but it isn't set unless the job has actually completed
      -- * processedOn represents when the job was last attempted, but it doesn't get populated until
      --   the job is first tried
      -- * timestamp is the original job submission time
      -- Fetch all three of these (in that order) and use the first one that is set so that we'll leave jobs
      -- that have been active within the grace period:
      jobTS = getTimestamp(jobKey, {"finishedOn", "processedOn", "timestamp"})
      if (not jobTS or jobTS <= timestamp) then
        -- replace the entry with a deletion marker; the actual deletion will
        -- occur at the end of the script
        rcall("LSET", listKey, rangeEnd - jobIdsLen + i, deletionMarker)
        removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]])
        deletedCount = deletedCount + 1
        table.insert(deleted, job)
      end
    end
  end

  rcall("LREM", listKey, 0, deletionMarker)

  return {deleted, deletedCount}
end

backend/node_modules/bullmq/dist/esm/commands/includes/cleanSet.lua (generated, vendored, normal file, 58 additions)
@@ -0,0 +1,58 @@
--[[
  Function to clean job set.
  Returns jobIds and deleted count number.
]]

-- Includes
--- @include "batches"
--- @include "getJobsInZset"
--- @include "getTimestamp"
--- @include "isJobSchedulerJob"
--- @include "removeJob"

local function cleanSet(
  setKey,
  jobKeyPrefix,
  rangeEnd,
  timestamp,
  limit,
  attributes,
  isFinished,
  jobSchedulersKey)
  local jobs = getJobsInZset(setKey, rangeEnd, limit)
  local deleted = {}
  local deletedCount = 0
  local jobTS
  for i, job in ipairs(jobs) do
    if limit > 0 and deletedCount >= limit then
      break
    end

    local jobKey = jobKeyPrefix .. job
    -- Extract a Job Scheduler Id from jobId ("repeat:job-scheduler-id:millis")
    -- and check if it is in the scheduled jobs
    if not (jobSchedulersKey and isJobSchedulerJob(job, jobKey, jobSchedulersKey)) then
      if isFinished then
        removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]] )
        deletedCount = deletedCount + 1
        table.insert(deleted, job)
      else
        -- * finishedOn says when the job was completed, but it isn't set unless the job has actually completed
        jobTS = getTimestamp(jobKey, attributes)
        if (not jobTS or jobTS <= timestamp) then
          removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]] )
          deletedCount = deletedCount + 1
          table.insert(deleted, job)
        end
      end
    end
  end

  if (#deleted > 0) then
    for from, to in batches(#deleted, 7000) do
      rcall("ZREM", setKey, unpack(deleted, from, to))
    end
  end

  return {deleted, deletedCount}
end

backend/node_modules/bullmq/dist/esm/commands/includes/collectMetrics.lua (generated, vendored, normal file, 46 additions)
@@ -0,0 +1,46 @@
--[[
  Functions to collect metrics based on a current and previous count of jobs.
  Granularity is fixed at 1 minute.
]]
--- @include "batches"
local function collectMetrics(metaKey, dataPointsList, maxDataPoints,
  timestamp)
  -- Increment current count
  local count = rcall("HINCRBY", metaKey, "count", 1) - 1

  -- Compute how many data points we need to add to the list, N.
  local prevTS = rcall("HGET", metaKey, "prevTS")

  if not prevTS then
    -- If prevTS is nil, set it to the current timestamp
    rcall("HSET", metaKey, "prevTS", timestamp, "prevCount", 0)
    return
  end

  local N = math.min(math.floor(timestamp / 60000) - math.floor(prevTS / 60000), tonumber(maxDataPoints))

  if N > 0 then
    local delta = count - rcall("HGET", metaKey, "prevCount")
    -- If N > 1, add N-1 zeros to the list
    if N > 1 then
      local points = {}
      points[1] = delta
      for i = 2, N do
        points[i] = 0
      end

      for from, to in batches(#points, 7000) do
        rcall("LPUSH", dataPointsList, unpack(points, from, to))
      end
    else
      -- LPUSH delta to the list
      rcall("LPUSH", dataPointsList, delta)
    end

    -- LTRIM to keep list to its max size
    rcall("LTRIM", dataPointsList, 0, maxDataPoints - 1)

    -- update prev count with current count
    rcall("HSET", metaKey, "prevCount", count, "prevTS", timestamp)
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/deduplicateJob.lua (generated, vendored, normal file, 102 additions)
@@ -0,0 +1,102 @@
--[[
  Function to debounce a job.
]]
-- Includes
--- @include "removeJobKeys"

local function removeDelayedJob(delayedKey, deduplicationKey, eventsKey, maxEvents, currentDeduplicatedJobId,
  jobId, deduplicationId, prefix)
  if rcall("ZREM", delayedKey, currentDeduplicatedJobId) > 0 then
    removeJobKeys(prefix .. currentDeduplicatedJobId)
    rcall("XADD", eventsKey, "*", "event", "removed", "jobId", currentDeduplicatedJobId,
      "prev", "delayed")

    -- TODO remove debounced event in next breaking change
    rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "debounced", "jobId",
      jobId, "debounceId", deduplicationId)
    rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "deduplicated", "jobId",
      jobId, "deduplicationId", deduplicationId, "deduplicatedJobId", currentDeduplicatedJobId)

    return true
  end

  return false
end

local function deduplicateJob(deduplicationOpts, jobId, delayedKey, deduplicationKey, eventsKey, maxEvents,
  prefix)
  local deduplicationId = deduplicationOpts and deduplicationOpts['id']
  if deduplicationId then
    local ttl = deduplicationOpts['ttl']
    if deduplicationOpts['replace'] then
      if ttl and ttl > 0 then
        local currentDebounceJobId = rcall('GET', deduplicationKey)
        if currentDebounceJobId then
          local isRemoved = removeDelayedJob(delayedKey, deduplicationKey, eventsKey, maxEvents,
            currentDebounceJobId, jobId, deduplicationId, prefix)
          if isRemoved then
            if deduplicationOpts['extend'] then
              rcall('SET', deduplicationKey, jobId, 'PX', ttl)
            else
              rcall('SET', deduplicationKey, jobId, 'KEEPTTL')
            end
            return
          else
            return currentDebounceJobId
          end
        else
          rcall('SET', deduplicationKey, jobId, 'PX', ttl)
          return
        end
      else
        local currentDebounceJobId = rcall('GET', deduplicationKey)
        if currentDebounceJobId then
          local isRemoved = removeDelayedJob(delayedKey, deduplicationKey, eventsKey, maxEvents,
            currentDebounceJobId, jobId, deduplicationId, prefix)

          if isRemoved then
            rcall('SET', deduplicationKey, jobId)
            return
          else
            return currentDebounceJobId
          end
        else
          rcall('SET', deduplicationKey, jobId)
          return
        end
      end
    else
      local deduplicationKeyExists
      if ttl and ttl > 0 then
        if deduplicationOpts['extend'] then
          local currentDebounceJobId = rcall('GET', deduplicationKey)
          if currentDebounceJobId then
            rcall('SET', deduplicationKey, currentDebounceJobId, 'PX', ttl)
            rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "debounced",
              "jobId", currentDebounceJobId, "debounceId", deduplicationId)
            rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "deduplicated", "jobId",
              currentDebounceJobId, "deduplicationId", deduplicationId, "deduplicatedJobId", jobId)
            return currentDebounceJobId
          else
            rcall('SET', deduplicationKey, jobId, 'PX', ttl)
            return
          end
        else
          deduplicationKeyExists = not rcall('SET', deduplicationKey, jobId, 'PX', ttl, 'NX')
        end
      else
        deduplicationKeyExists = not rcall('SET', deduplicationKey, jobId, 'NX')
      end

      if deduplicationKeyExists then
        local currentDebounceJobId = rcall('GET', deduplicationKey)
        -- TODO remove debounced event in next breaking change
        rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "debounced", "jobId",
          currentDebounceJobId, "debounceId", deduplicationId)
        rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "deduplicated", "jobId",
          currentDebounceJobId, "deduplicationId", deduplicationId, "deduplicatedJobId", jobId)
        return currentDebounceJobId
      end
    end
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/destructureJobKey.lua (generated, vendored, normal file, 12 additions)
@@ -0,0 +1,12 @@
--[[
  Functions to destructure job key.
  Just a bit of warning, these functions may be a bit slow and affect performance significantly.
]]

local getJobIdFromKey = function (jobKey)
  return string.match(jobKey, ".*:(.*)")
end

local getJobKeyPrefix = function (jobKey, jobId)
  return string.sub(jobKey, 0, #jobKey - #jobId)
end

backend/node_modules/bullmq/dist/esm/commands/includes/filterOutJobsToIgnore.lua (generated, vendored, normal file, 14 additions)
@@ -0,0 +1,14 @@
--[[
  Function to filter out jobs to ignore from a table.
]]

local function filterOutJobsToIgnore(jobs, jobsToIgnore)
  local filteredJobs = {}
  for i = 1, #jobs do
    if not jobsToIgnore[jobs[i]] then
      table.insert(filteredJobs, jobs[i])
    end
  end
  return filteredJobs
end

backend/node_modules/bullmq/dist/esm/commands/includes/findPage.lua (generated, vendored, normal file, 70 additions)
@@ -0,0 +1,70 @@
--[[
  Function to achieve pagination for a set or hash.
  This function simulates pagination in the most efficient way possible
  for a set using sscan or hscan.

  The main limitation is that sets are not order preserving, so the
  pagination is not stable. This means that if the set is modified
  between pages, the same element may appear in different pages.
]] -- Maximum number of elements to be returned by sscan per iteration.
local maxCount = 100

-- Finds the cursor, and returns the first elements available for the requested page.
local function findPage(key, command, pageStart, pageSize, cursor, offset,
  maxIterations, fetchJobs)
  local items = {}
  local jobs = {}
  local iterations = 0

  repeat
    -- Iterate over the set using sscan/hscan.
    local result = rcall(command, key, cursor, "COUNT", maxCount)

    cursor = result[1]
    local members = result[2]
    local step = 1
    if command == "HSCAN" then
      step = 2
    end

    if #members == 0 then
      -- If the result is empty, we can return the result.
      return cursor, offset, items, jobs
    end

    local chunkStart = offset
    local chunkEnd = offset + #members / step

    local pageEnd = pageStart + pageSize

    if chunkEnd < pageStart then
      -- If the chunk is before the page, we can skip it.
      offset = chunkEnd
    elseif chunkStart > pageEnd then
      -- If the chunk is after the page, we can return the result.
      return cursor, offset, items, jobs
    else
      -- If the chunk is overlapping the page, we need to add the elements to the result.
      for i = 1, #members, step do
        if offset >= pageEnd then
          return cursor, offset, items, jobs
        end
        if offset >= pageStart then
          local index = #items + 1
          if fetchJobs ~= nil then
            jobs[#jobs + 1] = rcall("HGETALL", members[i])
          end
          if step == 2 then
            items[index] = {members[i], members[i + 1]}
          else
            items[index] = members[i]
          end
        end
        offset = offset + 1
      end
    end
    iterations = iterations + 1
  until cursor == "0" or iterations >= maxIterations

  return cursor, offset, items, jobs
end

backend/node_modules/bullmq/dist/esm/commands/includes/getDelayedScore.lua (generated, vendored, normal file, 25 additions)
@@ -0,0 +1,25 @@
--[[
  Bake in the job id first 12 bits into the timestamp
  to guarantee correct execution order of delayed jobs
  (up to 4096 jobs per given timestamp or 4096 jobs apart per timestamp)
  WARNING: Jobs that are so far apart that they wrap around will cause FIFO to fail
]]
local function getDelayedScore(delayedKey, timestamp, delay)
  local delayedTimestamp = (delay > 0 and (tonumber(timestamp) + delay)) or tonumber(timestamp)
  local minScore = delayedTimestamp * 0x1000
  local maxScore = (delayedTimestamp + 1) * 0x1000 - 1

  local result = rcall("ZREVRANGEBYSCORE", delayedKey, maxScore,
    minScore, "WITHSCORES", "LIMIT", 0, 1)
  if #result then
    local currentMaxScore = tonumber(result[2])
    if currentMaxScore ~= nil then
      if currentMaxScore >= maxScore then
        return maxScore, delayedTimestamp
      else
        return currentMaxScore + 1, delayedTimestamp
      end
    end
  end
  return minScore, delayedTimestamp
end

backend/node_modules/bullmq/dist/esm/commands/includes/getJobSchedulerEveryNextMillis.lua (generated, vendored, normal file, 28 additions)
@@ -0,0 +1,28 @@
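--[[
  Compute the next execution time (in milliseconds) for an "every"-based job
  scheduler, together with the offset inside the repetition slot. When
  previous iterations were missed, the next time is realigned to the slot
  following the current one.
]]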
local function getJobSchedulerEveryNextMillis(prevMillis, every, now, offset, startDate)
  local nextMillis
  if not prevMillis then
    if startDate then
      -- Assuming startDate is passed as milliseconds from JavaScript
      nextMillis = tonumber(startDate)
      nextMillis = nextMillis > now and nextMillis or now
    else
      nextMillis = now
    end
  else
    nextMillis = prevMillis + every
    -- check if we may have missed some iterations
    if nextMillis < now then
      nextMillis = math.floor(now / every) * every + every + (offset or 0)
    end
  end

  if not offset or offset == 0 then
    local timeSlot = math.floor(nextMillis / every) * every
    offset = nextMillis - timeSlot
  end

  -- Return a tuple nextMillis, offset
  return math.floor(nextMillis), math.floor(offset)
end

backend/node_modules/bullmq/dist/esm/commands/includes/getJobsInZset.lua (generated, vendored, normal file, 13 additions)
@@ -0,0 +1,13 @@
--[[
  We use ZRANGEBYSCORE to make the case where we're deleting a limited number
  of items in a sorted set only run a single iteration. If we simply used
  ZRANGE, we may take a long time traversing through jobs that are within the
  grace period.
]]
local function getJobsInZset(zsetKey, rangeEnd, limit)
  if limit > 0 then
    return rcall("ZRANGEBYSCORE", zsetKey, 0, rangeEnd, "LIMIT", 0, limit)
  else
    return rcall("ZRANGEBYSCORE", zsetKey, 0, rangeEnd)
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/getNextDelayedTimestamp.lua (generated, vendored, normal file, 12 additions)
@@ -0,0 +1,12 @@
--[[
  Function to return the next delayed job timestamp.
]]
local function getNextDelayedTimestamp(delayedKey)
  local result = rcall("ZRANGE", delayedKey, 0, 0, "WITHSCORES")
  if #result then
    local nextTimestamp = tonumber(result[2])
    if nextTimestamp ~= nil then
      return nextTimestamp / 0x1000
    end
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/getOrSetMaxEvents.lua (generated, vendored, normal file, 11 additions)
@@ -0,0 +1,11 @@
--[[
  Function to get max events value or set by default 10000.
]]
local function getOrSetMaxEvents(metaKey)
  local maxEvents = rcall("HGET", metaKey, "opts.maxLenEvents")
  if not maxEvents then
    maxEvents = 10000
    rcall("HSET", metaKey, "opts.maxLenEvents", maxEvents)
  end
  return maxEvents
end

backend/node_modules/bullmq/dist/esm/commands/includes/getPriorityScore.lua (generated, vendored, normal file, 8 additions)
@@ -0,0 +1,8 @@
--[[
  Function to get priority score.
]]

local function getPriorityScore(priority, priorityCounterKey)
  local prioCounter = rcall("INCR", priorityCounterKey)
  return priority * 0x100000000 + prioCounter % 0x100000000
end

backend/node_modules/bullmq/dist/esm/commands/includes/getRateLimitTTL.lua (generated, vendored, normal file, 17 additions)
@@ -0,0 +1,17 @@
--[[
  Function to get current rate limit ttl.
]]
local function getRateLimitTTL(maxJobs, rateLimiterKey)
  if maxJobs and maxJobs <= tonumber(rcall("GET", rateLimiterKey) or 0) then
    local pttl = rcall("PTTL", rateLimiterKey)

    if pttl == 0 then
      rcall("DEL", rateLimiterKey)
    end

    if pttl > 0 then
      return pttl
    end
  end
  return 0
end

backend/node_modules/bullmq/dist/esm/commands/includes/getTargetQueueList.lua (generated, vendored, normal file, 22 additions)
@@ -0,0 +1,22 @@
--[[
  Function to check for the meta.paused key to decide if we are paused or not
  (since an empty list and !EXISTS are not really the same).
]]

local function getTargetQueueList(queueMetaKey, activeKey, waitKey, pausedKey)
  local queueAttributes = rcall("HMGET", queueMetaKey, "paused", "concurrency", "max", "duration")

  if queueAttributes[1] then
    return pausedKey, true, queueAttributes[3], queueAttributes[4]
  else
    if queueAttributes[2] then
      local activeCount = rcall("LLEN", activeKey)
      if activeCount >= tonumber(queueAttributes[2]) then
        return waitKey, true, queueAttributes[3], queueAttributes[4]
      else
        return waitKey, false, queueAttributes[3], queueAttributes[4]
      end
    end
  end
  return waitKey, false, queueAttributes[3], queueAttributes[4]
end

backend/node_modules/bullmq/dist/esm/commands/includes/getTimestamp.lua (generated, vendored, normal file, 19 additions)
@@ -0,0 +1,19 @@
--[[
  Function to get the latest saved timestamp.
]]

local function getTimestamp(jobKey, attributes)
  if #attributes == 1 then
    return rcall("HGET", jobKey, attributes[1])
  end

  local jobTs
  for _, ts in ipairs(rcall("HMGET", jobKey, unpack(attributes))) do
    if (ts) then
      jobTs = ts
      break
    end
  end

  return jobTs
end

backend/node_modules/bullmq/dist/esm/commands/includes/getZSetItems.lua (generated, vendored, normal file, 7 additions)
@@ -0,0 +1,7 @@
--[[
  Function to get ZSet items.
]]

local function getZSetItems(keyName, max)
  return rcall('ZRANGE', keyName, 0, max - 1)
end

backend/node_modules/bullmq/dist/esm/commands/includes/handleDuplicatedJob.lua (generated, vendored, normal file, 26 additions)
@@ -0,0 +1,26 @@
--[[
  Function to handle the case when job is duplicated.
]]

-- Includes
--- @include "updateExistingJobsParent"

local function handleDuplicatedJob(jobKey, jobId, currentParentKey, currentParent,
  parentData, parentDependenciesKey, completedKey, eventsKey, maxEvents, timestamp)
  local existedParentKey = rcall("HGET", jobKey, "parentKey")

  if not existedParentKey or existedParentKey == currentParentKey then
    updateExistingJobsParent(currentParentKey, currentParent, parentData,
      parentDependenciesKey, completedKey, jobKey,
      jobId, timestamp)
  else
    if currentParentKey ~= nil and currentParentKey ~= existedParentKey
      and (rcall("EXISTS", existedParentKey) == 1) then
      return -7
    end
  end
  rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event",
    "duplicated", "jobId", jobId)

  return jobId .. "" -- convert to string
end

backend/node_modules/bullmq/dist/esm/commands/includes/isJobSchedulerJob.lua (generated, vendored, normal file, 15 additions)
@@ -0,0 +1,15 @@
--[[
  Function to check if the job belongs to a job scheduler and
  current delayed job matches with jobId
]]
local function isJobSchedulerJob(jobId, jobKey, jobSchedulersKey)
  local repeatJobKey = rcall("HGET", jobKey, "rjk")
  if repeatJobKey then
    local prevMillis = rcall("ZSCORE", jobSchedulersKey, repeatJobKey)
    if prevMillis then
      local currentDelayedJobId = "repeat:" .. repeatJobKey .. ":" .. prevMillis
      return jobId == currentDelayedJobId
    end
  end
  return false
end

backend/node_modules/bullmq/dist/esm/commands/includes/isLocked.lua (generated, vendored, normal file, 34 additions)
@@ -0,0 +1,34 @@
--[[
  Function to recursively check if there are no locks
  on the jobs to be removed.

  returns:
    boolean
]]
--- @include "destructureJobKey"

local function isLocked(prefix, jobId, removeChildren)
  local jobKey = prefix .. jobId

  -- Check if this job is locked
  local lockKey = jobKey .. ':lock'
  local lock = rcall("GET", lockKey)
  if not lock then
    if removeChildren == "1" then
      local dependencies = rcall("SMEMBERS", jobKey .. ":dependencies")
      if (#dependencies > 0) then
        for i, childJobKey in ipairs(dependencies) do
          -- We need to get the jobId for this job.
          local childJobId = getJobIdFromKey(childJobKey)
          local childJobPrefix = getJobKeyPrefix(childJobKey, childJobId)
          local result = isLocked(childJobPrefix, childJobId, removeChildren)
          if result then
            return true
          end
        end
      end
    end
    return false
  end
  return true
end

backend/node_modules/bullmq/dist/esm/commands/includes/isQueueMaxed.lua (generated, vendored, normal file, 15 additions)
@@ -0,0 +1,15 @@
--[[
  Function to check if queue is maxed or not.
]]
local function isQueueMaxed(queueMetaKey, activeKey)
  local maxConcurrency = rcall("HGET", queueMetaKey, "concurrency")

  if maxConcurrency then
    local activeCount = rcall("LLEN", activeKey)
    if activeCount >= tonumber(maxConcurrency) then
      return true
    end
  end

  return false
end

backend/node_modules/bullmq/dist/esm/commands/includes/isQueuePaused.lua (generated, vendored, normal file, 7 additions)
@@ -0,0 +1,7 @@
--[[
  Function to check for the meta.paused key to decide if we are paused or not
  (since an empty list and !EXISTS are not really the same).
]]
local function isQueuePaused(queueMetaKey)
  return rcall("HEXISTS", queueMetaKey, "paused") == 1
end

backend/node_modules/bullmq/dist/esm/commands/includes/isQueuePausedOrMaxed.lua (generated, vendored, normal file, 18 additions)
@@ -0,0 +1,18 @@
--[[
  Function to check if queue is paused or maxed
  (since an empty list and !EXISTS are not really the same).
]]

local function isQueuePausedOrMaxed(queueMetaKey, activeKey)
  local queueAttributes = rcall("HMGET", queueMetaKey, "paused", "concurrency")

  if queueAttributes[1] then
    return true
  else
    if queueAttributes[2] then
      local activeCount = rcall("LLEN", activeKey)
      return activeCount >= tonumber(queueAttributes[2])
    end
  end
  return false
end

backend/node_modules/bullmq/dist/esm/commands/includes/moveChildFromDependenciesIfNeeded.lua (generated, vendored, normal file, 70 additions)
@@ -0,0 +1,70 @@
--[[
  Function to recursively move from waitingChildren to failed.
]]

-- Includes
--- @include "moveParentToWaitIfNoPendingDependencies"
--- @include "moveParentToWaitIfNeeded"
--- @include "moveParentToWait"

local handleChildFailureAndMoveParentToWait = function (parentQueueKey, parentKey, parentId, jobIdKey, timestamp)
  if rcall("EXISTS", parentKey) == 1 then
    local parentWaitingChildrenKey = parentQueueKey .. ":waiting-children"
    local parentDelayedKey = parentQueueKey .. ":delayed"
    local parentWaitingChildrenOrDelayedKey
    if rcall("ZSCORE", parentWaitingChildrenKey, parentId) then
      parentWaitingChildrenOrDelayedKey = parentWaitingChildrenKey
    elseif rcall("ZSCORE", parentDelayedKey, parentId) then
      parentWaitingChildrenOrDelayedKey = parentDelayedKey
      rcall("HSET", parentKey, "delay", 0)
    end

    if parentWaitingChildrenOrDelayedKey then
      rcall("ZREM", parentWaitingChildrenOrDelayedKey, parentId)
      local deferredFailure = "child " .. jobIdKey .. " failed"
      rcall("HSET", parentKey, "defa", deferredFailure)
      moveParentToWait(parentQueueKey, parentKey, parentId, timestamp)
    else
      if not rcall("ZSCORE", parentQueueKey .. ":failed", parentId) then
        local deferredFailure = "child " .. jobIdKey .. " failed"
        rcall("HSET", parentKey, "defa", deferredFailure)
      end
    end
  end
end

local moveChildFromDependenciesIfNeeded = function (rawParentData, childKey, failedReason, timestamp)
  if rawParentData then
    local parentData = cjson.decode(rawParentData)
    local parentKey = parentData['queueKey'] .. ':' .. parentData['id']
    local parentDependenciesChildrenKey = parentKey .. ":dependencies"
    if parentData['fpof'] then
      if rcall("SREM", parentDependenciesChildrenKey, childKey) == 1 then
        local parentUnsuccessfulChildrenKey = parentKey .. ":unsuccessful"
        rcall("ZADD", parentUnsuccessfulChildrenKey, timestamp, childKey)
        handleChildFailureAndMoveParentToWait(
          parentData['queueKey'],
          parentKey,
          parentData['id'],
          childKey,
          timestamp
        )
      end
    elseif parentData['cpof'] then
      if rcall("SREM", parentDependenciesChildrenKey, childKey) == 1 then
        local parentFailedChildrenKey = parentKey .. ":failed"
        rcall("HSET", parentFailedChildrenKey, childKey, failedReason)
        moveParentToWaitIfNeeded(parentData['queueKey'], parentKey, parentData['id'], timestamp)
      end
    elseif parentData['idof'] or parentData['rdof'] then
      if rcall("SREM", parentDependenciesChildrenKey, childKey) == 1 then
        moveParentToWaitIfNoPendingDependencies(parentData['queueKey'], parentDependenciesChildrenKey,
          parentKey, parentData['id'], timestamp)
        if parentData['idof'] then
          local parentFailedChildrenKey = parentKey .. ":failed"
          rcall("HSET", parentFailedChildrenKey, childKey, failedReason)
        end
      end
    end
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/moveJobFromPrioritizedToActive.lua (generated, vendored, normal file, 13 additions)
@@ -0,0 +1,13 @@
--[[
  Function to move job from prioritized state to active.
]]

local function moveJobFromPrioritizedToActive(priorityKey, activeKey, priorityCounterKey)
  local prioritizedJob = rcall("ZPOPMIN", priorityKey)
  if #prioritizedJob > 0 then
    rcall("LPUSH", activeKey, prioritizedJob[1])
    return prioritizedJob[1]
  else
    rcall("DEL", priorityCounterKey)
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/moveJobToWait.lua (generated, vendored, normal file, 15 additions)
@@ -0,0 +1,15 @@
--[[
  Function to move job to wait to be picked up by a waiting worker.
]]

-- Includes
--- @include "addJobInTargetList"
--- @include "getTargetQueueList"

local function moveJobToWait(metaKey, activeKey, waitKey, pausedKey, markerKey, eventStreamKey,
  jobId, pushCmd)
  local target, isPausedOrMaxed = getTargetQueueList(metaKey, activeKey, waitKey, pausedKey)
  addJobInTargetList(target, markerKey, pushCmd, isPausedOrMaxed, jobId)

  rcall("XADD", eventStreamKey, "*", "event", "waiting", "jobId", jobId, 'prev', 'active')
end

backend/node_modules/bullmq/dist/esm/commands/includes/moveParentToWait.lua (generated, vendored, normal file, 45 additions)
@@ -0,0 +1,45 @@
--[[
  Move parent to a wait status (wait, prioritized or delayed)
]]

-- Includes
--- @include "addDelayMarkerIfNeeded"
--- @include "addJobInTargetList"
--- @include "addJobWithPriority"
--- @include "isQueuePausedOrMaxed"
--- @include "getTargetQueueList"
local function moveParentToWait(parentQueueKey, parentKey, parentId, timestamp)
  local parentWaitKey = parentQueueKey .. ":wait"
  local parentPausedKey = parentQueueKey .. ":paused"
  local parentActiveKey = parentQueueKey .. ":active"
  local parentMetaKey = parentQueueKey .. ":meta"

  local parentMarkerKey = parentQueueKey .. ":marker"
  local jobAttributes = rcall("HMGET", parentKey, "priority", "delay")
  local priority = tonumber(jobAttributes[1]) or 0
  local delay = tonumber(jobAttributes[2]) or 0

  if delay > 0 then
    local delayedTimestamp = tonumber(timestamp) + delay
    local score = delayedTimestamp * 0x1000
    local parentDelayedKey = parentQueueKey .. ":delayed"
    rcall("ZADD", parentDelayedKey, score, parentId)
    rcall("XADD", parentQueueKey .. ":events", "*", "event", "delayed", "jobId", parentId, "delay",
      delayedTimestamp)

    addDelayMarkerIfNeeded(parentMarkerKey, parentDelayedKey)
  else
    if priority == 0 then
      local parentTarget, isParentPausedOrMaxed = getTargetQueueList(parentMetaKey, parentActiveKey,
        parentWaitKey, parentPausedKey)
      addJobInTargetList(parentTarget, parentMarkerKey, "RPUSH", isParentPausedOrMaxed, parentId)
    else
      local isPausedOrMaxed = isQueuePausedOrMaxed(parentMetaKey, parentActiveKey)
      addJobWithPriority(parentMarkerKey, parentQueueKey .. ":prioritized", priority, parentId,
        parentQueueKey .. ":pc", isPausedOrMaxed)
    end

    rcall("XADD", parentQueueKey .. ":events", "*", "event", "waiting", "jobId", parentId, "prev",
      "waiting-children")
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/moveParentToWaitIfNeeded.lua (generated, vendored, normal file, 14 additions)
@@ -0,0 +1,14 @@
--[[
  Validate and move parent to a wait status (waiting, delayed or prioritized) if needed.
]]
-- Includes
--- @include "moveParentToWait"
local function moveParentToWaitIfNeeded(parentQueueKey, parentKey, parentId, timestamp)
  if rcall("EXISTS", parentKey) == 1 then
    local parentWaitingChildrenKey = parentQueueKey .. ":waiting-children"
    if rcall("ZSCORE", parentWaitingChildrenKey, parentId) then
      rcall("ZREM", parentWaitingChildrenKey, parentId)
      moveParentToWait(parentQueueKey, parentKey, parentId, timestamp)
    end
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/moveParentToWaitIfNoPendingDependencies.lua (generated, vendored, normal file, 13 additions)
@@ -0,0 +1,13 @@
--[[
  Validate and move parent to a wait status (waiting, delayed or prioritized)
  if no pending dependencies.
]]
-- Includes
--- @include "moveParentToWaitIfNeeded"
local function moveParentToWaitIfNoPendingDependencies(parentQueueKey, parentDependenciesKey, parentKey,
  parentId, timestamp)
  local doNotHavePendingDependencies = rcall("SCARD", parentDependenciesKey) == 0
  if doNotHavePendingDependencies then
    moveParentToWaitIfNeeded(parentQueueKey, parentKey, parentId, timestamp)
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/prepareJobForProcessing.lua (generated, vendored, normal file, 49 additions)
@@ -0,0 +1,49 @@
--[[
  Function to move job from wait state to active.
  Input:
    opts - token - lock token
    opts - lockDuration
    opts - limiter
]]

-- Includes
--- @include "addBaseMarkerIfNeeded"

local function prepareJobForProcessing(keyPrefix, rateLimiterKey, eventStreamKey,
  jobId, processedOn, maxJobs, limiterDuration, markerKey, opts)
  local jobKey = keyPrefix .. jobId

  -- Check if we need to perform rate limiting.
  if maxJobs then
    local jobCounter = tonumber(rcall("INCR", rateLimiterKey))

    if jobCounter == 1 then
      local integerDuration = math.floor(math.abs(limiterDuration))
      rcall("PEXPIRE", rateLimiterKey, integerDuration)
    end
  end

  -- get a lock
  if opts['token'] ~= "0" then
    local lockKey = jobKey .. ':lock'
    rcall("SET", lockKey, opts['token'], "PX", opts['lockDuration'])
  end

  local optionalValues = {}

  if opts['name'] then
    -- Set "processedBy" field to the worker name
    table.insert(optionalValues, "pb")
    table.insert(optionalValues, opts['name'])
  end

  rcall("XADD", eventStreamKey, "*", "event", "active", "jobId", jobId, "prev", "waiting")
  rcall("HMSET", jobKey, "processedOn", processedOn, unpack(optionalValues))
  rcall("HINCRBY", jobKey, "ats", 1)

  addBaseMarkerIfNeeded(markerKey, false)

  -- rate limit delay must be 0 in this case to prevent adding more delay
  -- when job that is moved to active needs to be processed
  return {rcall("HGETALL", jobKey), jobId, 0, 0} -- get job data
end

backend/node_modules/bullmq/dist/esm/commands/includes/promoteDelayedJobs.lua (generated, vendored, normal file, 44 additions)
@@ -0,0 +1,44 @@
--[[
  Updates the delay set, by moving delayed jobs that should
  be processed now to "wait".

  Events:
    'waiting'
]]

-- Includes
--- @include "addBaseMarkerIfNeeded"
--- @include "addJobInTargetList"
--- @include "addJobWithPriority"
--- @include "getPriorityScore"

-- Try to get as much as 1000 jobs at once
local function promoteDelayedJobs(delayedKey, markerKey, targetKey, prioritizedKey,
  eventStreamKey, prefix, timestamp, priorityCounterKey, isPaused)
  local jobs = rcall("ZRANGEBYSCORE", delayedKey, 0, (timestamp + 1) * 0x1000 - 1, "LIMIT", 0, 1000)

  if (#jobs > 0) then
    rcall("ZREM", delayedKey, unpack(jobs))

    for _, jobId in ipairs(jobs) do
      local jobKey = prefix .. jobId
      local priority = tonumber(rcall("HGET", jobKey, "priority")) or 0

      if priority == 0 then
        -- LIFO or FIFO
        rcall("LPUSH", targetKey, jobId)
      else
        local score = getPriorityScore(priority, priorityCounterKey)
        rcall("ZADD", prioritizedKey, score, jobId)
      end

      -- Emit waiting event
      rcall("XADD", eventStreamKey, "*", "event", "waiting", "jobId",
        jobId, "prev", "delayed")
      rcall("HSET", jobKey, "delay", 0)
    end

    addBaseMarkerIfNeeded(markerKey, isPaused)
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/pushBackJobWithPriority.lua (generated, vendored, normal file, 9 additions)
@@ -0,0 +1,9 @@
--[[
  Function to push back job considering priority in front of same prioritized jobs.
]]
local function pushBackJobWithPriority(prioritizedKey, priority, jobId)
  -- in order to put it at front of same prioritized jobs
  -- we consider prioritized counter as 0
  local score = priority * 0x100000000
  rcall("ZADD", prioritizedKey, score, jobId)
end

backend/node_modules/bullmq/dist/esm/commands/includes/removeDeduplicationKeyIfNeededOnFinalization.lua (generated, vendored, normal file, 23 additions)
@@ -0,0 +1,23 @@
--[[
  Function to remove deduplication key if needed
  when a job is moved to completed or failed states.
]]

local function removeDeduplicationKeyIfNeededOnFinalization(prefixKey,
  deduplicationId, jobId)
  if deduplicationId then
    local deduplicationKey = prefixKey .. "de:" .. deduplicationId
    local pttl = rcall("PTTL", deduplicationKey)

    if pttl == 0 then
      return rcall("DEL", deduplicationKey)
    end

    if pttl == -1 then
      local currentJobId = rcall('GET', deduplicationKey)
      if currentJobId and currentJobId == jobId then
        return rcall("DEL", deduplicationKey)
      end
    end
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/removeDeduplicationKeyIfNeededOnRemoval.lua (generated, vendored, normal file, 15 additions)
@@ -0,0 +1,15 @@
--[[
  Function to remove deduplication key if needed
  when a job is being removed.
]]

local function removeDeduplicationKeyIfNeededOnRemoval(prefixKey,
  jobId, deduplicationId)
  if deduplicationId then
    local deduplicationKey = prefixKey .. "de:" .. deduplicationId
    local currentJobId = rcall('GET', deduplicationKey)
    if currentJobId and currentJobId == jobId then
      return rcall("DEL", deduplicationKey)
    end
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/removeJob.lua (generated, vendored, normal file, 18 additions)
@@ -0,0 +1,18 @@
--[[
  Function to remove job.
]]

-- Includes
--- @include "removeDeduplicationKeyIfNeededOnRemoval"
--- @include "removeJobKeys"
--- @include "removeParentDependencyKey"

local function removeJob(jobId, hard, baseKey, shouldRemoveDeduplicationKey)
  local jobKey = baseKey .. jobId
  removeParentDependencyKey(jobKey, hard, nil, baseKey)
  if shouldRemoveDeduplicationKey then
    local deduplicationId = rcall("HGET", jobKey, "deid")
    removeDeduplicationKeyIfNeededOnRemoval(baseKey, jobId, deduplicationId)
  end
  removeJobKeys(jobKey)
end

backend/node_modules/bullmq/dist/esm/commands/includes/removeJobFromAnyState.lua (generated, vendored, normal file, 35 additions)
@@ -0,0 +1,35 @@
--[[
  Function to remove from any state.

  returns:
    prev state
]]

local function removeJobFromAnyState(prefix, jobId)
  -- We start with the ZSCORE checks, since they have O(1) complexity
  if rcall("ZSCORE", prefix .. "completed", jobId) then
    rcall("ZREM", prefix .. "completed", jobId)
    return "completed"
  elseif rcall("ZSCORE", prefix .. "waiting-children", jobId) then
    rcall("ZREM", prefix .. "waiting-children", jobId)
    return "waiting-children"
  elseif rcall("ZSCORE", prefix .. "delayed", jobId) then
    rcall("ZREM", prefix .. "delayed", jobId)
    return "delayed"
  elseif rcall("ZSCORE", prefix .. "failed", jobId) then
    rcall("ZREM", prefix .. "failed", jobId)
    return "failed"
  elseif rcall("ZSCORE", prefix .. "prioritized", jobId) then
    rcall("ZREM", prefix .. "prioritized", jobId)
    return "prioritized"
  -- We remove only 1 element from the list, since we assume they are not added multiple times
  elseif rcall("LREM", prefix .. "wait", 1, jobId) == 1 then
    return "wait"
  elseif rcall("LREM", prefix .. "paused", 1, jobId) == 1 then
    return "paused"
  elseif rcall("LREM", prefix .. "active", 1, jobId) == 1 then
    return "active"
  end

  return "unknown"
end

backend/node_modules/bullmq/dist/esm/commands/includes/removeJobKeys.lua (generated, vendored, normal file, 8 additions)
@@ -0,0 +1,8 @@
--[[
  Function to remove job keys.
]]

local function removeJobKeys(jobKey)
  return rcall("DEL", jobKey, jobKey .. ':logs', jobKey .. ':dependencies',
    jobKey .. ':processed', jobKey .. ':failed', jobKey .. ':unsuccessful')
end

backend/node_modules/bullmq/dist/esm/commands/includes/removeJobWithChildren.lua (generated, vendored, normal file, 96 additions)
@@ -0,0 +1,96 @@
--[[
  Remove a job from all the statuses it may be in as well as all its data,
  including its children. Active children can be ignored.

  Events:
    'removed'
]]

local rcall = redis.call

-- Includes
--- @include "destructureJobKey"
--- @include "getOrSetMaxEvents"
--- @include "isJobSchedulerJob"
--- @include "removeDeduplicationKeyIfNeededOnRemoval"
--- @include "removeJobFromAnyState"
--- @include "removeJobKeys"
--- @include "removeParentDependencyKey"
--- @include "isLocked"

local removeJobChildren
local removeJobWithChildren

removeJobChildren = function(prefix, jobKey, options)
  -- Check if this job has children
  -- If so, we are going to try to remove the children recursively in a depth-first way
  -- because if some job is locked, we must exit with an error.

  if not options.ignoreProcessed then
    local processed = rcall("HGETALL", jobKey .. ":processed")
    if #processed > 0 then
      for i = 1, #processed, 2 do
        local childJobId = getJobIdFromKey(processed[i])
        local childJobPrefix = getJobKeyPrefix(processed[i], childJobId)
        removeJobWithChildren(childJobPrefix, childJobId, jobKey, options)
      end
    end

    local failed = rcall("HGETALL", jobKey .. ":failed")
    if #failed > 0 then
      for i = 1, #failed, 2 do
        local childJobId = getJobIdFromKey(failed[i])
        local childJobPrefix = getJobKeyPrefix(failed[i], childJobId)
        removeJobWithChildren(childJobPrefix, childJobId, jobKey, options)
      end
    end

    local unsuccessful = rcall("ZRANGE", jobKey .. ":unsuccessful", 0, -1)
    if #unsuccessful > 0 then
      for i = 1, #unsuccessful, 1 do
        local childJobId = getJobIdFromKey(unsuccessful[i])
        local childJobPrefix = getJobKeyPrefix(unsuccessful[i], childJobId)
        removeJobWithChildren(childJobPrefix, childJobId, jobKey, options)
      end
    end
  end

  local dependencies = rcall("SMEMBERS", jobKey .. ":dependencies")
  if #dependencies > 0 then
    for i, childJobKey in ipairs(dependencies) do
      local childJobId = getJobIdFromKey(childJobKey)
      local childJobPrefix = getJobKeyPrefix(childJobKey, childJobId)
      removeJobWithChildren(childJobPrefix, childJobId, jobKey, options)
    end
  end
end

removeJobWithChildren = function(prefix, jobId, parentKey, options)
  local jobKey = prefix .. jobId

  if options.ignoreLocked then
    if isLocked(prefix, jobId) then
      return
    end
  end

  -- Check if job is in the failed zset
  local failedSet = prefix .. "failed"
  if not (options.ignoreProcessed and rcall("ZSCORE", failedSet, jobId)) then
    removeParentDependencyKey(jobKey, false, parentKey, nil)

    if options.removeChildren then
      removeJobChildren(prefix, jobKey, options)
    end

    local prev = removeJobFromAnyState(prefix, jobId)
    local deduplicationId = rcall("HGET", jobKey, "deid")
    removeDeduplicationKeyIfNeededOnRemoval(prefix, jobId, deduplicationId)
    if removeJobKeys(jobKey) > 0 then
      local metaKey = prefix .. "meta"
      local maxEvents = getOrSetMaxEvents(metaKey)
      rcall("XADD", prefix .. "events", "MAXLEN", "~", maxEvents, "*", "event", "removed",
        "jobId", jobId, "prev", prev)
    end
  end
end

backend/node_modules/bullmq/dist/esm/commands/includes/removeJobs.lua (generated, vendored, normal file, 13 additions)
@@ -0,0 +1,13 @@
--[[
  Functions to remove jobs.
]]

-- Includes
--- @include "removeJob"

local function removeJobs(keys, hard, baseKey, max)
  for i, key in ipairs(keys) do
    removeJob(key, hard, baseKey, true --[[remove debounce key]])
  end
  return max - #keys
end

15
backend/node_modules/bullmq/dist/esm/commands/includes/removeJobsByMaxAge.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
--[[
Functions to remove jobs by max age.
]]

-- Includes
--- @include "removeJob"

local function removeJobsByMaxAge(timestamp, maxAge, targetSet, prefix, maxLimit)
  local start = timestamp - maxAge * 1000
  local jobIds = rcall("ZREVRANGEBYSCORE", targetSet, start, "-inf", "LIMIT", 0, maxLimit)
  for i, jobId in ipairs(jobIds) do
    removeJob(jobId, false, prefix, false --[[remove debounce key]])
  end
  rcall("ZREMRANGEBYSCORE", targetSet, "-inf", start)
end
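A short, hypothetical usage sketch, not taken from the package: since the function multiplies maxAge by 1000, maxAge is expected in seconds while timestamp is in milliseconds. Key names below are assumptions.

-- Remove up to 1000 jobs from the completed set that finished more than
-- one hour before `now`.
local now = 1700000000000  -- example millisecond timestamp
removeJobsByMaxAge(now, 3600, "bull:myqueue:completed", "bull:myqueue:", 1000)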
15
backend/node_modules/bullmq/dist/esm/commands/includes/removeJobsByMaxCount.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
--[[
Functions to remove jobs by max count.
]]

-- Includes
--- @include "removeJob"

local function removeJobsByMaxCount(maxCount, targetSet, prefix)
  local start = maxCount
  local jobIds = rcall("ZREVRANGE", targetSet, start, -1)
  for i, jobId in ipairs(jobIds) do
    removeJob(jobId, false, prefix, false --[[remove debounce key]])
  end
  rcall("ZREMRANGEBYRANK", targetSet, 0, -(maxCount + 1))
end
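Purely illustrative, with an assumed key layout: because ZREVRANGE starts at index maxCount, everything except the newest maxCount members is removed, so a call like the following would keep only the 500 most recent completed jobs.

-- Hypothetical call; key names are placeholders.
removeJobsByMaxCount(500, "bull:myqueue:completed", "bull:myqueue:")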
23
backend/node_modules/bullmq/dist/esm/commands/includes/removeListJobs.lua
generated
vendored
Normal file
@@ -0,0 +1,23 @@
--[[
Functions to remove jobs.
]]

-- Includes
--- @include "filterOutJobsToIgnore"
--- @include "removeJobs"

local function getListItems(keyName, max)
  return rcall('LRANGE', keyName, 0, max - 1)
end

local function removeListJobs(keyName, hard, baseKey, max, jobsToIgnore)
  local jobs = getListItems(keyName, max)

  if jobsToIgnore then
    jobs = filterOutJobsToIgnore(jobs, jobsToIgnore)
  end

  local count = removeJobs(jobs, hard, baseKey, max)
  rcall("LTRIM", keyName, #jobs, -1)
  return count
end
19
backend/node_modules/bullmq/dist/esm/commands/includes/removeLock.lua
generated
vendored
Normal file
@@ -0,0 +1,19 @@
local function removeLock(jobKey, stalledKey, token, jobId)
  if token ~= "0" then
    local lockKey = jobKey .. ':lock'
    local lockToken = rcall("GET", lockKey)
    if lockToken == token then
      rcall("DEL", lockKey)
      rcall("SREM", stalledKey, jobId)
    else
      if lockToken then
        -- Lock exists but token does not match
        return -6
      else
        -- Lock is missing completely
        return -2
      end
    end
  end
  return 0
end
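A hedged sketch of how a caller might interpret the return codes above (0 means the lock was released or no token was given, -6 means the token did not match, -2 means the lock was missing). The keys and token string are invented for the example.

local errorCode = removeLock("bull:myqueue:42", "bull:myqueue:stalled", "some-worker-token", "42")
if errorCode < 0 then
  return errorCode  -- propagate -6 or -2 to the caller
end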
90
backend/node_modules/bullmq/dist/esm/commands/includes/removeParentDependencyKey.lua
generated
vendored
Normal file
@@ -0,0 +1,90 @@
--[[
Check if this job has a parent. If so, we just remove it from the
parent's child list, but if it was the last pending child we must move the
parent to "wait"/"paused", which requires code from "moveToFinished".
]]

-- Includes
--- @include "addJobInTargetList"
--- @include "destructureJobKey"
--- @include "getTargetQueueList"
--- @include "removeJobKeys"

local function _moveParentToWait(parentPrefix, parentId, emitEvent)
  local parentTarget, isPausedOrMaxed = getTargetQueueList(parentPrefix .. "meta", parentPrefix .. "active",
    parentPrefix .. "wait", parentPrefix .. "paused")
  addJobInTargetList(parentTarget, parentPrefix .. "marker", "RPUSH", isPausedOrMaxed, parentId)

  if emitEvent then
    local parentEventStream = parentPrefix .. "events"
    rcall("XADD", parentEventStream, "*", "event", "waiting", "jobId", parentId, "prev", "waiting-children")
  end
end

local function removeParentDependencyKey(jobKey, hard, parentKey, baseKey, debounceId)
  if parentKey then
    local parentDependenciesKey = parentKey .. ":dependencies"
    local result = rcall("SREM", parentDependenciesKey, jobKey)
    if result > 0 then
      local pendingDependencies = rcall("SCARD", parentDependenciesKey)
      if pendingDependencies == 0 then
        local parentId = getJobIdFromKey(parentKey)
        local parentPrefix = getJobKeyPrefix(parentKey, parentId)

        local numRemovedElements = rcall("ZREM", parentPrefix .. "waiting-children", parentId)

        if numRemovedElements == 1 then
          if hard then -- remove parent in same queue
            if parentPrefix == baseKey then
              removeParentDependencyKey(parentKey, hard, nil, baseKey, nil)
              removeJobKeys(parentKey)
              if debounceId then
                rcall("DEL", parentPrefix .. "de:" .. debounceId)
              end
            else
              _moveParentToWait(parentPrefix, parentId)
            end
          else
            _moveParentToWait(parentPrefix, parentId, true)
          end
        end
      end
      return true
    end
  else
    local parentAttributes = rcall("HMGET", jobKey, "parentKey", "deid")
    local missedParentKey = parentAttributes[1]
    if( (type(missedParentKey) == "string") and missedParentKey ~= ""
        and (rcall("EXISTS", missedParentKey) == 1)) then
      local parentDependenciesKey = missedParentKey .. ":dependencies"
      local result = rcall("SREM", parentDependenciesKey, jobKey)
      if result > 0 then
        local pendingDependencies = rcall("SCARD", parentDependenciesKey)
        if pendingDependencies == 0 then
          local parentId = getJobIdFromKey(missedParentKey)
          local parentPrefix = getJobKeyPrefix(missedParentKey, parentId)

          local numRemovedElements = rcall("ZREM", parentPrefix .. "waiting-children", parentId)

          if numRemovedElements == 1 then
            if hard then
              if parentPrefix == baseKey then
                removeParentDependencyKey(missedParentKey, hard, nil, baseKey, nil)
                removeJobKeys(missedParentKey)
                if parentAttributes[2] then
                  rcall("DEL", parentPrefix .. "de:" .. parentAttributes[2])
                end
              else
                _moveParentToWait(parentPrefix, parentId)
              end
            else
              _moveParentToWait(parentPrefix, parentId, true)
            end
          end
        end
        return true
      end
    end
  end
  return false
end
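For illustration only, with placeholder keys: a child job can be detached from its parent either by passing the parent key explicitly or by letting the function read "parentKey" and "deid" from the job hash itself (the else branch above).

-- Explicit parent key, soft removal (hard = false), no debounce id:
removeParentDependencyKey("bull:myqueue:child-7", false, "bull:myqueue:parent-1", "bull:myqueue:", nil)
-- Or let the function look the parent up from the job hash:
removeParentDependencyKey("bull:myqueue:child-7", false, nil, "bull:myqueue:", nil)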
21
backend/node_modules/bullmq/dist/esm/commands/includes/removeZSetJobs.lua
generated
vendored
Normal file
@@ -0,0 +1,21 @@
-- Includes
--- @include "batches"
--- @include "filterOutJobsToIgnore"
--- @include "getZSetItems"
--- @include "removeJobs"

local function removeZSetJobs(keyName, hard, baseKey, max, jobsToIgnore)
  local jobs = getZSetItems(keyName, max)

  if jobsToIgnore then
    jobs = filterOutJobsToIgnore(jobs, jobsToIgnore)
  end

  local count = removeJobs(jobs, hard, baseKey, max)
  if(#jobs > 0) then
    for from, to in batches(#jobs, 7000) do
      rcall("ZREM", keyName, unpack(jobs, from, to))
    end
  end
  return count
end
36
backend/node_modules/bullmq/dist/esm/commands/includes/storeJob.lua
generated
vendored
Normal file
@@ -0,0 +1,36 @@
--[[
Function to store a job
]]
local function storeJob(eventsKey, jobIdKey, jobId, name, data, opts, timestamp,
                        parentKey, parentData, repeatJobKey)
  local jsonOpts = cjson.encode(opts)
  local delay = opts['delay'] or 0
  local priority = opts['priority'] or 0
  local debounceId = opts['de'] and opts['de']['id']

  local optionalValues = {}
  if parentKey ~= nil then
    table.insert(optionalValues, "parentKey")
    table.insert(optionalValues, parentKey)
    table.insert(optionalValues, "parent")
    table.insert(optionalValues, parentData)
  end

  if repeatJobKey then
    table.insert(optionalValues, "rjk")
    table.insert(optionalValues, repeatJobKey)
  end

  if debounceId then
    table.insert(optionalValues, "deid")
    table.insert(optionalValues, debounceId)
  end

  rcall("HMSET", jobIdKey, "name", name, "data", data, "opts", jsonOpts,
        "timestamp", timestamp, "delay", delay, "priority", priority,
        unpack(optionalValues))

  rcall("XADD", eventsKey, "*", "event", "added", "jobId", jobId, "name", name)

  return delay, priority
end
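A minimal, hypothetical example of a call to this helper. The option fields ('delay', 'priority', 'de') are the ones handled in the code above; the keys, job id, name, payload and timestamp are invented for the sketch.

local opts = { delay = 5000, priority = 3, de = { id = "invoice-123" } }
local delay, priority = storeJob("bull:myqueue:events", "bull:myqueue:42", "42",
                                 "sendInvoice", '{"invoiceId":123}', opts,
                                 1700000000000, nil, nil, nil)
-- delay == 5000 and priority == 3 here; a "deid" field was also stored on the hash.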
66
backend/node_modules/bullmq/dist/esm/commands/includes/storeJobScheduler.lua
generated
vendored
Normal file
@@ -0,0 +1,66 @@
--[[
Function to store a job scheduler
]]
local function storeJobScheduler(schedulerId, schedulerKey, repeatKey, nextMillis, opts,
                                 templateData, templateOpts)
  rcall("ZADD", repeatKey, nextMillis, schedulerId)

  local optionalValues = {}
  if opts['tz'] then
    table.insert(optionalValues, "tz")
    table.insert(optionalValues, opts['tz'])
  end

  if opts['limit'] then
    table.insert(optionalValues, "limit")
    table.insert(optionalValues, opts['limit'])
  end

  if opts['pattern'] then
    table.insert(optionalValues, "pattern")
    table.insert(optionalValues, opts['pattern'])
  end

  if opts['startDate'] then
    table.insert(optionalValues, "startDate")
    table.insert(optionalValues, opts['startDate'])
  end

  if opts['endDate'] then
    table.insert(optionalValues, "endDate")
    table.insert(optionalValues, opts['endDate'])
  end

  if opts['every'] then
    table.insert(optionalValues, "every")
    table.insert(optionalValues, opts['every'])
  end

  if opts['offset'] then
    table.insert(optionalValues, "offset")
    table.insert(optionalValues, opts['offset'])
  else
    local offset = rcall("HGET", schedulerKey, "offset")
    if offset then
      table.insert(optionalValues, "offset")
      table.insert(optionalValues, tonumber(offset))
    end
  end

  local jsonTemplateOpts = cjson.encode(templateOpts)
  if jsonTemplateOpts and jsonTemplateOpts ~= '{}' then
    table.insert(optionalValues, "opts")
    table.insert(optionalValues, jsonTemplateOpts)
  end

  if templateData and templateData ~= '{}' then
    table.insert(optionalValues, "data")
    table.insert(optionalValues, templateData)
  end

  table.insert(optionalValues, "ic")
  table.insert(optionalValues, rcall("HGET", schedulerKey, "ic") or 1)

  rcall("DEL", schedulerKey) -- remove all attributes and then re-insert new ones
  rcall("HMSET", schedulerKey, "name", opts['name'], unpack(optionalValues))
end
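Illustrative only: storing a cron-style scheduler entry. The option keys mirror the ones the function inspects above; the scheduler id, keys, timestamp and template values are made up for this sketch.

local schedulerOpts = { name = "nightly-report", pattern = "0 3 * * *", tz = "Europe/Berlin", limit = 30 }
storeJobScheduler("nightly-report", "bull:myqueue:repeat:nightly-report",
                  "bull:myqueue:repeat", 1700000000000, schedulerOpts,
                  '{"type":"summary"}', { attempts = 3 })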
15
backend/node_modules/bullmq/dist/esm/commands/includes/trimEvents.lua
generated
vendored
Normal file
@@ -0,0 +1,15 @@
--[[
Function to trim events, default 10000.
]]

-- Includes
--- @include "getOrSetMaxEvents"

local function trimEvents(metaKey, eventStreamKey)
  local maxEvents = getOrSetMaxEvents(metaKey)
  if maxEvents then
    rcall("XTRIM", eventStreamKey, "MAXLEN", "~", maxEvents)
  else
    rcall("XTRIM", eventStreamKey, "MAXLEN", "~", 10000)
  end
end
27
backend/node_modules/bullmq/dist/esm/commands/includes/updateExistingJobsParent.lua
generated
vendored
Normal file
@@ -0,0 +1,27 @@
--[[
This function is used to update the parent's dependencies if the job
is already completed and about to be ignored. The parent must get its
dependencies updated to avoid the parent job being stuck forever in
the waiting-children state.
]]

-- Includes
--- @include "updateParentDepsIfNeeded"

local function updateExistingJobsParent(parentKey, parent, parentData,
                                        parentDependenciesKey, completedKey,
                                        jobIdKey, jobId, timestamp)
  if parentKey ~= nil then
    if rcall("ZSCORE", completedKey, jobId) then
      local returnvalue = rcall("HGET", jobIdKey, "returnvalue")
      updateParentDepsIfNeeded(parentKey, parent['queueKey'],
                               parentDependenciesKey, parent['id'],
                               jobIdKey, returnvalue, timestamp)
    else
      if parentDependenciesKey ~= nil then
        rcall("SADD", parentDependenciesKey, jobIdKey)
      end
    end
    rcall("HMSET", jobIdKey, "parentKey", parentKey, "parent", parentData)
  end
end
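A hedged sketch of a call, assuming the parent table carries the 'id' and 'queueKey' fields read above; all keys, ids and the timestamp are placeholders.

local parent = { id = "parent-1", queueKey = "bull:myqueue" }
updateExistingJobsParent("bull:myqueue:parent-1", parent, '{"id":"parent-1","queue":"bull:myqueue"}',
                         "bull:myqueue:parent-1:dependencies", "bull:myqueue:completed",
                         "bull:myqueue:42", "42", 1700000000000)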
11
backend/node_modules/bullmq/dist/esm/commands/includes/updateJobFields.lua
generated
vendored
Normal file
@@ -0,0 +1,11 @@
--[[
Function to update a bunch of fields in a job.
]]
local function updateJobFields(jobKey, msgpackedFields)
  if msgpackedFields and #msgpackedFields > 0 then
    local fieldsToUpdate = cmsgpack.unpack(msgpackedFields)
    if fieldsToUpdate then
      rcall("HMSET", jobKey, unpack(fieldsToUpdate))
    end
  end
end
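For context, a tiny sketch of how a caller could produce the msgpack payload this helper expects: a flat array of alternating field names and values. The job key and field values are assumptions; cmsgpack is the msgpack binding available inside Redis Lua scripts.

-- Hypothetical example payload and key.
local payload = cmsgpack.pack({ "progress", "50", "failedReason", "" })
updateJobFields("bull:myqueue:42", payload)  -- results in HMSET bull:myqueue:42 progress 50 failedReason ""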
13
backend/node_modules/bullmq/dist/esm/commands/includes/updateParentDepsIfNeeded.lua
generated
vendored
Normal file
@@ -0,0 +1,13 @@
--[[
Validate and move or add dependencies to parent.
]]

-- Includes
--- @include "moveParentToWaitIfNoPendingDependencies"

local function updateParentDepsIfNeeded(parentKey, parentQueueKey, parentDependenciesKey,
                                        parentId, jobIdKey, returnvalue, timestamp)
  local processedSet = parentKey .. ":processed"
  rcall("HSET", processedSet, jobIdKey, returnvalue)
  moveParentToWaitIfNoPendingDependencies(parentQueueKey, parentDependenciesKey, parentKey, parentId, timestamp)
end