Change account deletion requests to spread out over time (#20222)
parent e98833748e
commit 5333447be0
@@ -3,7 +3,7 @@
 class Admin::AccountDeletionWorker
   include Sidekiq::Worker
 
-  sidekiq_options queue: 'pull'
+  sidekiq_options queue: 'pull', lock: :until_executed
 
   def perform(account_id)
     DeleteAccountService.new.call(Account.find(account_id), reserve_username: true, reserve_email: true)
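The new `lock: :until_executed` option is what keeps the same account from being queued for deletion twice while a job is still pending. A rough sketch of the intended effect, assuming the option is honoured by a Sidekiq uniqueness layer such as the sidekiq-unique-jobs gem (not part of this diff):

# Hypothetical console session, not part of the commit. Assumes Sidekiq plus
# a uniqueness middleware that understands `lock: :until_executed`.
account_id = 42 # placeholder id

Admin::AccountDeletionWorker.perform_async(account_id) # enqueued on the 'pull' queue
Admin::AccountDeletionWorker.perform_async(account_id) # duplicate; skipped until the first job has executed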
app/workers/scheduler/suspended_user_cleanup_scheduler.rb (new file, 38 lines)
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+class Scheduler::SuspendedUserCleanupScheduler
+  include Sidekiq::Worker
+
+  # Each processed deletion request may enqueue an enormous
+  # amount of jobs in the `pull` queue, so only enqueue when
+  # the queue is empty or close to being so.
+  MAX_PULL_SIZE = 50
+
+  # Since account deletion is very expensive, we want to avoid
+  # overloading the server by queueing too much at once.
+  # This job runs approximately once per 2 minutes, so with a
+  # value of `MAX_DELETIONS_PER_JOB` of 10, a server can
+  # handle the deletion of 7200 accounts per day, provided it
+  # has the capacity for it.
+  MAX_DELETIONS_PER_JOB = 10
+
+  sidekiq_options retry: 0
+
+  def perform
+    return if Sidekiq::Queue.new('pull').size > MAX_PULL_SIZE
+
+    clean_suspended_accounts!
+  end
+
+  private
+
+  def clean_suspended_accounts!
+    # This should be fine because we only process a small amount of deletion requests at once and
+    # `id` and `created_at` should follow the same order.
+    AccountDeletionRequest.reorder(id: :asc).take(MAX_DELETIONS_PER_JOB).each do |deletion_request|
+      next unless deletion_request.created_at < AccountDeletionRequest::DELAY_TO_DELETION.ago
+
+      Admin::AccountDeletionWorker.perform_async(deletion_request.account_id)
+    end
+  end
+end
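The throughput quoted in the comment above is easy to verify: at roughly one run every 2 minutes (as the comment states) and at most MAX_DELETIONS_PER_JOB deletions per run, the scheduler enqueues at most 7200 deletions per day. A standalone Ruby check:

# Back-of-the-envelope check of the "7200 accounts per day" figure,
# assuming one scheduler run every 2 minutes as the comment above states.
runs_per_day = (24 * 60) / 2              # => 720
max_deletions_per_job = 10                # mirrors MAX_DELETIONS_PER_JOB
puts runs_per_day * max_deletions_per_job # => 7200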
@@ -7,7 +7,6 @@ class Scheduler::UserCleanupScheduler
 
   def perform
     clean_unconfirmed_accounts!
-    clean_suspended_accounts!
     clean_discarded_statuses!
   end
 
@@ -22,12 +21,6 @@ class Scheduler::UserCleanupScheduler
     end
   end
 
-  def clean_suspended_accounts!
-    AccountDeletionRequest.where('created_at <= ?', AccountDeletionRequest::DELAY_TO_DELETION.ago).reorder(nil).find_each do |deletion_request|
-      Admin::AccountDeletionWorker.perform_async(deletion_request.account_id)
-    end
-  end
-
   def clean_discarded_statuses!
     Status.unscoped.discarded.where('deleted_at <= ?', 30.days.ago).find_in_batches do |statuses|
      RemovalWorker.push_bulk(statuses) do |status|
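The removed clean_suspended_accounts! above drained every eligible AccountDeletionRequest in a single scheduler run; the new worker instead caps each run at MAX_DELETIONS_PER_JOB and skips the run entirely when the `pull` queue already has a backlog. A minimal sketch of that backpressure check, using Sidekiq's public queue API with the queue name and threshold taken from the diff (it needs a running Sidekiq/Redis setup to return real numbers):

require 'sidekiq/api'

# Skip heavy work while the 'pull' queue is backed up (threshold mirrors MAX_PULL_SIZE).
pull_backlog = Sidekiq::Queue.new('pull').size
if pull_backlog > 50
  puts "pull backlog is #{pull_backlog}; skipping this run"
else
  puts 'queue is nearly empty; safe to enqueue deletions'
end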
@@ -53,3 +53,7 @@
     interval: 1 minute
     class: Scheduler::AccountsStatusesCleanupScheduler
     queue: scheduler
+  suspended_user_cleanup_scheduler:
+    interval: 1 minute
+    class: Scheduler::SuspendedUserCleanupScheduler
+    queue: scheduler
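With the schedule entry above in place, the scheduler (sidekiq-scheduler in Mastodon's setup) triggers the worker automatically at the configured interval. For a one-off pass outside the scheduler, the worker could presumably be run inline; this is a hypothetical usage sketch, not part of the commit:

# Hypothetical one-off invocation from a Rails console on a Mastodon install;
# performs a single cleanup pass through the same code path the schedule triggers.
Scheduler::SuspendedUserCleanupScheduler.new.perform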