counter.times do
  count_var += 1
  range1 = process_limit * count_var
  sms = Delayed::Job.enqueue(SmsManager.new(arr_msg_array[i1...range1], recipients[i1...range1],
                                            student_type[i1...range1], reciver_type[i1...range1],
                                            reciver_id[i1...range1], created_by[i1...range1]))
  i1 = range1
end
----------
class SmsManager
  attr_accessor :message, :recipients, :student_type, :reciver_type, :reciver_id, :created_by

  def initialize(message, recipients, student_type, reciver_type, reciver_id, created_by)
    @message      = message
    @recipients   = recipients
    @student_type = student_type
    @reciver_type = reciver_type
    @reciver_id   = reciver_id
    @created_by   = created_by
  end

  def perform
    recipients.each_with_index do |recipient, i|
      # send one SMS per recipient
    end
  end
end
Whenever one of these jobs fails, the delayed_job handler retries that job's entire slice of the array, once per configured retry attempt.
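If a retry must not re-send messages to recipients that were already handled, each slice needs to be its own job (as above) and the job can cap its own retries. A minimal sketch of the same batching written with each_slice, assuming the six parallel arrays are all the same length (illustrative, not the original poster's code):

# Enqueue one SmsManager per slice, so a failure only retries that slice.
batch = arr_msg_array.zip(recipients, student_type, reciver_type, reciver_id, created_by)
batch.each_slice(process_limit) do |slice|
  msgs, rcpts, stypes, rtypes, rids, creators = slice.transpose
  Delayed::Job.enqueue(SmsManager.new(msgs, rcpts, stypes, rtypes, rids, creators))
end

delayed_job also consults max_attempts on the payload object, so defining def max_attempts; 1; end inside SmsManager would disable retries for these jobs entirely.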
Related
I have three Rails jobs to process players' yellow/red cards in a soccer tournament, and the penalties these players will receive for getting those cards.
The idea is that the first job collects all Incidences (an Incidence is, for example, when a Player gets a yellow card) and counts all the cards each Player got.
class ProcessBookedPlayersJob < ApplicationJob
  queue_as :default

  @cards = []

  def perform(*args)
    @cards = []
    yellows = calculate_cards(1)
    reds = calculate_cards(2)
    @cards << yellows << reds
  end

  def after_perform(*match)
    # ProcessPenaltiesJob.perform_later @cards
    ProcessPenalties.perform_later @cards
    # PenaltiesFinalizerJob.perform_later match
    PenaltiesFinalizer.perform_later match
  end

  def calculate_cards(card_type)
    cards = Hash.new
    players = Player.fetch_players_with_active_incidences
    players.each do |p|
      # 1 is yellow, 2 is red
      counted_cards = Incidence.incidences_for_player_id_and_incidence_type(p.id, card_type).size
      cards[p] = counted_cards
    end
    return cards
  end
end
This first job is executed when an Incidence is created.
class Incidence < ApplicationRecord
  belongs_to :player
  belongs_to :match

  after_create :process_incidences # fire only when the record is first created

  def self.incidences_for_player_id_and_incidence_type(player_id, card_type)
    return Incidence.where(status: 1).where(incidence_type: card_type).where(player_id: player_id)
  end

  protected

  def process_incidences
    ProcessBookedPlayersJob.perform_later
  end
end
After this, another job runs and creates the necessary Penalties (a Penalty is a ban for the next Match, for example) according to the Hash output that the previous job created.
class ProcessPenaltiesJob < ApplicationJob
  queue_as :default

  def perform(*cards)
    yellows = cards[0]
    reds = cards[1]
    create_penalties_for_yellow_cards(yellows)
    create_penalties_for_red_cards(reds)
  end
  # rest of the job...
And there's also another job that sets these bans as disabled once they have expired.
class PenaltiesFinalizerJob < ApplicationJob
  queue_as :default

  def perform(match)
    active_penalties = Penalty.where(status: 1) # find_by would return only a single record
    active_penalties.each do |p|
      # penalty.starting_match.order + penalty.length == order of the inserted match (check whether it should be >=)
      if p.match.order + p.length >= match.order
        p.status = 2 # Inactivate
        p.save!
      end
    end
  end
end
As you can see in ProcessBookedPlayersJob's after_perform method:

def after_perform(*match)
  ProcessPenalties.perform_later @cards
  PenaltiesFinalizer.perform_later match
end
I'm trying to get those two other jobs (ProcessPenaltiesJob and PenaltiesFinalizerJob) executed, with no luck. ProcessBookedPlayersJob itself is being executed (I can see this in the log):
[ActiveJob] [ProcessBookedPlayersJob] [dbb8445e-a706-4443-9cb8-2c45f49a4f8f] Performed ProcessBookedPlayersJob (Job ID: dbb8445e-a706-4443-9cb8-2c45f49a4f8f) from Async(default) in 38.81ms
But the other two jobs aren't executed. So, how can I get both ProcessPenaltiesJob and PenaltiesFinalizerJob to run after ProcessBookedPlayersJob has finished? I don't mind if they run in parallel with each other, but they need to run after the first one finishes, since they need its output as their input.
I have searched for this, and the closest match I found was this answer. Quoting it:
If the sequential jobs you are talking about however are of different jobs / class, then you can just call the other job once the first job has finished.
That's exactly the behaviour I'm trying to have... but how can I get my jobs to run sequentially?
For now, I'm thinking of moving the first job's logic into Incidence's after_save hook, but that doesn't feel natural. Is there any other way to pipeline the execution of my jobs?
Many thanks in advance
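Following the quoted advice, the most direct way to sequence jobs of different classes is to enqueue the follow-up jobs at the end of the first job's perform. Two things in the code above work against that: after_perform is registered in ActiveJob as a class-level callback (an instance method with that name is never invoked automatically), and the snippet calls ProcessPenalties / PenaltiesFinalizer while the classes are defined as ProcessPenaltiesJob / PenaltiesFinalizerJob. A minimal sketch of explicit chaining, not the original code; it keys the card hashes by player id so the arguments serialize cleanly:

class ProcessBookedPlayersJob < ApplicationJob
  queue_as :default

  def perform(match)
    yellows = calculate_cards(1) # { player_id => count }
    reds    = calculate_cards(2)

    # These run only after the calculations above finished without raising,
    # so both follow-up jobs always see the first job's output.
    ProcessPenaltiesJob.perform_later(yellows, reds)
    PenaltiesFinalizerJob.perform_later(match)
  end
end

Equivalently, the callback form is after_perform do |job| ... end declared at the class level.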
I need to fetch transaction data from a third-party API and save the records periodically (once a month). Here is an example:
class BalanceTransaction::Update
  include Service

  attr_reader :offset, :transactions

  def initialize(offset)
    @offset = offset
    @transactions = fetch_transactions
  end

  def call
    ActiveRecord::Base.transaction do
      transactions.auto_paging_each do |txn|
        type = txn[:type]
        source = txn[:source]
        case type
        when 'charge', 'adjustment'
          invoice = find_invoice(source)
          ac_id = invoice.account.id
          update_attrs(id: invoice.id, account_id: ac_id, type: 'invoice', attrs: txn)
        when 'refund'
          refund = find_refund(source)
          ac_id = refund.invoice.account.id
          update_attrs(id: refund.id, account_id: ac_id, type: 'refund', attrs: txn)
        end
      end
    end
    true
  end

  private

  def find_invoice(source)
    Invoice.find_by!(stripe_charge_id: source)
  end

  def find_refund(source)
    Refund.find_by!(stripe_refund_id: source)
  end

  def update_attrs(id:, account_id:, type:, attrs:)
    BalanceTransaction.create(
      account_id: account_id,
      stripe_transaction_id: attrs[:id],
      gross_amount: attrs[:amount],
      net_amount: attrs[:net],
      fee_amount: attrs[:fee],
      currency: attrs[:currency],
      transactionable_id: id,
      transactionable_type: type)
  end

  def fetch_transactions
    external_card_balance.all(limit: offset)
  rescue *EXTERNAL_CARD_ERRORS => e
    ExternalCardErrorHandler.new(e).handle
  end

  def external_card_balance
    Stripe::BalanceTransaction
  end
end
I wonder how to make this bulk insert idempotent across runs. Should I check created_at and delete any records created after the offset? Could you give me some advice?
Does the transaction have a unique ID, or any field that could be made unique? Maybe you could use validates_uniqueness_of to avoid saving transactions you have already fetched.
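A sketch of that suggestion, assuming the balance_transactions table has the stripe_transaction_id column used above (Stripe's "txn_..." identifiers are unique, so they make a natural idempotency key); the migration class name is illustrative:

class BalanceTransaction < ApplicationRecord
  # Re-running the import now skips transactions that were already saved.
  validates :stripe_transaction_id, uniqueness: true
end

class AddUniqueIndexToBalanceTransactions < ActiveRecord::Migration[5.2]
  def change
    # A unique index backs the validation so concurrent imports cannot race past it.
    add_index :balance_transactions, :stripe_transaction_id, unique: true
  end
end

With that in place, the create call in update_attrs simply fails validation for duplicates instead of inserting them, so there is no need to delete rows based on created_at.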
I haven't had much experience with deadlocking issues in the past, but the more I work with ActiveJob and process those jobs concurrently, the more I run into this problem. An example of one job chain that causes it is shown below: I start ImportGameParticipationsJob, and it queues up a bunch of CreateOrUpdateGameParticipationJobs.
While trying to stop my SQL Server from alerting me to a ton of deadlock errors, where is the cause likely to be below? Can I get a deadlock from simply selecting records to populate an object? Or can it really only happen when I attempt to save/update the record within my process_records method?
ImportGameParticipationsJob
class ImportGameParticipationsJob < ActiveJob::Base
  queue_as :default

  def perform(*args)
    import_participations(args.first.presence)
  end

  def import_participations(*args)
    games = Game.where(season: 2016)
    games.each do |extract_record|
      CreateOrUpdateGameParticipationJob.perform_later(extract_record.game_key)
    end
  end
end
CreateOrUpdateGameParticipationJob
class CreateOrUpdateGameParticipationJob < ActiveJob::Base
  queue_as :import_queue

  def perform(*args)
    if args.first.present?
      game_key = args.first
      # get all participations for a given game
      game_participations = GameRoster.where(game_key: game_key)
      process_records(game_participations)
    end
  end

  def process_records(participations)
    # Loop through participations and build records for saving
    participations.each do |participation|
      next unless participation.try(:player_id)
      record = create_or_find(participation)
      record = update_record(record, participation)
      begin
        record.save if record.valid?
      rescue Exception => e
        # swallow the error and continue with the next participation
      end
    end
  end

  def create_or_find(participation)
    participation_record = GameParticipation.where(
      game_id: participation.game.try(:id),
      player_id: participation.player.try(:id))
      .first_or_initialize do |record|
        record.game = Game.find_by(game_key: participation.game_key)
        record.player = Player.find_by(id: participation.player_id)
        record.club = Club.find_by(club_id: participation.club_id)
        record.status = parse_status(participation.player_status)
      end
    return participation_record
  end

  def update_record(record, participation)
    old_status = record.status
    new_status = parse_status(participation.player_status)
    if old_status != new_status
      record.status = new_status
      record.comment = "status was updated via participations import job"
    end
    return record
  end
end
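For what it's worth: plain SELECTs can take part in SQL Server deadlocks (readers hold shared locks that can cross with writers), but the likelier culprit in this pattern is the concurrent find-then-save in create_or_find, where two workers both initialize the same missing GameParticipation and collide on insert/update. One application-level mitigation is to retry the deadlock victim; a sketch, assuming Rails 5.1+ where the adapter raises ActiveRecord::Deadlocked for deadlock victims:

# Retry a save that was chosen as the deadlock victim; by the time we
# retry, the competing worker's transaction has usually committed.
def save_with_deadlock_retry(record, attempts = 3)
  record.save
rescue ActiveRecord::Deadlocked
  attempts -= 1
  retry if attempts > 0
  raise
end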
They recently updated the gem and added an option you can set that should help with the deadlocking. I had the same issue on 4.1; moving to 4.1.1 fixed it for me.
https://github.com/collectiveidea/delayed_job_active_record
https://rubygems.org/gems/delayed_job_active_record
Problems locking jobs
You can try using the legacy locking code. It is usually slower but works better for certain people.
Delayed::Backend::ActiveRecord.configuration.reserve_sql_strategy = :default_sql
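In a Rails app this setting usually goes in an initializer; the file name below is only a convention:

# config/initializers/delayed_job.rb
Delayed::Backend::ActiveRecord.configuration.reserve_sql_strategy = :default_sql

The default :optimized_sql strategy reserves jobs with a single UPDATE, which is fast but has been reported to deadlock on some databases, SQL Server included; :default_sql falls back to the older SELECT-then-UPDATE locking code.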
I need to mark a collection of messages as read in the background (I am using the delayed_job gem), since doing it in the foreground takes some time. So I've created an ActiveJob class, MarkMessagesAsReadJob, and passed it user and messages variables in order to mark all of the messages as read for that user.
# passing the values in the controller
@messages = @conversation.messages
MarkMessagesAsReadJob.perform_later(current_user, @messages)
and in my ActiveJob class, I perform the task.
# mark_messages_as_read_job.rb
class MarkMessagesAsReadJob < ActiveJob::Base
  queue_as :default

  def perform(user, messages)
    messages.mark_as_read! :all, :for => user
  end
end
However, when I tried to perform the task, I got the error
ActiveJob::SerializationError (Unsupported argument type: ActiveRecord::Associations::CollectionProxy):
I read that we can only pass supported types to ActiveJob, and I think it cannot serialize the CollectionProxy object. How can I work around/fix this?
PS: I considered

@messages.map { |message| MarkMessagesAsReadJob.perform_later(current_user, message) }

but I think marking them one by one is pretty expensive.
I think the easiest way is to pass the message IDs to the perform_later() method, for example:

In the controller:

@messages = @conversation.messages
message_ids = @messages.pluck(:id)
MarkMessagesAsReadJob.perform_later(current_user, message_ids)
And use them in the ActiveJob:

def perform(user, message_ids)
  messages = Message.where(id: message_ids)
  messages.mark_as_read! :all, :for => user
end
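Note that passing current_user directly still works: ActiveJob serializes individual ActiveRecord models with GlobalID and reloads them inside the job. Only unserializable types such as ActiveRecord::Associations::CollectionProxy raise the SerializationError, so this call is fine:

MarkMessagesAsReadJob.perform_later(current_user, message_ids) # User via GlobalID, ids as a plain Array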
For larger data sets, passing the message_ids is impractical. Instead, pass the SQL for the messages:
@messages = @conversation.messages
MarkMessagesAsReadJob.perform_later(current_user, @messages.to_sql)
then query them from the job:
class MarkMessagesAsReadJob < ActiveJob::Base
  queue_as :default

  def perform(user, messages_sql)
    messages = Message.find_by_sql(messages_sql)
    messages.mark_as_read! :all, :for => user
  end
end
Because the job will be executed later, I think we should pass ids as a parameter instead of a collection.
ActiveJob needs to serialize its parameters, and a SerializationError is raised if a parameter's type isn't supported:
http://api.rubyonrails.org/classes/ActiveJob/SerializationError.html
For example:

@message_ids = @conversation.messages.pluck(:id)
# use a string if an array is not supported
# @message_ids = @message_ids.join(", ")
MarkMessagesAsReadJob.perform_later(current_user, @message_ids)
then query those messages again and mark them:
class MarkMessagesAsReadJob < ActiveJob::Base
  queue_as :default

  def perform(user, message_ids)
    # use a string if an array is not supported
    # message_ids = message_ids.split(",").map(&:to_i)
    messages = Message.where(id: message_ids) # change this to something else
    messages.mark_as_read! :all, :for => user
  end
end
Not tested, but I hope it will be OK.
I have a delayed_job designed to send an email using a mailer.
Upon completion, I need to record that the email was sent; I do this by saving the newly created ContactEmail.
Right now, the new ContactEmail record gets saved even if the delayed_job fails.
How do I correct that, so that the new ContactEmail is only saved when the mailer is successfully sent?
Here is the snippet from the cron task that calls the delayed_job:
puts contact_email.subject
contact_email.date_sent = Date.today
contact_email.date_created = Date.today
contact_email.body = email.substituted_message(contact, contact.colleagues)
contact_email.status = "sent"
# Delayed::Job.enqueue OutboundMailer.deliver_campaign_email(contact, contact_email)
Delayed::Job.enqueue SomeMailJob.new(contact, contact_email)
contact_email.save # now save the record
Here is the some_mail_job.rb
class SomeMailJob < Struct.new(:contact, :contact_email)
  def perform
    OutboundMailer.deliver_campaign_email(contact, contact_email)
  end
end
And here is the outbound_mailer:
class OutboundMailer < Postage::Mailer
  def campaign_email(contact, email)
    subject    email.subject
    recipients contact.email
    from       '<me@me.com>'
    sent_on    Date.today
    body       :email => email
  end
end
You could update the status in the perform of the job itself.
For example, something like:
contact_email.status = 'queued'
contact_email.save
contact_email.delay.deliver_campaign_email
And then in your ContactEmail class, something to the effect of:
def deliver_campaign_email
  OutboundMailer.deliver_campaign_email(self.contact, self)
  self.status = 'sent' # or handle failure and set it appropriately
  self.save
end
delayed_job adds some magic bits to your models that deal with the persistence.
To deal with your OutboundMailer throwing an exception, you can do something like this:
def deliver_campaign_email
  begin
    OutboundMailer.deliver_campaign_email(self.contact, self)
    self.status = 'sent'
  rescue
    self.status = 'failed' # or better yet, grab the message from the exception
  end
  self.save
end
You need synchronous delivery, so stop using delayed_job in this case and do a standard mailer delivery.
Or add a success column to your ContactEmail: initially save it as false, then update it to true in the job.
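A sketch of that second suggestion, assuming a boolean success column has been added to contact_emails; it uses delayed_job's success hook, which is called on the payload object only after perform completes without raising:

class SomeMailJob < Struct.new(:contact, :contact_email)
  def perform
    OutboundMailer.deliver_campaign_email(contact, contact_email)
  end

  # delayed_job lifecycle hook: runs only when perform succeeded
  def success(job)
    contact_email.update_attribute(:success, true)
  end
end

The cron task then saves the ContactEmail with success: false up front, and the record only flips to true once the mailer has actually delivered.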