There is a chain of state machine callbacks that invoke ActiveJob::Base classes to process sequential phases of an order lifecycle.
I am running into a problem where the second job in the sequence is not being enqueued and no errors are being printed to the terminal.
The second job is
class WriteEncodedPreviewImageToFilesJob < ActiveJob::Base
  queue_as :imagery_queue

  # Entry point: runs the preview-image phase of the order lifecycle.
  def perform(order)
    run_phase_of_order_lifecycle(order)
  end

  private

  # Writes the encoded preview image for the order. The method-level
  # `ensure` guarantees the next phase (render_final_image) is triggered
  # even if the background worker raises; any exception still propagates.
  def run_phase_of_order_lifecycle(order)
    BackgroundWorkers::EncodedPreviewImageWriter.work(order)
  ensure
    order.render_final_image
  end
end
And it is being invoked here:
class PreAuthorizeStripePaymentJob < ActiveJob::Base
  queue_as :stripe_queue

  # Entry point: runs the Stripe pre-authorization phase for the order.
  def perform(order)
    run_phase_of_order_lifecycle(order)
  end

  private

  # Pre-authorizes a charge for the order. On success (no failure_code in
  # the Stripe response) it records the charge id and fires the next
  # lifecycle event. Returns the raw Stripe response either way.
  def run_phase_of_order_lifecycle(order)
    response = create_stripe_charge(order)
    if response['failure_code'].nil?
      order.stripe_charge_id = response['id']
      order.save!
      order.write_encoded_preview_image_to_files # this line here!!!
    end
    response
  end

  # Creates an uncaptured (pre-authorized) GBP charge via the Stripe API.
  # Amount is converted from major units to pence.
  def create_stripe_charge(order)
    Stripe::Charge.create(
      amount: (order.original[:order][:price] * 100).round,
      currency: 'gbp',
      capture: false,
      source: order.stripe_id,
      description: "Charge for Order with id: #{order.id}",
      receipt_email: order.original[:order][:user_email]
    )
  end
end
Because of the callback defined in this module (please scroll down):
# Declares the order lifecycle as a strictly linear state machine: each
# event advances the order exactly one state, and each transition enqueues
# a background job to do the actual work for that phase.
module OrderLifecycle
  extend ActiveSupport::Concern
  include ActiveSupport::Callbacks
  included do
    state_machine :initial => :pending do
      # States — a linear chain; every event is only valid from the state
      # the previous event produced.
      event :pre_authorize_stripe_payment do
        transition :pending => :stripe_payment_pre_authorized
      end
      event :write_encoded_preview_image_to_files do
        transition :stripe_payment_pre_authorized => :encoded_preview_image_written_to_files # this line here!!! and...
      end
      event :render_final_image do
        transition :encoded_preview_image_written_to_files => :final_image_rendered
      end
      event :upload_composition_to_parse do
        transition :final_image_rendered => :composition_uploaded_to_parse
      end
      event :upload_print_file_to_printer do
        transition :composition_uploaded_to_parse => :print_file_uploaded_to_printer
      end
      event :call_to_process_message do
        transition :print_file_uploaded_to_printer => :process_message_called
      end
      event :capture_stripe_payment do
        transition :process_message_called => :stripe_payment_captured
      end
      event :mark_as_complete do
        transition :stripe_payment_captured => :complete
      end
      # Callbacks — each phase's job is enqueued in a *before_transition*
      # hook, i.e. before the new state is saved. NOTE(review): as the
      # surrounding discussion shows, a job that fires the next event can
      # therefore run while the order is still in the previous state and
      # raise StateMachines::InvalidTransition; an after_transition hook
      # avoids that race.
      before_transition :on => :pre_authorize_stripe_payment do |order|
        PreAuthorizeStripePaymentJob.perform_later order
      end
      before_transition :on => :write_encoded_preview_image_to_files do |order|
        WriteEncodedPreviewImageToFilesJob.perform_later order # this line here!!!
      end
      before_transition :on => :render_final_image do |order|
        RenderFinalImageJob.perform_later order
      end
      before_transition :on => :upload_composition_to_parse do |order|
        UploadCompositionToParseJob.perform_later order
      end
      before_transition :on => :upload_print_file_to_printer do |order|
        UploadPrintFileToPrinterJob.perform_later order
      end
      before_transition :on => :call_to_process_message do |order|
        CallToProcessMessageJob.perform_later order
      end
      before_transition :on => :capture_stripe_payment do |order|
        CaptureStripePaymentJob.perform_later order
      end
    end
  end
end
My config/sidekiq.yml is
---
:verbose: true
:queues:
- imagery_queue
- parse_queue
- stripe_queue
- printer_api_queue
When I start Sidekiq initially the output to the terminal is:
2015-12-14T03:00:25.339Z 7664 TID-akza0 INFO: Booting Sidekiq 3.5.0 with redis options {:url=>"redis://localhost:6379"}
m,
`$b
.ss, $$b .,d$
`$$P,d$P' .,md$$P' ____ _ _ _ _
,$$$$$$bmmd$$$^' / ___|(_) __| | ___| | _(_) __ _
,d$$$$$$$$$$$P \___ \| |/ _` |/ _ \ |/ / |/ _` |
$s^' `"^$$$' ___) | | (_| | __/ <| | (_| |
$: ,$$P |____/|_|\__,_|\___|_|\_\_|\__, |
`b :$$ |_|
$$:
$$
.d$$
2015-12-14T03:00:28.076Z 7664 TID-akza0 INFO: Running in ruby 2.2.2p95 (2015-04-13 revision 50295) [x86_64-linux]
2015-12-14T03:00:28.077Z 7664 TID-akza0 INFO: See LICENSE and the LGPL-3.0 for licensing details.
2015-12-14T03:00:28.077Z 7664 TID-akza0 INFO: Upgrade to Sidekiq Pro for more features and support: http://sidekiq.org
2015-12-14T03:00:28.091Z 7664 TID-akza0 DEBUG: Middleware: Sidekiq::Middleware::Server::Logging, Sidekiq::Middleware::Server::RetryJobs, Sidekiq::Middleware::Server::ActiveRecord
2015-12-14T03:00:28.091Z 7664 TID-akza0 INFO: Starting processing, hit Ctrl-C to stop
2015-12-14T03:00:28.104Z 7664 TID-wjesw DEBUG: {:queues=>["imagery_queue", "parse_queue", "stripe_queue", "printer_api_queue"], :labels=>[], :concurrency=>25, :require=>".", :environment=>nil, :timeout=>8, :poll_interval_average=>nil, :average_scheduled_poll_interval=>15, :error_handlers=>[#<Sidekiq::ExceptionHandler::Logger:0x00000002a28dc8>], :lifecycle_events=>{:startup=>[], :quiet=>[], :shutdown=>[]}, :dead_max_jobs=>10000, :dead_timeout_in_seconds=>15552000, :verbose=>true, :config_file=>"config/sidekiq.yml", :strict=>true, :tag=>"xxxxxxxxx"}
The problem was in calling the write_encoded_preview_image_to_files method prematurely. Substituting write_encoded_preview_image_to_files! and inserting a binding.pry revealed:
9: def run_phase_of_order_lifecycle(order)
10: response = create_stripe_charge(order)
11: if response['failure_code'] == nil
12: order.stripe_charge_id = response['id']
13: order.save!
14: binding.pry
15: order.write_encoded_preview_image_to_files!
16: end
=> 17: response
18: end
[1] pry(#<PreAuthorizeStripePaymentJob>)> order.state
=> "pending"
[2] pry(#<PreAuthorizeStripePaymentJob>)> order.write_encoded_preview_image_to_files!
StateMachines::InvalidTransition: Cannot transition state via :write_encoded_preview_image_to_files from :pending (Reason(s): State cannot transition via "write encoded preview image to files")
from /home/*****/.rvm/gems/ruby-2.2.2/gems/state_machines-0.4.0/lib/state_machines/event.rb:224:in `block in add_actions'
[3] pry(#<PreAuthorizeStripePaymentJob>)> order.write_encoded_preview_image_to_files
Then checking the order's state in the rails console after the job had been processed:
2.2.2 :044 > Order.last.state
Order Load (1.2ms) SELECT "orders".* FROM "orders" ORDER BY "orders"."id" DESC LIMIT 1
=> "stripe_payment_pre_authorized"
2.2.2 :045 >
Therefore this problem may be solved by adding the following callback to the OrderLifecycle module:
after_transition :on => :pre_authorize_stripe_payment do |order|
order.write_encoded_preview_image_to_files
end
Related
I have an issue with my Ruby on Rails application. There are workers listening for different RabbitMQ topics. Each worker makes some changes in its own DB (MariaDB) table and at the end updates the 'last_connection' field on a shared 'device' object, which is where I have the issue.
This is one worker:
include Sneakers::Worker
# This worker will connect to "queue" queue
# env is set to nil since by default the actuall queue name would be
# "queue_development"
from_queue "sensor_log"
# work method receives message payload in raw format
def work(raw_message)
logger.info ("sensor_log " + raw_message)
msg = JSON.parse(raw_message)
# msg = {"deviceId" => 102,"timestamp" => 1487318555,"sensor" => 5, "values" => [1,2,3,4,5,6,7,8], "isNewSensor" => false, "logDelayed" => false}
begin
#device = Device.find(msg["deviceId"])
ActiveRecord::Base.transaction do
# Initialize
timestamp = Time.at(msg["timestamp"])
MiddlewareLog.create(
device_id: msg["deviceId"],
message: JSON.pretty_generate(msg),
queue: MiddlewareLog.queues[:sensor_log]
)
if(msg["ext_brd_type"] == 6)
logger.info ("Logging external sensors lm2, lm4, lm4")
# Logging external sensors lm2, lm4, lm4
Sensors::SENSORS_EXT.map do |code|
SensorLog.create(
device_id: msg["deviceId"],
sensor: code,
state: msg[code],
value: msg[code],
is_delayed: msg["logDelayed"],
created_at: timestamp
)
end
else
logger.info ("Logging native sensors")
# Logging native
device_sensors = #device.new_version? ? Sensors::SENSORS_CODES_NEW : Sensors::SENSORS_CODES
#sensors = device_sensors.reject{|code, sensor| code & msg["sensor"] == 0}
#sensors.map do |code, sensor|
SensorLog.create(
device_id: msg["deviceId"],
sensor: sensor[:name],
state: sensor[:state],
value: msg["values"][sensor[:bit_position]],
is_delayed: msg["logDelayed"],
created_at: timestamp
)
end
Rollbar.warning("Unknown device sensor", :message => msg, :sensor => msg["sensor"]) if #sensors.empty?
#device.update_sensors_state(msg["values"]) if #sensors.any?
end
# Avoid updated_at deadlock
#device.save!(touch: false)
end
# Touch updated_at and last_connection_at
#device.touch(:last_connection_at)
ack! # we need to let queue know that message was received
rescue => exception
logger.error ("sensors_log exception:")
logger.error exception
Rollbar.error(exception, :message => msg)
requeue!
end
end
end
Here is the second one:
# Sneakers worker that consumes "system_log" messages and records them.
# NOTE(review): in the pasted code every `@` sigil had been garbled to `#`,
# turning the instance variables (@device, @system, @logger) into comments
# and breaking the worker; they are restored here.
class SystemLogsWorker
  include Sneakers::Worker
  # This worker will connect to "queue" queue
  # env is set to nil since by default the actual queue name would be
  # "queue_development"
  from_queue "system_log"
  # @logger = Logger.new(STDOUT)
  # @logger.level = Logger::INFO
  # work method receives message payload in raw format
  def work(raw_message)
    # @logger.info raw_message
    logger.info ("system_log " + raw_message)
    msg = JSON.parse(raw_message)
    # msg = {"deviceId":102,"timestamp":1487318555,"system":2069,"logDelayed":false,"fault_code":1}
    begin
      @device = Device.find(msg["deviceId"])
      ActiveRecord::Base.transaction do
        # Initialize
        timestamp = Time.at(msg["timestamp"])
        # Keep a raw audit trail of every message received on this queue.
        MiddlewareLog.create(
          device_id: msg["deviceId"],
          message: JSON.pretty_generate(msg),
          queue: MiddlewareLog.queues[:system_log]
        )
        @system = Systems::EVENTS_CODES[msg["system"]]
        # @logger.warn("Unknown device system", :message => msg, :system => msg[:system]) unless @system
        # logger.warn("Unknown device system", :message => msg, :system => msg["system"]) unless @system
        # Rollbar.warning("Unknown device system", :message => msg, :system => msg["system"]) unless @system
        logger.warn("Unknown device system. Message:" + raw_message) unless @system
        Rollbar.warning("Unknown device system. Message:" + raw_message) unless @system
        # Logging — only when the system code was recognized.
        system_log = SystemLog.create(
          device_id: msg["deviceId"],
          system: @system[:name],
          state: @system[:state],
          is_delayed: msg["logDelayed"],
          fault_code: msg["fault_code"],
          created_at: timestamp
        ) if @system
        @device.update_systems_state(system_log) if @system
        # Avoid updated_at deadlock
        @device.save!(touch: false)
      end
      # Touch updated_at and last_connection_at
      # NOTE(review): this row is also touched by the sensor-log worker; per
      # the error logs it deadlocks under concurrency — see the locking
      # suggestions (@device.with_lock) in the answer below.
      @device.touch(:last_connection_at)
      ack! # we need to let queue know that message was received
    rescue => exception
      logger.error ("system_log exception:")
      logger.error exception
      Rollbar.error(exception, :message => msg)
      requeue!
    end
  end
end
In the runtime I get the message:
2020-06-18T11:09:08Z p-13299 t-gmvtsrzac ERROR: sensors_log exception:
2020-06-18T11:09:08Z p-13299 t-gmvtsrzac ERROR: Mysql2::Error: Deadlock found when trying to get lock; try restarting transaction: UPDATE devices SET devices.updated_at = '2020-06-18 11:09:08', devices.last_connection_at = '2020-06-18 11:09:08' WHERE devices.id = 3024
2020-06-18T11:09:08Z p-13299 t-gmvtsq74w ERROR: system_log exception:
2020-06-18T11:09:08Z p-13299 t-gmvtsq74w ERROR: Mysql2::Error: Deadlock found when trying to get lock; try restarting transaction: UPDATE devices SET devices.updated_at = '2020-06-18 11:09:08', devices.last_connection_at = '2020-06-18 11:09:08' WHERE devices.id = 3024
I think the problem is in
@device.touch(:last_connection_at), because both workers are trying to update the same table row at the same time.
I'm not so good with Ruby and would be glad of any help with this.
Have you tried using a lock within the transaction before updating the database data?
@device.lock!
@device.save!(touch: false)
@device.touch(:last_connection_at)
You can also start a lock and transaction at the same time using with_lock:
@device = Device.find(msg["deviceId"])
@device.with_lock do
  # your block here
end
As described in https://api.rubyonrails.org/classes/ActiveRecord/Locking/Pessimistic.html
when console is launched
while at console prompt
How it should work?
See the output here. Simple, quick methods. T.me (current tenant), T.names (tenants in the DB), ...
Launch, ask for tenant selection, set
$ bin/rails c
Running via Spring preloader in process 11233
Loading development environment (Rails 5.1.5)
(1.9ms) SELECT "public"."tenants"."subdomain" FROM "public"."tenants" WHERE "public"."tenants"."deleted_at" IS NULL ORDER BY "public"."tenants"."created_at" DESC
Available tenants: {0=>"public", 1=>"local"}
Select tenant: 1
You are now Tenant 'local'
Frame number: 0/24
Switch tenant
[1] [my-project][development] pry(main)> T.ask
Available tenants: {0=>"public", 1=>"local"}
Select tenant: 0
You are now Tenant 'public'
=> nil
Switch again
[2] [my-project][development] pry(main)> T.ask
Available tenants: {0=>"public", 1=>"local"}
Select tenant: 1
You are now Tenant 'local'
=> nil
Current tenant
[3] [my-project][development] pry(main)> T.me
=> "local"
Tenant we can quickly switch to
[4] [my-project][development] pry(main)> T.hash
=> {0=>"public", 1=>"local"}
Tenant names
[5] [my-project][development] pry(main)> T.names
=> ["local"]
Is abc a tenant?
[6] [my-project][development] pry(main)> T.exists? 'abc'
=> false
Is local a tenant?
[7] [my-project][development] pry(main)> T.exists? 'local'
=> true
Note: This is not tested thoroughly. Please test before using. This code just gives you some idea, how I have been using these small shortcuts to save time during development. Thank you for reading.
Put it inside <project-root>/.pryrc
# What is it?
# => Helper methods for Apartment::Tenant gem
# How does it work?
# * bin/rails console => auto-loads and asks to switch tenant
# * T.ask => anytime in console, to switch tenant from a list
# * T.me => same as Apartment::Tenant.current
# * T.hash => hash of tenants. Example: { 0 => "public", 1 => "tenant-a" }
# * T.names => array with all existing tenant names from DB
# * T.exists?(arg) => returns true/false if `arg` exists as tenant in DB
# * T.switch!(arg) => same as Apartment::Tenant.switch!
require "rubygems"
# convenience class
# Convenience class: console shortcuts around Apartment::Tenant.
# NOTE(review): the pasted code had the class-variable sigils garbled
# (`##names` / `##hash` instead of `@@names` / `@@hash`), which commented
# out the memoization and the { 0 => 'public' } merge; restored here.
class T
  class << self
    # ['tenant1', 'tenant2', ...] — memoized, sorted tenant names from the DB.
    def names
      @@names ||= Apartment.tenant_names.sort
    end

    # { 0 => 'public', 1 => 'tenant1', ...}
    # NOTE(review): `product` pairs every index with every name, so the
    # memoized hash ends up with every value set to the last tenant name —
    # a known issue, corrected in the follow-up `hash` versions below.
    def hash
      @@hash ||= { 0 => 'public' }.merge(
        (1..(T.names.length)).to_a
        .product(T.names)
        .to_h
      )
    end

    # Switch tenant, but only to a name present in the tenant hash.
    def switch! arg
      Apartment::Tenant.switch!(arg) if T.hash.value?(arg)
    end

    # current tenant
    def me
      Apartment::Tenant.current
    end

    def exists? arg
      T.names.include? arg
    end

    # ask to switch the tenant (interactive prompt)
    def ask
      WelcomeClass.select_tenant
    end
  end
end
# select tenant when entering console
class WelcomeClass
def self.select_tenant
puts "Available tenants: #{T.hash}"
print "Select tenant: "
tenant = gets.strip # ask which one?
unless tenant.empty?
# by name
if T.exists?(tenant)
T.switch!(tenant)
# by index position
# string has digit + tenant index present
elsif tenant[/\d/].present? && T.hash.key?(tenant.to_i)
T.switch!(T.hash[tenant.to_i])
# not found = no action
else
puts "Tenant not found in list '#{tenant}'"
end
end
# announce current tenant
puts "You are now Tenant '#{T.me}'"
end
end
# run the code at `bin/rails console`
Pry.config.exec_string = WelcomeClass.select_tenant
An update is needed for the accepted answer: the T 'hash' method is creating a hash with the right number of keys but the values for all keys are duplicated with the last tenant name (0 => 'public', 1 => 'test', 2 => 'test' .. x => 'test'). Here's a working 'hash' method:
# Corrected index => name map: zips positions 0..n-1 with the tenant names.
# NOTE(review): restored `@@hash` from the garbled `##hash` in the paste.
def hash
  @@hash ||= Hash[(0..T.names.size - 1).zip T.names]
end
bazfer's answer is partially correct; it forgot the public tenant:
# Index => name map including the public tenant at index 0.
# NOTE(review): restored `@@hash` from the garbled `##hash` in the paste.
def hash
  @@hash ||= { 0 => 'public' }.merge(Hash[(1..T.names.size).zip T.names])
end
Please add to bazfer answer and to accepted answer
I've recently imported around 50 000 records into an ActiveAdmin application and am now experiencing very poor performance. Is ActiveAdmin designed to handle this amount of records?
I have got slightly improved performance by adding in remove_filter for the filters I am not using.
I don't have any associations, which I know causes some performance issues. My model structure is completely flat with a couple of sub classes using single table inheritance.
I'm using:
ruby '2.1.1'
gem 'rails', '4.1.0'
gem 'activeadmin', github: 'activeadmin'
I've got the application deployed to Heroku. I've inserted some logs from Heroku down the bottom.
Here's my model code:
class Product < ActiveRecord::Base
  # Scopes — partition products by the status threshold (100) and by
  # whether a category has been assigned.
  scope :upward_trending,   -> { where("status > ?", 100) }
  scope :downward_trending, -> { where("status < ?", 100) }
  scope :uncategorised,     -> { where(category: '') }
  scope :categorised,       -> { where.not(category: '') }
end
Here's my resource code:
# Admin resource for Product: custom index columns, scopes and filters.
ActiveAdmin.register Product do
  menu :label => "All Products", :priority => 1
  config.clear_action_items!
  permit_params :name, :link, :category, :image_url, :price, :interest, :interest_changes, :revenue, :start_date, :end_date, :company, :country, :price_eur, :price_gbp, :price_aud, :price_nzd, :price_cad
  # Input — edit form only exposes country and category.
  form do |f|
    f.inputs 'Details' do
      f.input :country, :as => :string
      f.input :category
    end
    f.actions
  end
  # Scopes
  scope :upward_trending, :default => true
  scope :downward_trending
  scope :all
  # Default Sort
  config.sort_order = "end_date_desc"
  index do
    column "Product Name", :sortable => :name do |a|
      link_to a.name, a.link, :target => "_blank"
    end
    column "Image" do |a|
      div :class => "image_url" do
        link_to (image_tag a.image_url, class: 'image_url'), a.image_url, :target => "_blank", class: 'fancybox'
      end
    end
    # Price rendered in the signed-in user's preferred currency; falls back
    # to the USD price column.
    column "Price", :sortable => :price_eur do |a|
      div :class => "number" do
        case current_user.currency
        when 'EUR'
          number_to_currency(a.price_eur, unit: "€")
        when 'GBP'
          number_to_currency(a.price_gbp, unit: "£")
        when 'AUD'
          number_to_currency(a.price_aud, unit: "$")
        when 'CAD'
          number_to_currency(a.price_cad, unit: "$")
        when 'NZD'
          number_to_currency(a.price_nzd, unit: "$")
        else
          number_to_currency(a.price, unit: "$")
        end
      end
    end
    column "Status", :sortable => :status do |a|
      div :class => "average" do
        number_to_percentage(a.status, precision: 0)
      end
    end
    column :category
    # Relative time for updates within 5 days, absolute date otherwise.
    column "Updated", :sortable => "end_date" do |a|
      if a.end_date > Time.now - 5.days
        distance_of_time_in_words(a.end_date, Time.now, include_seconds: true) + " ago"
      else
        a.end_date.to_formatted_s(:long)
      end
    end
    #column :company
    #column :country
  end
  # Sidebar
  #sidebar :ProductSearch, :priority => 1 do
  # render partial: 'admin/search_products', :locals => {:model_name => 'products'}
  #end
  # Filters
  # NOTE(review): these proc collections call Product.all and instantiate
  # every record on each index render — identified below the logs as the
  # performance culprit; Product.pluck(:country).uniq.sort avoids loading
  # full records.
  filter :category, :as => :check_boxes, :collection => proc { Product.all.collect {|dd| dd.category}.uniq.sort }
  #filter :name, :label => "Product Name", :as => :string, filters: ['contains']
  #filter :price, :label => "USD Price"
  #filter :interest, :label => "Units Sold"
  #filter :company, :as => :select, :collection => proc { Product.all.collect {|dd| dd.company}.uniq.sort }
  filter :country, :as => :select, :collection => proc { Product.all.collect {|dd| dd.country}.uniq.sort }
  filter :end_date, :label => "Date"
  remove_filter :link
  remove_filter :image_url
  remove_filter :price
  remove_filter :interest
  remove_filter :interest_changes
  remove_filter :revenue
  remove_filter :start_date
  remove_filter :price_eur
  remove_filter :price_gbp
  remove_filter :price_aud
  remove_filter :price_nzd
  remove_filter :price_cad
end
Here are some logs from Heroku when loading the resources, in this case it timed out.
2014-09-17T21:22:09.778167+00:00 app[web.1]: Started GET "/admin/products" for 91.226.23.198 at 2014-09-17 21:22:09 +0000
2014-09-17T21:22:09.786533+00:00 app[web.1]: Processing by Admin::ProductsController#index as HTML
2014-09-17T21:22:25.828163+00:00 heroku[web.1]: source=web.1 dyno=heroku.29301280.ba6942e6-4473-477d-8fa9-b3de141f9f06 sample#load_avg_1m=0.08 sample#load_avg_5m=0.09 sample#load_avg_15m=0.04
2014-09-17T21:22:25.828431+00:00 heroku[web.1]: source=web.1 dyno=heroku.29301280.ba6942e6-4473-477d-8fa9-b3de141f9f06 sample#memory_total=670.15MB sample#memory_rss=511.80MB sample#memory_cache=0.00MB sample#memory_swap=15
8.34MB sample#memory_pgpgin=352746pages sample#memory_pgpgout=221723pages
2014-09-17T21:22:25.829347+00:00 heroku[web.1]: Process running mem=670M(130.9%)
2014-09-17T21:22:25.829678+00:00 heroku[web.1]: Error R14 (Memory quota exceeded)
2014-09-17T21:22:39.775186+00:00 heroku[router]: at=error code=H12 desc="Request timeout" method=GET path="/admin/products" host=*.herokuapp.com request_id=e3abc8d7-f52d-47b2-bbb0-161823e1a596 fwd="91.226.23.198" d
yno=web.1 connect=1ms service=30001ms status=503 bytes=0
2014-09-17T21:22:40.763399+00:00 app[web.1]: E, [2014-09-17T21:22:40.714804 #2] ERROR -- : worker=0 PID:127 timeout (31s > 30s), killing
2014-09-17T21:22:41.133007+00:00 app[web.1]: E, [2014-09-17T21:22:41.132895 #2] ERROR -- : reaped #<Process::Status: pid 127 SIGKILL (signal 9)> worker=0
2014-09-17T21:22:43.505823+00:00 app[web.1]: I, [2014-09-17T21:22:43.491614 #158] INFO -- : worker=0 ready
2014-09-17T21:22:46.406853+00:00 heroku[router]: at=info method=GET path="/favicon.ico" host=x.herokuapp.com request_id=9769d818-5231-44db-ab19-d6f7597c308b fwd="91.226.23.198" dyno=web.1 connect=1ms service=5666ms
status=304 bytes=111
EDIT:
I've tried adding an index on end_date as am sorting descending with this. Unfortunately this made little change on the load times:
Sep 17 15:22:34 x app/web.1: Completed 200 OK in 8556ms (Views: 7377.9ms | ActiveRecord: 1173.7ms)
Sep 17 15:23:07 x app/web.1: Completed 200 OK in 8864ms (Views: 7640.8ms | ActiveRecord: 1220.0ms)
Sep 17 15:28:47 x app/web.1: Completed 200 OK in 9551ms (Views: 8039.2ms | ActiveRecord: 1442.5ms)
Sep 17 15:29:01 x app/web.1: Completed 200 OK in 8921ms (Views: 7651.1ms | ActiveRecord: 1264.0ms)
Looks like the culprit was this line of code:
filter :country, :as => :select, :collection => proc { Product.all.collect {|dd| dd.country}.uniq.sort }
Changed it to:
filter :country, :as => :select, :collection => proc { Product.pluck(:country).uniq.sort }
I'm working my first project using Neo4j. I'm parsing wikipedia's page and pagelinks dumps to create a graph where the nodes are pages and the edges are links.
I've defined some rake tasks that download the dumps, parse the data, and save it in a Neo4j database. At the end of the rake task I print the number of pages and links created, and some of the pages with the most links. Here is the output of the rake task for the zawiki.
$ rake wiki[zawiki]
[ omitted ]
...
:: Done parsing zawiki
:: 1984 pages
:: 2144 links
:: The pages with the most links are:
9625.0 - Emijrp/List_of_Wikipedians_by_number_of_edits_(bots_included): 40
1363.0 - Gvangjsih_Bouxcuengh_Swcigih: 30
9112.0 - Fuzsuih: 27
1367.0 - Cungzcoj: 26
9279.0 - Vangz_Yenfanh: 19
It looks like pages and links are being created, but when I start a rails console, or the server the links aren't found.
$ rails c
jruby-1.7.5 :013 > Pages.all.count
=> 1984
jruby-1.7.5 :003 > Pages.all.reduce(0) { |count, page| count + page.links.count}
=> 0
jruby-1.7.5 :012 > Pages.all.sort_by { |p| p.links.count }.reverse[0...5].map { |p| p.links.count }
=> [0, 0, 0, 0, 0]
Here is the rake task, and this is the projects github page. Can anyone tell me why the links aren't saved?
# Directory where the downloaded SQL dump files are stored.
DUMP_DIR = Rails.root.join('lib','assets')
desc "Download wiki dumps and parse them"
# Entry point: `rake wiki[name]` delegates to wiki:all with the wiki name.
task :wiki, [:wiki] => 'wiki:all'
namespace :wiki do
  # Runs :get then :parse, then reports what was created.
  task :all, [:wiki] => [:get, :parse] do |t, args|
    # Print info about the newly created pages and links.
    link_count = 0
    Pages.all.each do |page|
      link_count += page.links.count
    end
    indent "Done parsing #{args[:wiki]}"
    indent "#{Pages.count} pages"
    indent "#{link_count} links"
    indent "The pages with the most links are:"
    Pages.all.sort_by { |a| a.links.count }.reverse[0...5].each do |page|
      puts "#{page.page_id} - #{page.title}: #{page.links.count}"
    end
  end

  desc "Download wiki page and page links database dumps to /lib/assets"
  task :get, :wiki do |t, args|
    indent "Downloading dumps"
    sh "#{Rails.root.join('lib', "get_wiki").to_s} #{args[:wiki]}"
    indent "Done"
  end

  desc "Parse all dumps"
  task :parse, [:wiki] => 'parse:all'

  namespace :parse do
    task :all, [:wiki] => [:pages, :pagelinks]

    desc "Read wiki page dumps from lib/assests into the database"
    task :pages, [:wiki] => :environment do |t, args|
      parse_dumps('page', args[:wiki]) do |obj|
        page = Pages.create_from_dump(obj)
      end
      # Fixed: was `indent = "Created ..."`, which assigned an unused local
      # variable instead of calling the indent helper, so nothing printed.
      indent "Created #{Pages.count} pages"
    end

    desc "Read wiki pagelink dumps from lib/assests into the database"
    task :pagelinks, [:wiki] => :environment do |t, args|
      errors = 0
      parse_dumps('pagelinks', args[:wiki]) do |from_id, namespace, to_title|
        from = Pages.find(:page_id => from_id)
        to = Pages.find(:title => to_title)
        if to.nil? || from.nil?
          errors = errors.succ
        else
          from.links << to
          from.save
        end
      end
      # NOTE(review): `errors` is counted but never reported; also verify
      # that Pages.find(:attr => value) returns a node here — TODO confirm.
    end
  end
end
# Prints the given messages to stdout, prefixing the output with ":: "
# (the prefix lands on the first line; extra args each get their own line).
def indent(*messages)
  $stdout.print ":: "
  $stdout.puts(messages)
end
# Finds every "<wiki>-<dump>.sql" file in DUMP_DIR (optionally restricted
# to one wiki name) and streams its values to the given block via each_value.
def parse_dumps(dump, wiki_match, &block)
  wiki_match ||= /\w+/
  pattern = Regexp.new "(#{wiki_match})-#{dump}.sql"
  DUMP_DIR.entries.each do |entry|
    match = entry.to_s.match(pattern)
    next unless match
    file = match[0]
    wiki = match[1]
    indent "Parsing #{wiki} #{dump.pluralize} from #{file}"
    each_value(DUMP_DIR.join(file), &block)
  end
end
# Streams a MySQL dump file, yielding one parsed tuple per "(...)" value
# group of each INSERT INTO statement. Quoted values are unwrapped to
# strings; everything else is coerced to Float.
def each_value(filename)
  f = File.open(filename)
  num_read = 0
  begin # read file until line starting with INSERT INTO
    line = f.gets
  end until line.match /^INSERT INTO/
  begin
    line = line.match(/\(.*\)[,;]/)[0] # ignore beginning of line until (...) object
    begin
      # NOTE(review): the naive split(',') breaks on values that contain
      # commas inside quotes — TODO confirm dumps never contain such titles.
      yield line[1..-3].split(',').map { |e| e.match(/^['"].*['"]$/) ? e[1..-2] : e.to_f }
      num_read = num_read.succ
      # NOTE(review): f.gets returns nil at EOF, so .chomp would raise
      # NoMethodError if the file doesn't end right after the ';'.
      line = f.gets.chomp
    end while(line[0] == '(') # until next insert block, or end of file
  end while line.match /^INSERT INTO/ # Until line doesn't start with (...
  f.close
end
app/models/pages.rb
# Neo4j node model for a wiki page, with outgoing :links to other pages.
class Pages < Neo4j::Rails::Model
  include Neo4j::NodeMixin
  has_n(:links).to(Pages)
  property :page_id
  property :namespace, :type => Fixnum
  property :title, :type => String
  property :restrictions, :type => String
  property :counter, :type => Fixnum
  property :is_redirect, :type => Fixnum
  property :is_new, :type => Fixnum
  property :random, :type => Float
  property :touched, :type => String
  property :latest, :type => Fixnum
  property :length, :type => Fixnum
  property :no_title_convert, :type => Fixnum

  # Builds a Pages node from one value tuple of a page-table dump row.
  # The order of `attrs` is important: it corresponds to the column order
  # of the data in `obj`. Returns the created node.
  def self.create_from_dump(obj)
    attrs = [:page_id, :namespace, :title, :restrictions, :counter, :is_redirect,
             :is_new, :random, :touched, :latest, :length, :no_title_convert]
    Pages.create(Hash[attrs.zip(obj)])
  end
end
I must admit that I have no idea of how Neo4j works.
Transferring from other databases though, I too assume that either some validation is wrong, or maybe even something is misconfigured in your use of the database. The latter I can't give any advice on where to look, but if it's about validation, you can look at Page#errors or try calling Page#save! and see what it raises.
One crazy idea that just came to mind looking at this example is that maybe for that relation to be configured properly, you need a back reference, too.
Maybe has_n(:links).to(Page, :links) will help you. Or, if that doesn't work:
has_n(:links_left).to(Page, :links_right)
has_n(:links_right).from(Page, :links_left)
The more I look at this, the more I think the back reference to the same table is not configured properly and thus won't validate.
I'm building an app on Heroku and Redis that sends an SMS message for every row in an input CSV file which contains the mobile phone number. The message is sent using Twilio in a sidekiq worker shown below. The problem is that even though the SMS is being sent for all the rows in the CSV, the database write (TextMessage.create) and log write (puts statement) only executes for one row in the CSV. There is one Sidekiq worker spawned for each row in the CSV file. It seems like only one Sidekiq worker has I/O (DB, file) access and it locks it from the other Sidekiq workers. Any help would be appreciated.
sidekiq worker:
require 'sidekiq'
require 'twilio-rb'
class TextMessage < ActiveRecord::Base
  include Sidekiq::Extensions

  # Sends one SMS via Twilio for a CSV row, records the message in the DB,
  # and stamps the originating spreadsheet cell with the send time.
  def self.send_message(number, body, row_index, column_index, table_id)
    puts "TextMessage#send_message: ROW INDEX: #{row_index} COLUMN INDEX: #{column_index} TABLEID: #{table_id} BODY: #{body} PHONE: #{number}"
    Twilio::Config.setup :account_sid => 'obfuscated', :auth_token => '<obfuscated>'
    # Central-time timestamp appended to the outgoing message body.
    sent_stamp = Time.now.in_time_zone('Central Time (US & Canada)').strftime("%m/%d/%Y %I:%M%p Central")
    Twilio::SMS.create :to => number, :from => '+17085555555', :body => body + ' | Sent: ' + sent_stamp
    TextMessage.create :to => number, :from => '+17085555555'
    ImportCell.add_new_column(table_id, row_index, column_index, "Time Sent",
                              Time.now.in_time_zone('Central Time (US & Canada)').strftime("%m/%d/%Y %I:%M%p Central"))
  end
end
call to sidekiq worker:
TextMessage.delay_until(time_to_send, :retry => 3).send_message(phone, 'Scheduled: ' + time_to_send.in_time_zone('Central Time (US & Canada)').strftime("%m/%d/%Y %I:%M%p Central"), row_index, column_index, table.id)
column_index += 1
Heroku Procfile
worker: bundle exec sidekiq -C config/sidekiq.yml
sidekiq.yml
:verbose: false
:concurrency: 3
:queues:
- [default, 5]
config/initializers/redis.rb:
# Redis To Go (Heroku add-on) connection built from the environment URL.
uri = URI.parse(ENV["REDISTOGO_URL"])
REDIS = Redis.new(:host => uri.host, :port => uri.port, :password => uri.password)
# Server-side Sidekiq setup: widen the ActiveRecord connection pool so the
# worker threads can each obtain a DB connection.
Sidekiq.configure_server do |config|
  database_url = ENV['DATABASE_URL']
  if(database_url)
    # Append ?pool=25 and re-establish so the larger pool takes effect.
    ENV['DATABASE_URL'] = "#{database_url}?pool=25"
    ActiveRecord::Base.establish_connection
  end
end
I am one of the people who commented on your question, just fixed it!
You are using .create, which Sidekiq seemed not to like, so I tried using .new and then .save, which made it work! I think it has to do with .create not being thread safe or something of the sort, but I honestly have no idea.
Non Working code:
# Non-working variant: inserts a UserInfo row with .create inside a
# Sidekiq worker. NOTE(review): per the answer text the row silently never
# appeared with .create but did with .new + .save; the "not thread safe"
# explanation is the author's guess and is unverified.
class HardWorker
  include Sidekiq::Worker
  def perform(name, count)
    puts 'Doing some hard work!'
    UserInfo.create(
      :user => "someone",
      :misc1 => 0,
      :misc2 => 0,
      :misc3 => 0,
      :comment => "Made from HardWorker",
      :time_changed => Time.now
    )
    puts 'Done with hard work!'
  end
end
Working code:
# Working variant: builds the row with .new and persists it with an
# explicit .save — identical attributes to the non-working version above.
class HardWorker
  include Sidekiq::Worker
  def perform(name, count)
    puts 'Doing some hard work!'
    a_row = UserInfo.new(
      :user => "someone",
      :misc1 => 0,
      :misc2 => 0,
      :misc3 => 0,
      :comment => "Made from HardWorker",
      :time_changed => Time.now
    )
    a_row.save
    puts 'Done with hard work!'
  end
end