Celery daemon "Cannot connect to amqp" - connection refused

I'm trying to run celery as a daemon in the background on Ubuntu 14.04.
I've followed the instructions at http://celery.readthedocs.org/en/latest/tutorials/daemonizing.html and used the celeryd shell script:
#!/bin/sh -e
# ============================================
# celeryd - Starts the Celery worker daemon.
# ============================================
#
# :Usage: /etc/init.d/celeryd {start|stop|force-reload|restart|try-restart|status}
# :Configuration file: /etc/default/celeryd
#
# See http://docs.celeryproject.org/en/latest/tutorials/daemonizing.html#generic-init-scripts
### BEGIN INIT INFO
# Provides: celeryd
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: celery task worker daemon
### END INIT INFO
#
#
# To implement separate init scripts, copy this script and give it a different
# name:
# I.e., if my new application, "little-worker" needs an init, I
# should just use:
#
# cp /etc/init.d/celeryd /etc/init.d/little-worker
#
# You can then configure this by manipulating /etc/default/little-worker.
#
VERSION=10.1
echo "celery init v${VERSION}."
if [ $(id -u) -ne 0 ]; then
echo "Error: This program can only be used by the root user."
echo " Unprivileged users must use the 'celery multi' utility, "
echo " or 'celery worker --detach'."
exit 1
fi
# Can be a runlevel symlink (e.g. S02celeryd)
if [ -L "$0" ]; then
SCRIPT_FILE=$(readlink "$0")
else
SCRIPT_FILE="$0"
fi
SCRIPT_NAME="$(basename "$SCRIPT_FILE")"
DEFAULT_USER="celery"
DEFAULT_PID_FILE="/var/run/celery/%n.pid"
DEFAULT_LOG_FILE="/var/log/celery/%n.log"
DEFAULT_LOG_LEVEL="INFO"
DEFAULT_NODES="celery"
DEFAULT_CELERYD="-m celery worker --detach"
CELERY_DEFAULTS=${CELERY_DEFAULTS:-"/etc/default/${SCRIPT_NAME}"}
# Make sure executable configuration script is owned by root
_config_sanity() {
local path="$1"
local owner=$(ls -ld "$path" | awk '{print $3}')
local iwgrp=$(ls -ld "$path" | cut -b 6)
local iwoth=$(ls -ld "$path" | cut -b 9)
if [ "$(id -u $owner)" != "0" ]; then
echo "Error: Config script '$path' must be owned by root!"
echo
echo "Resolution:"
echo "Review the file carefully and make sure it has not been "
echo "modified with mailicious intent. When sure the "
echo "script is safe to execute with superuser privileges "
echo "you can change ownership of the script:"
echo " $ sudo chown root '$path'"
exit 1
fi
if [ "$iwoth" != "-" ]; then # S_IWOTH
echo "Error: Config script '$path' cannot be writable by others!"
echo
echo "Resolution:"
echo "Review the file carefully and make sure it has not been "
echo "modified with malicious intent. When sure the "
echo "script is safe to execute with superuser privileges "
echo "you can change the scripts permissions:"
echo " $ sudo chmod 640 '$path'"
exit 1
fi
if [ "$iwgrp" != "-" ]; then # S_IWGRP
echo "Error: Config script '$path' cannot be writable by group!"
echo
echo "Resolution:"
echo "Review the file carefully and make sure it has not been "
echo "modified with malicious intent. When sure the "
echo "script is safe to execute with superuser privileges "
echo "you can change the scripts permissions:"
echo " $ sudo chmod 640 '$path'"
exit 1
fi
}
if [ -f "$CELERY_DEFAULTS" ]; then
_config_sanity "$CELERY_DEFAULTS"
echo "Using config script: $CELERY_DEFAULTS"
. "$CELERY_DEFAULTS"
fi
# Sets --app argument for CELERY_BIN
CELERY_APP_ARG=""
if [ ! -z "$CELERY_APP" ]; then
CELERY_APP_ARG="--app=$CELERY_APP"
fi
CELERYD_USER=${CELERYD_USER:-$DEFAULT_USER}
# Set CELERY_CREATE_DIRS to always create log/pid dirs.
CELERY_CREATE_DIRS=${CELERY_CREATE_DIRS:-0}
CELERY_CREATE_RUNDIR=$CELERY_CREATE_DIRS
CELERY_CREATE_LOGDIR=$CELERY_CREATE_DIRS
if [ -z "$CELERYD_PID_FILE" ]; then
CELERYD_PID_FILE="$DEFAULT_PID_FILE"
CELERY_CREATE_RUNDIR=1
fi
if [ -z "$CELERYD_LOG_FILE" ]; then
CELERYD_LOG_FILE="$DEFAULT_LOG_FILE"
CELERY_CREATE_LOGDIR=1
fi
CELERYD_LOG_LEVEL=${CELERYD_LOG_LEVEL:-${CELERYD_LOGLEVEL:-$DEFAULT_LOG_LEVEL}}
CELERY_BIN=${CELERY_BIN:-"celery"}
CELERYD_MULTI=${CELERYD_MULTI:-"$CELERY_BIN multi"}
CELERYD_NODES=${CELERYD_NODES:-$DEFAULT_NODES}
export CELERY_LOADER
if [ -n "$2" ]; then
CELERYD_OPTS="$CELERYD_OPTS $2"
fi
CELERYD_LOG_DIR=`dirname $CELERYD_LOG_FILE`
CELERYD_PID_DIR=`dirname $CELERYD_PID_FILE`
# Extra start-stop-daemon options, like user/group.
if [ -n "$CELERYD_CHDIR" ]; then
DAEMON_OPTS="$DAEMON_OPTS --workdir=$CELERYD_CHDIR"
fi
check_dev_null() {
if [ ! -c /dev/null ]; then
echo "/dev/null is not a character device!"
exit 75 # EX_TEMPFAIL
fi
}
maybe_die() {
if [ $? -ne 0 ]; then
echo "Exiting: $* (errno $?)"
exit 77 # EX_NOPERM
fi
}
create_default_dir() {
if [ ! -d "$1" ]; then
echo "- Creating default directory: '$1'"
mkdir -p "$1"
maybe_die "Couldn't create directory $1"
echo "- Changing permissions of '$1' to 02755"
chmod 02755 "$1"
maybe_die "Couldn't change permissions for $1"
if [ -n "$CELERYD_USER" ]; then
echo "- Changing owner of '$1' to '$CELERYD_USER'"
chown "$CELERYD_USER" "$1"
maybe_die "Couldn't change owner of $1"
fi
if [ -n "$CELERYD_GROUP" ]; then
echo "- Changing group of '$1' to '$CELERYD_GROUP'"
chgrp "$CELERYD_GROUP" "$1"
maybe_die "Couldn't change group of $1"
fi
fi
}
check_paths() {
if [ $CELERY_CREATE_LOGDIR -eq 1 ]; then
create_default_dir "$CELERYD_LOG_DIR"
fi
if [ $CELERY_CREATE_RUNDIR -eq 1 ]; then
create_default_dir "$CELERYD_PID_DIR"
fi
}
create_paths() {
create_default_dir "$CELERYD_LOG_DIR"
create_default_dir "$CELERYD_PID_DIR"
}
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
_get_pidfiles () {
# note: multi < 3.1.14 output to stderr, not stdout, hence the redirect.
${CELERYD_MULTI} expand "${CELERYD_PID_FILE}" ${CELERYD_NODES} 2>&1
}
_get_pids() {
found_pids=0
my_exitcode=0
for pidfile in $(_get_pidfiles); do
local pid=`cat "$pidfile"`
local cleaned_pid=`echo "$pid" | sed -e 's/[^0-9]//g'`
if [ -z "$pid" ] || [ "$cleaned_pid" != "$pid" ]; then
echo "bad pid file ($pidfile)"
one_failed=true
my_exitcode=1
else
found_pids=1
echo "$pid"
fi
if [ $found_pids -eq 0 ]; then
echo "${SCRIPT_NAME}: All nodes down"
exit $my_exitcode
fi
done
}
_chuid () {
su "$CELERYD_USER" -c "$CELERYD_MULTI $*"
}
start_workers () {
if [ ! -z "$CELERYD_ULIMIT" ]; then
ulimit $CELERYD_ULIMIT
fi
_chuid $* start $CELERYD_NODES $DAEMON_OPTS \
--pidfile="$CELERYD_PID_FILE" \
--logfile="$CELERYD_LOG_FILE" \
--loglevel="$CELERYD_LOG_LEVEL" \
$CELERY_APP_ARG \
$CELERYD_OPTS
}
dryrun () {
(C_FAKEFORK=1 start_workers --verbose)
}
stop_workers () {
_chuid stopwait $CELERYD_NODES --pidfile="$CELERYD_PID_FILE"
}
restart_workers () {
_chuid restart $CELERYD_NODES $DAEMON_OPTS \
--pidfile="$CELERYD_PID_FILE" \
--logfile="$CELERYD_LOG_FILE" \
--loglevel="$CELERYD_LOG_LEVEL" \
$CELERY_APP_ARG \
$CELERYD_OPTS
}
kill_workers() {
_chuid kill $CELERYD_NODES --pidfile="$CELERYD_PID_FILE"
}
restart_workers_graceful () {
echo "WARNING: Use with caution in production"
echo "The workers will attempt to restart, but they may not be able to."
local worker_pids=
worker_pids=`_get_pids`
[ "$one_failed" ] && exit 1
for worker_pid in $worker_pids; do
local failed=
kill -HUP $worker_pid 2> /dev/null || failed=true
if [ "$failed" ]; then
echo "${SCRIPT_NAME} worker (pid $worker_pid) could not be restarted"
one_failed=true
else
echo "${SCRIPT_NAME} worker (pid $worker_pid) received SIGHUP"
fi
done
[ "$one_failed" ] && exit 1 || exit 0
}
check_status () {
my_exitcode=0
found_pids=0
local one_failed=
for pidfile in $(_get_pidfiles); do
if [ ! -r $pidfile ]; then
echo "${SCRIPT_NAME} down: no pidfiles found"
one_failed=true
break
fi
local node=`basename "$pidfile" .pid`
local pid=`cat "$pidfile"`
local cleaned_pid=`echo "$pid" | sed -e 's/[^0-9]//g'`
if [ -z "$pid" ] || [ "$cleaned_pid" != "$pid" ]; then
echo "bad pid file ($pidfile)"
one_failed=true
else
local failed=
kill -0 $pid 2> /dev/null || failed=true
if [ "$failed" ]; then
echo "${SCRIPT_NAME} (node $node) (pid $pid) is down, but pidfile exists!"
one_failed=true
else
echo "${SCRIPT_NAME} (node $node) (pid $pid) is up..."
fi
fi
done
[ "$one_failed" ] && exit 1 || exit 0
}
case "$1" in
start)
check_dev_null
check_paths
start_workers
;;
stop)
check_dev_null
check_paths
stop_workers
;;
reload|force-reload)
echo "Use restart"
;;
status)
check_status
;;
restart)
check_dev_null
check_paths
restart_workers
;;
graceful)
check_dev_null
restart_workers_graceful
;;
kill)
check_dev_null
kill_workers
;;
dryrun)
check_dev_null
dryrun
;;
try-restart)
check_dev_null
check_paths
restart_workers
;;
create-paths)
check_dev_null
create_paths
;;
check-paths)
check_dev_null
check_paths
;;
*)
echo "Usage: /etc/init.d/${SCRIPT_NAME} {start|stop|restart|graceful|kill|dryrun|create-paths}"
exit 64 # EX_USAGE
;;
esac
exit 0
which I put in /etc/init.d/celeryd.
I've also got the following config file (also called celeryd), which lives in /etc/default/celeryd:
# Names of nodes to start
# most will only start one node:
CELERYD_NODES="worker"
# but you can also start multiple and configure settings
# for each in CELERYD_OPTS (see `celery multi --help` for examples).
# Absolute or relative path to the 'celery' command:
CELERY_BIN="/usr/local/bin/celery"
# App instance to use
# comment out this line if you don't use an app
CELERY_APP="proj"
# or fully qualified:
#CELERY_APP="proj.tasks:app"
# Where to chdir at start.
CELERYD_CHDIR="/home/drmclean/"
# Extra command-line arguments to the worker
CELERYD_OPTS="--time-limit=300 --concurrency=8"
# %N will be replaced with the first part of the nodename.
CELERYD_LOG_FILE="/var/log/celery/%N.log"
CELERYD_PID_FILE="/var/run/celery/%N.pid"
# Workers should run as an unprivileged user.
# You need to create this user manually (or you can choose
# a user/group combination that already exists, e.g. nobody).
CELERYD_USER="drmclean"
CELERYD_GROUP="drmclean"
# If enabled pid and log directories will be created if missing,
# and owned by the userid/group configured.
CELERY_CREATE_DIRS=1
I can easily start the celery service by using the command:
sudo /etc/init.d/celeryd start
and the service runs as I expect.
However, on start-up the service never runs. When I inspect the celery logfile, it says:
"[2014-09-17 16:27:41,541: ERROR/MainProcess] consumer: Cannot connect to amqp://guest:**#127.0.0.1:5672//: [Errno 111] Connection refused.
Trying again in 2.00 seconds..."
Can anyone help with this error? I can't see why the connection would be refused at start-up but not when I use the sudo /etc/init.d/celeryd start command.

Are you connecting from a remote server? If yes, the guest user is not allowed to connect from a remote server. See https://www.rabbitmq.com/access-control.html

No, the entire thing is running on my own machine. Actually, it appears that my celeryd script, which is in:
/etc/init.d/celeryd
is never running at start-up. It's unclear why it isn't, though.
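For what it's worth, "Connection refused" on 127.0.0.1:5672 at boot usually means either the broker (RabbitMQ) is not up yet when celeryd starts, or the init script was never registered for the boot runlevels at all. A rough way to check both on Ubuntu 14.04 (standard commands; the rabbitmq-server service name is an assumption based on the amqp:// URL in the log):
# is the broker actually running and listening on 5672?
sudo service rabbitmq-server status
sudo netstat -plnt | grep 5672
# is celeryd linked into the default runlevels at all?
ls /etc/rc2.d/ | grep -i celery
# if not, register it; the LSB header in the script (Default-Start: 2 3 4 5) supplies the runlevels
sudo update-rc.d celeryd defaults
If the symlinks exist but the worker still cannot connect at boot, making sure rabbitmq-server starts earlier in the boot sequence than celeryd (a lower S-number in /etc/rc2.d) is the usual fix.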

Related

Jenkins: pass a shell script variable to a downstream job

I'm new to Jenkins.
I have a job with an "Execute Shell" build step, and in that shell script I initialize some variables with values I take from some source.
I need to pass these values to a downstream job.
The values I want to pass are $IMG_NAME and $IMG_PATH from this shell script:
#!/bin/bash -x
whoami
echo "BASE_PATH: $BASE_PATH"
declare -A BRANCHES
for i in $(echo $BRANCHES_LIST | tr ',' '\n'); do BRANCHES[$i]= ; done
echo 'user' | sudo -S umount -rf /mnt/tlv-s-comp-prod/drops/
echo 'user' | sudo -S mount.nfs -o nolock,nfsvers=3 tlv-s-comp-prod:/export/drops /mnt/tlv-s-comp-prod/drops
ls /mnt/tlv-s-comp-prod/drops/
echo "cleanup workspace"
rm ${WORKSPACE}/*.txt &> /dev/null
i="0"
while [ $i -lt 6 ]
do
if [[ ${BASE_PATH} == *"Baseline"* ]]; then
unset BRANCHES[#]
declare -A BRANCHES
BRANCHES[Baseline]=
fi
for BRANCH in "${!BRANCHES[@]}"; do
echo "BRANCH: $BRANCH"
if [ $BRANCH == "Baseline" ]; then BRANCH=; fi
img_dir=$(ls -td -- ${BASE_PATH}/${BRANCH}/*/ | head -n 1)
echo "img_dir: $img_dir"
IMG_PATH=$(ls $img_dir*.rpm)
echo "IMG_PATH: $IMG_PATH"
cd $img_dir
IMG_NAME=$(ls *.rpm) > env.properties
if [ ! -z "$IMG_NAME" ]; then
if [ $(( $(date +%s) - $(stat -c %Z $IMG_PATH) )) -lt 10000800 ]; then
echo "IMG_NAME: ${IMG_NAME}"
#BRANCHES[$BRANCH]=$IMG_PATH
#echo "REG_OSA_SOFTSYNC_BUILD_IMG_FULL_PATH=${BRANCHES[$BRANCH]}" >> ${WORKSPACE}/$BRANCH.txt
echo "BRANCH_NAME=$BRANCH" >> ${WORKSPACE}/${BRANCH}_branch.txt
echo "REG_OSA_SOFTSYNC_BUILD_NAME=$BRANCH-$IMG_NAME" >> ${WORKSPACE}/${BRANCH}_branch.txt
else
echo "$IMG_NAME is out dated"
fi
else
echo "IMG_NAME is empty"
fi
BRANCH_NAME=""
done
$TEMP=$BRANCH_NAME
echo "TEMP: $TEMP"
if [ $(ls ${WORKSPACE}/*_branch.txt | wc -l) == $(echo ${#BRANCHES[@]}) ]; then break; fi
#for i in $(ls *_branch.txt); do i=$(echo $i | awk -F '_branch.txt' '{print $1}'); if [ $(echo ${!BRANCHES[@]} | grep $i | wc -l) == 0 ]; then state=1 break; fi done
i=$[$i+1]
sleep 1800
done
This is the "Trigger parameterized build on other projects" configuration:
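Regardless of the exact trigger configuration, one common approach with the Parameterized Trigger plugin is to write the values to a properties file at the end of the shell step and have the downstream trigger read it via "Parameters from properties file". A minimal sketch (the downstream.properties file name is an assumption):
# at the end of the Execute Shell step: persist the values for the downstream trigger
cat > "${WORKSPACE}/downstream.properties" <<EOF
IMG_NAME=${IMG_NAME}
IMG_PATH=${IMG_PATH}
EOF
Then, in "Trigger parameterized build on other projects", add "Parameters from properties file" pointing at downstream.properties; the downstream job receives IMG_NAME and IMG_PATH as build parameters.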

Ruby on Rails with Unicorn: rackup file not readable

I'm trying to build in a Ruby on Rails environment, but when I check the Unicorn status it gives me the following error and I don't know what to do. I checked the path to the config.rb and it's there.
/usr/local/rvm/gems/ruby-2.2.2/gems/unicorn-5.1.0/lib/unicorn/configurator.rb:644:in `parse_rackup_file': rackup file (status) not readable (ArgumentError)
from /usr/local/rvm/gems/ruby-2.2.2/gems/unicorn-5.1.0/lib/unicorn/configurator.rb:74:in `reload'
from /usr/local/rvm/gems/ruby-2.2.2/gems/unicorn-5.1.0/lib/unicorn/configurator.rb:65:in `initialize'
from /usr/local/rvm/gems/ruby-2.2.2/gems/unicorn-5.1.0/lib/unicorn/http_server.rb:76:in `new'
from /usr/local/rvm/gems/ruby-2.2.2/gems/unicorn-5.1.0/lib/unicorn/http_server.rb:76:in `initialize'
from /usr/local/rvm/gems/ruby-2.2.2/gems/unicorn-5.1.0/bin/unicorn:126:in `new'
from /usr/local/rvm/gems/ruby-2.2.2/gems/unicorn-5.1.0/bin/unicorn:126:in `<top (required)>'
from /usr/local/rvm/gems/ruby-2.2.2/bin/unicorn:23:in `load'
from /usr/local/rvm/gems/ruby-2.2.2/bin/unicorn:23:in `<main>'
from /usr/local/rvm/gems/ruby-2.2.2/bin/ruby_executable_hooks:15:in `eval'
from /usr/local/rvm/gems/ruby-2.2.2/bin/ruby_executable_hooks:15:in `<main>'
Unicorn init script
#!/bin/bash
### BEGIN INIT INFO
# Provides: unicorn
# Required-Start: $local_fs $remote_fs
# Required-Stop: $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: unicorn initscript
# Description: Unicorn is an HTTP server for Rack application
### END INIT INFO
# based on http://gist.github.com/308216 by http://github.com/mguterl
#
## A sample /etc/unicorn/my_app.conf
##
## RAILS_ENV=production
## RAILS_ROOT=/var/apps/www/my_app/current
## PID=$RAILS_ROOT/tmp/unicorn.pid
## START_CMD="bundle exec unicorn"
## USER="www-data"
#PATH=/usr/local/bin:/usr/bin:/bin
set -e
sig () {
test -s "$PID" && kill -$1 `cat "$PID"`
}
oldsig () {
test -s "$OLD_PID" && kill -$1 `cat "$OLD_PID"`
}
run_cmd() {
if [ -z "$SUDO" ]; then
$CMD
else
$SUDO "$CMD"
fi
}
cmd () {
case $1 in
start)
sig 0 && echo >&2 "Already running" && return
echo "Starting"
run_cmd
;;
stop)
sig QUIT && echo "Stopping" && return
echo >&2 "Not running"
;;
force-stop)
sig TERM && echo "Forcing a stop" && return
echo >&2 "Not running"
;;
restart|reload)
sig USR2 && sleep 5 && oldsig QUIT && echo "Killing old master" `cat $OLD_PID` && return
echo >&2 "Couldn't reload, starting '$CMD' instead"
run_cmd
;;
upgrade)
sig USR2 && echo Upgraded && return
echo >&2 "Couldn't upgrade, starting '$CMD' instead"
run_cmd
;;
rotate)
sig USR1 && echo rotated logs OK && return
echo >&2 "Couldn't rotate logs" && return
;;
status)
sig 0 && echo >&2 "Already running" && return
echo >&2 "Not running" && return
;;
*)
echo >&2 "Usage: $0 <start|stop|restart|upgrade|rotate|force-stop>"
return
;;
esac
}
setup () {
# echo -n "$RAILS_ROOT: "
cd $RAILS_ROOT || exit 1
"/usr/local/rvm/scripts/rvm"
#/usr/local/rvm/environments/ruby-1.9.2-p290#hacfest
if [ -z "$PID" ]; then
PID=$RAILS_ROOT/tmp/pids/unicorn.pid
fi
# if [ -z "$DATABASE_URL" ]; then
# DATABASE_URL=null
# fi
# export DATABASE_URL
export PID
export OLD_PID="$PID.oldbin"
export RAILS_ROOT
if [ -z "$START_CMD" ]; then
START_CMD="bundle exec unicorn"
fi
CMD="cd $RAILS_ROOT && $START_CMD -c $UNICORN_CONFIG -E $RAILS_ENV -D"
export CMD
echo "CMD: " $CMD
SUDO=""
# echo who: `whoami`
# echo user $USER
if [ "$USER" != `whoami` ]; then
SUDO="sudo -u $USER -s -H $RUNSHELL -c"
else
SUDO="$RUNSHELL -c"
fi
export SUDO
# echo "SUDO: "$SUDO
# echo $SHELL
}
start_stop () {
# either run the start/stop/reload/etc command for every config under /etc/unicorn
# or just do it for a specific one
# $1 contains the start/stop/etc command
# $2 if it exists, should be the specific config we want to act on
if [ -f "/etc/unicorn/$2.conf" ]; then
. /etc/unicorn/$2.conf
export UNICORN_CONFIG="/etc/unicorn/$2.unicorn.rb"
setup
cmd $1
else
for CONFIG in /etc/unicorn/*.conf; do
# import the variables
export UNICORN_CONFIG=`echo ${CONFIG} | sed 's/conf/unicorn.rb/'`
. $CONFIG
setup
# run the start/stop/etc command
cmd $1
unset PID
done
fi
}
ARGS="$1 $2"
start_stop $ARGS
Make sure that you run unicorn via command:
bundle exec unicorn -D -E <environment> -c config/unicorn.rb
And if it doesn't work, please share your file config/unicorn.rb
A file being present at a path does not mean it is readable by a particular user.
Check that the user you're running Unicorn under has access to the files and directories. Usually the code owner and the Unicorn user are the same, so chown -R that_username:that_username /path/to/app/source accordingly.
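To verify rather than guess, you can test readability as the Unicorn user directly. A quick sketch, assuming the user is deploy and using the sample path from the init script's comments:
# can the unicorn user actually read the rackup file?
sudo -u deploy test -r /var/apps/www/my_app/current/config.ru && echo "readable" || echo "NOT readable"
# if not, hand the tree over to the user that runs unicorn
sudo chown -R deploy:deploy /var/apps/www/my_app/current
Note that every parent directory also needs execute (traverse) permission for that user, not just the files themselves.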

Docker - sh service script doesn't take options

I have this Docker sh service script in /etc/init.d on a Debian 8 machine:
#!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: docker
# Required-Start: $syslog $remote_fs
# Required-Stop: $syslog $remote_fs
# Should-Start: cgroupfs-mount cgroup-lite
# Should-Stop: cgroupfs-mount cgroup-lite
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Create lightweight, portable, self-sufficient containers.
# Description:
# Docker is an open-source project to easily create lightweight, portable,
# self-sufficient containers from any application. The same container that a
# developer builds and tests on a laptop can run at scale, in production, on
# VMs, bare metal, OpenStack clusters, public clouds and more.
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
BASE=docker
# modify these in /etc/default/$BASE (/etc/default/docker)
DOCKER=/usr/bin/$BASE
# This is the pid file managed by docker itself
DOCKER_PIDFILE=/var/run/$BASE.pid
# This is the pid file created/managed by start-stop-daemon
DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid
DOCKER_LOGFILE=/var/log/$BASE.log
DOCKER_DESC="Docker"
DOCKER_OPTS="--insecure-registry 127.0.0.1:9000"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# Check docker is present
if [ ! -x $DOCKER ]; then
log_failure_msg "$DOCKER not present or not executable"
exit 1
fi
check_init() {
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
}
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$DOCKER_DESC must be run as root"
exit 1
fi
}
cgroupfs_mount() {
# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
if grep -v '^#' /etc/fstab | grep -q cgroup \
|| [ ! -e /proc/cgroups ] \
|| [ ! -d /sys/fs/cgroup ]; then
return
fi
if ! mountpoint -q /sys/fs/cgroup; then
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
fi
(
cd /sys/fs/cgroup
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
mkdir -p $sys
if ! mountpoint -q $sys; then
if ! mount -n -t cgroup -o $sys cgroup $sys; then
rmdir $sys || true
fi
fi
done
)
}
echo -n "$1"
case "$1" in
start)
echo "start"
check_init
fail_unless_root
cgroupfs_mount
touch "$DOCKER_LOGFILE"
chgrp docker "$DOCKER_LOGFILE"
ulimit -n 1048576
if [ "$BASH" ]; then
ulimit -u 1048576
else
ulimit -p 1048576
fi
echo $DOCKER_OPTS
log_begin_msg "Starting $DOCKER_DESC: $BASE"
start-stop-daemon --start --background \
--no-close \
--exec "$DOCKER" \
--pidfile "$DOCKER_SSD_PIDFILE" \
--make-pidfile \
-- \
daemon -p "$DOCKER_PIDFILE" \
$DOCKER_OPTS \
>> "$DOCKER_LOGFILE" 2>&1
log_end_msg $?
;;
stop)
check_init
fail_unless_root
log_begin_msg "Stopping $DOCKER_DESC: $BASE"
start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" --retry 10
log_end_msg $?
;;
restart)
check_init
fail_unless_root
docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null`
[ -n "$docker_pid" ] \
&& ps -p $docker_pid > /dev/null 2>&1 \
&& $0 stop
$0 start
;;
force-reload)
check_init
fail_unless_root
$0 restart
;;
status)
echo "Prova"
check_init
status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC"
echo "a"
;;
statu)
echo "Prova"
check_init
status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC"
echo "a"
;;
*)
echo "Usage: service docker {start|stop|restart|status} PIPPO"
exit 1
;;
esac
The problem with this is that it doesn't pass the DOCKER_OPTS value to the start-stop-daemon invocation in the start section (or at least the options don't work and don't appear in the command line of the resulting process in the ps aux output).
We've tried putting DOCKER_OPTS directly in the script as well as letting it be read from the default config file, but the result is the same: the options have no effect.
If we launch the process with start-stop-daemon directly from the terminal with the same options, it works just fine.
What could be the reason for this?
On a side note, we also played with the script a bit and found this strange situation:
we made a copy of the status section called statu, then called both, and they gave us different results.
status :
● docker.service - Docker Application Container Engine
Loaded: loaded (/lib/systemd/system/docker.service; enabled)
Active: active (running) since Thu 2016-02-25 12:40:43 CET; 4min 35s ago
Docs: https://docs.docker.com
statu:
[FAIL[....] Docker is not running ... failed!
Since the code is the same, this is quite a surprise! Why is that?
Systemd doesn't use the scripts in /etc/init.d. From your output, it's using the package default configuration in /lib/systemd/system/docker.service. If you want to make changes:
# make a local copy, /lib/systemd can be overwritten on an application upgrade
cp /lib/systemd/system/docker.service /etc/systemd/system/docker.service
# edit /etc/systemd/system/docker.service
systemctl daemon-reload # updates systemd from files on the disk
systemctl restart docker # restart the service with your changes
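If the goal is just to get --insecure-registry 127.0.0.1:9000 onto the daemon command line, append it to the ExecStart line in the copied unit; the existing part of that line varies with the Docker version (older packages use docker -d, newer ones docker daemon), so keep whatever is already there. A sketch of what it might end up looking like:
[Service]
ExecStart=/usr/bin/docker daemon -H fd:// --insecure-registry 127.0.0.1:9000
After systemctl daemon-reload and systemctl restart docker, the flag should show up in the ps aux output.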

Sidekiq does not start on EC2 server reboot

I have deployed my website on an EC2 Rails server running CentOS. How can I run Sidekiq when the EC2 server is rebooted? I followed http://dxta.github.io/blog/2014/03/06/init-script-for-sidekiq-in-centos/ and wrote a bash script like the one below, but Sidekiq doesn't restart as expected:
"#! /bin/bash
#
# sidekiq Init script for sidekiq
#
# chkconfig: 345 99 1
# description: Starts and stops sidekiq message processor
# Source function library.
# . /etc/rc.d/init.d/functions
# You will need to modify these
APP=""sps_qa""
AS_USER=""ec2-user""
APP_DIR=""/home/ec2-user/www/sps_qa/current""
APP_CONFIG=""/home/ec2-user/www/sps_qa/current/config""
LOG_FILE=""/home/ec2-user/www/sps_qa/current/log/sidekiq.log""
LOCK_FILE=""$APP_DIR/${APP}-lock""
PID_FILE=""$APP_DIR/${APP}.pid""
GEMFILE=""$APP_DIR/Gemfile""
SIDEKIQ=""sidekiq""
APP_ENV=""qa""
BUNDLE=""bundle""
# [ -e /etc/sysconfig/sidekiq-your_app ] && . /etc/sysconfig/sidekiq- your_app
START_CMD=""exec ~/.rvm/bin/rvm-shell -c '$BUNDLE exec $SIDEKIQ -q mailer -q default -e $APP_ENV -P $PID_FILE'""
CMD=""source /home/ec2-user/.rvm/scripts/rvm; cd ${APP_DIR}; ${START_CMD} >> ${LOG_FILE} 2>&1 &""
RETVAL=0
start() {
status
if [ $? -eq 1 ]; then
[ `id -u` == '0' ] || (echo ""$SIDEKIQ runs as root only ..""; exit 5)
[ -d $APP_DIR ] || (echo ""$APP_DIR not found!.. Exiting""; exit 6)
cd $APP_DIR
echo ""Starting $SIDEKIQ message processor .. ""
su -c ""$CMD"" - $AS_USER
RETVAL=$?
#Sleeping for 8 seconds for process to be precisely visible in process table - See status ()
sleep 8
[ $RETVAL -eq 0 ] && touch $LOCK_FILEd
"
return $RETVAL
else
echo "$SIDEKIQ message processor is already running .. "
fi
}
stop() {
echo "Stopping $SIDEKIQ message processor .."
SIG="INT"
kill -$SIG `cat $PID_FILE`
RETVAL=$?
[ $RETVAL -eq 0 ] && rm -f $LOCK_FILE
return $RETVAL
}
status() {
ps -ef | grep 'sidekiq [0-9].[0-9].[0-9]' | grep -v grep
return $?
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
restart
;;
reload)
reload
;;
force_reload)
force_reload
;;
status)
status
if [ $? -eq 0 ]; then
echo "$SIDEKIQ message processor is running .."
RETVAL=0
else
echo "$SIDEKIQ message processor is stopped .."
RETVAL=1
fi
;;
*)
exit 0
;;
esac
exit $RETVAL
Currently I am running Sidekiq manually:
bundle exec sidekiq -q mailer -q default -e qa -d -L /home/ec2-user/www/sps_qa/current/log/sidekiq.log 2>&1
Create an upstart job so that sidekiq is fired off during boot. There's an example in the sidekiq wiki. Change the params to match yours. https://github.com/mperham/sidekiq/blob/master/examples/upstart/manage-one/sidekiq.conf
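If you would rather keep the SysV init script from the question, note that on CentOS it also has to be registered with chkconfig before it runs at boot; the # chkconfig: 345 99 1 header in the script supplies the runlevels. Assuming it is installed as /etc/init.d/sidekiq:
sudo chmod +x /etc/init.d/sidekiq
sudo chkconfig --add sidekiq
sudo chkconfig sidekiq on
sudo chkconfig --list sidekiq   # runlevels 3, 4 and 5 should show "on"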

Puma, trying to stop server, invalid signal number or name

I'm trying to stop the Puma server with a script that I found here --> script
#!/usr/bin/env bash
# Simply move this file into your Rails `script` folder. Also make sure you `chmod +x puma.sh`.
# Please modify the CONSTANT variables to fit your configurations.
# The script will start with config set by $PUMA_CONFIG_FILE by default
PUMA_CONFIG_FILE=config/puma.rb
PUMA_PID_FILE=tmp/pids/puma.pid
PUMA_SOCKET=tmp/sockets/puma.sock
# check if puma process is running
puma_is_running() {
if [ -S $PUMA_SOCKET ] ; then
if [ -e $PUMA_PID_FILE ] ; then
if cat $PUMA_PID_FILE | xargs pgrep -P > /dev/null ; then
return 0
else
echo "No puma process found"
fi
else
echo "No puma pid file found"
fi
else
echo "No puma socket found"
fi
return 1
}
case "$1" in
start)
echo "Starting puma..."
rm -f $PUMA_SOCKET
if [ -e $PUMA_CONFIG_FILE ] ; then
bundle exec puma --config $PUMA_CONFIG_FILE
else
bundle exec puma --daemon --bind unix://$PUMA_SOCKET --pidfile $PUMA_PID_FILE
fi
echo "done"
;;
stop)
echo "Stopping puma..."
kill -s SIGTERM `cat $PUMA_PID_FILE`
rm -f $PUMA_PID_FILE
rm -f $PUMA_SOCKET
echo "done"
;;
restart)
if puma_is_running ; then
echo "Hot-restarting puma..."
kill -s SIGUSR2 `cat $PUMA_PID_FILE`
echo "Doublechecking the process restart..."
sleep 5
if puma_is_running ; then
echo "done"
exit 0
else
echo "Puma restart failed :/"
fi
fi
echo "Trying cold reboot"
script/puma.sh start
;;
*)
echo "Usage: script/puma.sh {start|stop|restart}" >&2
;;
esac
When I try to stop it, it gives me this error:
/etc/init.d/puma: 54: kill: invalid signal number or name: SIGTERM
What am I missing here?
When this script is executed, it's using a different version of kill that doesn't support those arguments.
You should be able to change kill -s SIGTERM to just kill -15 (this sends signal 15, which is SIGTERM).
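In other words, the stop) branch of the script would end up roughly like this (either the numeric signal or the name without the SIG prefix works with the POSIX sh built-in kill):
stop)
echo "Stopping puma..."
kill -15 `cat $PUMA_PID_FILE`   # or: kill -TERM `cat $PUMA_PID_FILE`
rm -f $PUMA_PID_FILE
rm -f $PUMA_SOCKET
echo "done"
;;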
