I am running Docker 17.03.0-ce on-premise on an RHEL 7.3 64-bit VM. I pull a WebLogic 10.3.6 RHEL image and then modify the WebLogic domain config file that my application needs, passing the VM's IP into docker run via "VM_IP=$(hostname -i)". To do the substitution I added a sed command to the Dockerfile, alongside the server start script, which replaces a placeholder in the config file with the VM IP. However, when the server starts, the config file reverts to its original state, i.e. the placeholder is never replaced. I have observed that if I comment out the server start script, the placeholder is replaced with the proper value. I tried executing the sed from a shell script used as the entrypoint, and also tried running the sed command from the Dockerfile.
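For reference, the container is started roughly like this (a sketch: the -e flag and the image tag my_wcp_app are illustrative, not taken from the actual setup):
VM_IP=$(hostname -i)
docker run -d -e VM_IP="$VM_IP" my_wcp_app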
Below is my Dockerfile
FROM 192.168.1.1:5000/wcp_image:v6
WORKDIR /
USER root
ENV VM_IP=$vm_ip
RUN rm -rf /data/Oracle/Middleware/user_projects/domains/WCP_DOMAIN/config/config.xml
COPY config.xml /data/Oracle/Middleware/user_projects/domains/WCP_DOMAIN/config/
RUN chmod 777 /data/Oracle/Middleware/user_projects/domains/WCP_DOMAIN/config/config.xml && \
    chown -R wcpuser.wcpuser /data/Oracle/Middleware/user_projects/domains/WCP_DOMAIN/config/config.xml
COPY replace.sh /data/docker_setup
RUN chmod 777 /data/docker_setup/replace.sh
ENTRYPOINT ["/data/docker_setup/replace.sh"]
CMD ["$VM_IP"]
And below is the replace.sh shell script
#!/bin/bash
cd /data/Oracle/Middleware/user_projects/domains/WCP_DOMAIN/config
sed -i -r "s/VM_IP/$VM_IP/g" /data/Oracle/Middleware/user_projects/domains/WCP_DOMAIN/config/config.xml
/data/Oracle/Middleware/user_projects/domains/WCP_DOMAIN/bin/startWebLogic.sh
Please let me know what I might be missing. Thanks in advance.
Update
Below is the startWebLogic.sh script from the container
#!/bin/sh
# WARNING: This file is created by the Configuration Wizard.
# Any changes to this script may be lost when adding extensions to this configuration.
# --- Start Functions ---
stopAll()
{
# We separate the stop commands into a function so we are able to use the trap command in Unix (calling a function) to stop these services
if [ "X${ALREADY_STOPPED}" != "X" ] ; then
exit
fi
# STOP DERBY (only if we started it)
if [ "${DERBY_FLAG}" = "true" ] ; then
echo "Stopping Derby server..."
${WL_HOME}/common/derby/bin/stopNetworkServer.sh >"${DOMAIN_HOME}/derbyShutdown.log" 2>&1
echo "Derby server stopped."
fi
ALREADY_STOPPED="true"
}
classCaching()
{
echo "Class caching enabled..."
JAVA_OPTIONS="${JAVA_OPTIONS} -Dlaunch.main.class=${SERVER_CLASS} -Dlaunch.class.path="${CLASSPATH}" -Dlaunch.complete=weblogic.store.internal.LockManagerImpl -cp ${WL_HOME}/server/lib/pcl2.jar"
export JAVA_OPTIONS
SERVER_CLASS="com.oracle.classloader.launch.Launcher"
}
# --- End Functions ---
# *************************************************************************
# This script is used to start WebLogic Server for this domain.
#
# To create your own start script for your domain, you can initialize the
# environment by calling #USERDOMAINHOME/setDomainEnv.
#
# setDomainEnv initializes or calls commEnv to initialize the following variables:
#
# BEA_HOME - The BEA home directory of your WebLogic installation.
# JAVA_HOME - Location of the version of Java used to start WebLogic
# Server.
# JAVA_VENDOR - Vendor of the JVM (i.e. BEA, HP, IBM, Sun, etc.)
# PATH - JDK and WebLogic directories are added to system path.
# WEBLOGIC_CLASSPATH
# - Classpath needed to start WebLogic Server.
# PATCH_CLASSPATH - Classpath used for patches
# PATCH_LIBPATH - Library path used for patches
# PATCH_PATH - Path used for patches
# WEBLOGIC_EXTENSION_DIRS - Extension dirs for WebLogic classpath patch
# JAVA_VM - The java arg specifying the VM to run. (i.e.
# - server, -hotspot, etc.)
# USER_MEM_ARGS - The variable to override the standard memory arguments
# passed to java.
# PRODUCTION_MODE - The variable that determines whether Weblogic Server is started in production mode.
# DERBY_HOME - Derby home directory.
# DERBY_CLASSPATH
# - Classpath needed to start Derby.
#
# Other variables used in this script include:
# SERVER_NAME - Name of the weblogic server.
# JAVA_OPTIONS - Java command-line options for running the server. (These
# will be tagged on to the end of the JAVA_VM and
# MEM_ARGS)
# CLASS_CACHE - Enable class caching of system classpath.
#
# For additional information, refer to "Managing Server Startup and Shutdown for Oracle WebLogic Server"
# (http://download.oracle.com/docs/cd/E17904_01/web.1111/e13708/overview.htm).
# *************************************************************************
umask 037
# Call setDomainEnv here.
DOMAIN_HOME="/data/Oracle/Middleware/user_projects/domains/WCP_DOMAIN"
. ${DOMAIN_HOME}/bin/setDomainEnv.sh $*
SAVE_JAVA_OPTIONS="${JAVA_OPTIONS}"
SAVE_CLASSPATH="${CLASSPATH}"
# Start Derby
DERBY_DEBUG_LEVEL="0"
if [ "${DERBY_FLAG}" = "true" ] ; then
${WL_HOME}/common/derby/bin/startNetworkServer.sh >"${DOMAIN_HOME}/derby.log" 2>&1
fi
JAVA_OPTIONS="${SAVE_JAVA_OPTIONS}"
SAVE_JAVA_OPTIONS=""
CLASSPATH="${SAVE_CLASSPATH}"
SAVE_CLASSPATH=""
trap 'stopAll' 1 2 3 15
if [ "${PRODUCTION_MODE}" = "true" ] ; then
WLS_DISPLAY_MODE="Production"
else
WLS_DISPLAY_MODE="Development"
fi
if [ "${WLS_USER}" != "" ] ; then
JAVA_OPTIONS="${JAVA_OPTIONS} -Dweblogic.management.username=${WLS_USER}"
fi
if [ "${WLS_PW}" != "" ] ; then
JAVA_OPTIONS="${JAVA_OPTIONS} -Dweblogic.management.password=${WLS_PW}"
fi
if [ "${MEDREC_WEBLOGIC_CLASSPATH}" != "" ] ; then
if [ "${CLASSPATH}" != "" ] ; then
CLASSPATH="${CLASSPATH}${CLASSPATHSEP}${MEDREC_WEBLOGIC_CLASSPATH}"
else
CLASSPATH="${MEDREC_WEBLOGIC_CLASSPATH}"
fi
fi
echo "."
echo "."
echo "JAVA Memory arguments: ${MEM_ARGS}"
echo "."
echo "WLS Start Mode=${WLS_DISPLAY_MODE}"
echo "."
echo "CLASSPATH=${CLASSPATH}"
echo "."
echo "PATH=${PATH}"
echo "."
echo "***************************************************"
echo "* To start WebLogic Server, use a username and *"
echo "* password assigned to an admin-level user. For *"
echo "* server administration, use the WebLogic Server *"
echo "* console at http://hostname:port/console *"
echo "***************************************************"
# CLASS CACHING
if [ "${CLASS_CACHE}" = "true" ] ; then
classCaching
fi
# START WEBLOGIC
echo "starting weblogic with Java version:"
${JAVA_HOME}/bin/java ${JAVA_VM} -version
if [ "${WLS_REDIRECT_LOG}" = "" ] ; then
echo "Starting WLS with line:"
echo "${JAVA_HOME}/bin/java ${JAVA_VM} ${MEM_ARGS} -Dweblogic.Name=${SERVER_NAME} -Djava.security.policy=${WL_HOME}/server/lib/weblogic.policy ${JAVA_OPTIONS} ${PROXY_SETTINGS} ${SERVER_CLASS}"
${JAVA_HOME}/bin/java ${JAVA_VM} ${MEM_ARGS} -Dweblogic.Name=${SERVER_NAME} -Djava.security.policy=${WL_HOME}/server/lib/weblogic.policy ${JAVA_OPTIONS} ${PROXY_SETTINGS} ${SERVER_CLASS}
else
echo "Redirecting output from WLS window to ${WLS_REDIRECT_LOG}"
${JAVA_HOME}/bin/java ${JAVA_VM} ${MEM_ARGS} -Dweblogic.Name=${SERVER_NAME} -Djava.security.policy=${WL_HOME}/server/lib/weblogic.policy ${JAVA_OPTIONS} ${PROXY_SETTINGS} ${SERVER_CLASS} >"${WLS_REDIRECT_LOG}" 2>&1
fi
stopAll
popd
# Exit this script only if we have been told to exit.
if [ "${doExitFlag}" = "true" ] ; then
exit
fi
Related
I am trying to import a pipeline into StreamSets during container startup by using the Docker CMD command in the Dockerfile. The image builds, and the container is created without errors, but it exits with code 0 and never comes up. Here is what I did:
Dockerfile:
FROM streamsets/datacollector:3.18.1
COPY myPipeline.json /pipelinejsonlocation/
EXPOSE 18630
ENTRYPOINT ["/bin/sh"]
CMD ["/opt/streamsets-datacollector-3.18.1/bin/streamsets","cli","-U", "http://localhost:18630", \
"-u", \
"admin", \
"-p", \
"admin", \
"store", \
"import", \
"-n", \
"myPipeline", \
"--stack", \
"-f", \
"/pipelinejsonlocation/myPipeline.json"]
Build image:
docker build -t cmp/sdc .
Run image:
docker run -p 18630:18630 -d --name sdc cmp/sdc
This outputs the container id. But the container is in the Exited status as shown below.
docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
537adb1b05ab cmp/sdc "/bin/sh /opt/stream…" 5 seconds ago Exited (0) 3 seconds ago sdc
When I do not specify the CMD command in the Dockerfile, the StreamSets container spins up, and if I then run the streamsets import command in a shell inside the running container, it works. But how do I get this done during provisioning itself? Is there something I am missing in the Dockerfile?
In your Dockerfile you overwrite the default CMD and ENTRYPOINT from the StreamSets Data Collector Dockerfile. So the container only executes your command during startup and exits without errors afterwards. This is the reason why your container is in Exited (0) status.
In general this is good and expected behavior. If you want to keep your container alive, you need to execute another command in the foreground that never ends. But unfortunately, you cannot run multiple CMDs in your Dockerfile.
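For illustration, a container stays alive only while its foreground (PID 1) process runs; a trivial never-ending command is enough to keep it up (a generic placeholder pattern, not a fix for the import itself):
CMD ["tail", "-f", "/dev/null"]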
I dug a little deeper. The default entry point of the image is ENTRYPOINT ["/docker-entrypoint.sh"]. This script sets up a few things and starts the Data Collector.
It is required that the Data Collector is running before the pipeline is imported. So a solution could be to copy the default docker-entrypoint.sh and modify it to start the Data Collector and import the pipeline afterwards. You could do it like this:
Dockerfile:
FROM streamsets/datacollector:3.18.1
COPY myPipeline.json /pipelinejsonlocation/
# Replace docker-entrypoint.sh
COPY docker-entrypoint.sh /docker-entrypoint.sh
EXPOSE 18630
docker-entrypoint.sh (https://github.com/streamsets/datacollector-docker/blob/master/docker-entrypoint.sh):
#!/bin/bash
#
# Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
# We translate environment variables to sdc.properties and rewrite them.
set_conf() {
if [ $# -ne 2 ]; then
echo "set_conf requires two arguments: <key> <value>"
exit 1
fi
if [ -z "$SDC_CONF" ]; then
echo "SDC_CONF is not set."
exit 1
fi
grep -q "^$1" ${SDC_CONF}/sdc.properties && sed 's|^#\?\('"$1"'=\).*|\1'"$2"'|' -i ${SDC_CONF}/sdc.properties || echo -e "\n$1=$2" >> ${SDC_CONF}/sdc.properties
}
# support arbitrary user IDs
# ref: https://docs.openshift.com/container-platform/3.3/creating_images/guidelines.html#openshift-container-platform-specific-guidelines
if ! whoami &> /dev/null; then
if [ -w /etc/passwd ]; then
echo "${SDC_USER:-sdc}:x:$(id -u):0:${SDC_USER:-sdc} user:${HOME}:/sbin/nologin" >> /etc/passwd
fi
fi
# In some environments such as Marathon $HOST and $PORT0 can be used to
# determine the correct external URL to reach SDC.
if [ ! -z "$HOST" ] && [ ! -z "$PORT0" ] && [ -z "$SDC_CONF_SDC_BASE_HTTP_URL" ]; then
export SDC_CONF_SDC_BASE_HTTP_URL="http://${HOST}:${PORT0}"
fi
for e in $(env); do
key=${e%=*}
value=${e#*=}
if [[ $key == SDC_CONF_* ]]; then
lowercase=$(echo $key | tr '[:upper:]' '[:lower:]')
key=$(echo ${lowercase#*sdc_conf_} | sed 's|_|.|g')
set_conf $key $value
fi
done
# MODIFICATIONS:
#exec "${SDC_DIST}/bin/streamsets" "$#"
check_data_collector_status () {
  # poll once per second until the Data Collector answers the ping
  until ${SDC_DIST}/bin/streamsets cli -U http://localhost:18630 ping 2>/dev/null | grep -q 'version'; do sleep 1; done
  echo "Data Collector has started!" && import_pipeline
}
function import_pipeline () {
sleep 1
echo "Start to import pipeline"
${SDC_DIST}/bin/streamsets cli -U http://localhost:18630 -u admin -p admin store import -n myPipeline --stack -f /pipelinejsonlocation/myPipeline.json
echo "Finished importing pipeline"
}
# Start checking if Data Collector is up (in background) and start Data Collector
check_data_collector_status & ${SDC_DIST}/bin/streamsets $#
I commented out the last line exec "${SDC_DIST}/bin/streamsets" "$@" of the default docker-entrypoint.sh and added two functions. check_data_collector_status () pings the Data Collector service until it is available. import_pipeline () imports your pipeline.
check_data_collector_status () runs in the background, and ${SDC_DIST}/bin/streamsets "$@" is started in the foreground as before. So the pipeline is imported once the Data Collector service has started.
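Rebuilding and running as before should then perform the import once the service is up; checking the logs is one way to verify (the "Finished importing pipeline" message comes from import_pipeline above):
docker build -t cmp/sdc .
docker run -p 18630:18630 -d --name sdc cmp/sdc
docker logs -f sdc   # wait for "Finished importing pipeline"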
Run this image with a sleep command:
docker run -p 18630:18630 -d --name sdc cmp/sdc sleep 300
300 is the time to sleep in seconds.
Then exec into the container, run your script manually, and find out what's wrong.
I am actually new to programming and Docker. I tried to rebuild the graphile/postgraphile:4.7.0 image to add DATABASE_URL_FILE environment variable support. This is what I did, and the output follows.
Dockerfile
FROM node:alpine
LABEL description="Instant high-performance GraphQL API for your PostgreSQL database https://github.com/graphile/postgraphile"
# Install PostGraphile and PostGraphile connection filter plugin
RUN npm install -g postgraphile
RUN npm install -g postgraphile-plugin-connection-filter
EXPOSE 5000
# patch postgraphile:4.7.1; DATABASE_URL_FILE environment variable implementation.
RUN apk add bash
COPY docker-entrypoint.sh /usr/local/bin/
RUN ln -s /usr/local/bin/docker-entrypoint.sh / # backwards compat
ENTRYPOINT ["docker-entrypoint.sh"]
CMD ["postgraphile", "-n", "0.0.0.0"]
docker-entrypoint.sh copied from postgres docker-entrypoint.sh
#!/usr/bin/env bash
# usage: file_env VAR [DEFAULT]
# ie: file_env 'XYZ_DB_PASSWORD' 'example'
# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
file_env() {
local var="$1"
local fileVar="${var}_FILE"
local def="${2:-}"
if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
exit 1
fi
local val="$def"
if [ "${!var:-}" ]; then
val="${!var}"
elif [ "${!fileVar:-}" ]; then
val="$(< "${!fileVar}")"
fi
export "$var"="$val"
unset "$fileVar"
}
# Loads various settings that are used elsewhere in the script
# This should be called before any other functions
docker_setup_env() {
file_env 'DATABASE_URL'
}
output
$ docker-compose up postgraphile
Attaching to test_postgraphile_1
test_postgraphile_1 | exited with code 0
Sincerely,
kidfrom
Edit: Benjie (the PostGraphile maintainer) helped me a lot on this one. The following should work as is.
Dockerfile
FROM node:alpine
LABEL description="Instant high-performance GraphQL API for your PostgreSQL database https://github.com/graphile/postgraphile"
# Install PostGraphile and PostGraphile connection filter plugin
RUN npm install -g postgraphile
RUN npm install -g postgraphile-plugin-connection-filter
EXPOSE 5000
# patch postgraphile:4.7.1; DATABASE_URL_FILE environment variable implementation.
RUN apk add bash
COPY docker-entrypoint.sh /usr/local/bin/
RUN ln -s /usr/local/bin/docker-entrypoint.sh / # backwards compat
ENTRYPOINT ["docker-entrypoint.sh"]
CMD ["postgraphile", "-n", "0.0.0.0"]
docker-entrypoint.sh copied from postgres docker-entrypoint.sh
#!/usr/bin/env bash
# usage: file_env VAR [DEFAULT]
# ie: file_env 'XYZ_DB_PASSWORD' 'example'
# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
file_env() {
local var="$1"
local fileVar="${var}_FILE"
local def="${2:-}"
if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
exit 1
fi
local val="$def"
if [ "${!var:-}" ]; then
val="${!var}"
elif [ "${!fileVar:-}" ]; then
val="$(< "${!fileVar}")"
fi
export "$var"="$val"
unset "$fileVar"
}
# Loads various settings that are used elsewhere in the script
# This should be called before any other functions
docker_setup_env() {
file_env 'DATABASE_URL'
}
# call the function
docker_setup_env
# call postgraphile
exec "$#"
Sincerely,
kidfrom
I need to hide the Postgres password in the Odoo container the same way the postgres container does: when we create a new postgres container we can pass the POSTGRES_PASSWORD_FILE environment variable, and the docker-entrypoint.sh of postgres 9.6 contains this block of code:
#!/usr/bin/env bash
set -Eeo pipefail
# TODO swap to -Eeuo pipefail above (after handling all potentially-unset variables)
# usage: file_env VAR [DEFAULT]
# ie: file_env 'XYZ_DB_PASSWORD' 'example'
# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
file_env() {
local var="$1"
local fileVar="${var}_FILE"
local def="${2:-}"
if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
exit 1
fi
local val="$def"
if [ "${!var:-}" ]; then
val="${!var}"
elif [ "${!fileVar:-}" ]; then
val="$(< "${!fileVar}")"
fi
export "$var"="$val"
unset "$fileVar"
}
How can we do the same in the odoo image?
You can try adding your whole odoo.conf as a secret, creating your service with that secret via the --secret flag, and pointing your entrypoint at the path /run/secrets/${MY_SECRET} with the -c flag of the odoo command.
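A minimal sketch of that approach, assuming Swarm mode (the secret name odoo_conf and the published port are placeholders):
# store the full odoo.conf (including db_password) as a secret
docker secret create odoo_conf ./odoo.conf
# create the service and point Odoo at the secret file
docker service create --name odoo \
  --secret odoo_conf \
  -p 8069:8069 \
  odoo:latest odoo -c /run/secrets/odoo_conf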
I've deployed an app in production in an Ubuntu Server VM. It uses Puma, so I've followed this guide: https://www.digitalocean.com/community/tutorials/how-to-deploy-a-rails-app-with-puma-and-nginx-on-ubuntu-14-04
to configure it there (it is currently working properly on heroku, we are looking to migrate it to this new server).
This is my /etc/init/puma-manager.conf
# /etc/init/puma-manager.conf - manage a set of Pumas
description "Manages the set of puma processes"
# This starts upon bootup and stops on shutdown
start on runlevel [2345]
stop on runlevel [06]
# Set this to the file that lists the Puma apps
# you want to run on this machine
env PUMA_CONF="/etc/puma.conf"
pre-start script
for i in `cat $PUMA_CONF`; do
app=`echo $i | cut -d , -f 1`
logger -t "puma-manager" "Starting $app"
start puma app=$app
done
end script
And my /etc/init/puma.conf
description "Puma Background Worker"
# no "start on", we don't want to automatically start
stop on (stopping puma-manager or runlevel [06])
# change apps to match your deployment user if you want to use this as a less privileged user (recommended!)
setuid user
setgid user
respawn
respawn limit 3 30
instance ${app}
script
# source ENV variables manually as Upstart doesn't, eg:
. /etc/server-vars
exec /bin/bash <<'EOT'
# set HOME to the setuid user's home, there doesn't seem to be a better, portable way
export HOME="$(eval echo ~$(id -un))"
if [ -d "/usr/local/rbenv/bin" ]; then
export PATH="/usr/local/rbenv/bin:/usr/local/rbenv/shims:$PATH"
elif [ -d "$HOME/.rbenv/bin" ]; then
export PATH="$HOME/.rbenv/bin:$HOME/.rbenv/shims:$PATH"
elif [ -f /etc/profile.d/rvm.sh ]; then
source /etc/profile.d/rvm.sh
elif [ -f /usr/local/rvm/scripts/rvm ]; then
source /usr/local/rvm/scripts/rvm
elif [ -f "$HOME/.rvm/scripts/rvm" ]; then
source "$HOME/.rvm/scripts/rvm"
elif [ -f /usr/local/share/chruby/chruby.sh ]; then
source /usr/local/share/chruby/chruby.sh
if [ -f /usr/local/share/chruby/auto.sh ]; then
source /usr/local/share/chruby/auto.sh
fi
# if you aren't using auto, set your version here
# chruby 2.0.0
fi
cd $app
logger -t puma "Starting server: $app"
exec bundle exec puma -C config/puma.rb
EOT
end script
It works properly, BUT it is not setting the ENV variables I specify in /etc/server-vars. I don't want to put all the ENV vars directly into this script because there are many of them, and it limits the reusability of the script.
The solution for me was to use "set -a" before sourcing the environment file. Here's the documentation describing what set -a does: The Set Builtin
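A minimal illustration of the difference (a bash sketch; the file /tmp/vars and the variable FOO are placeholders):
echo 'FOO=bar' > /tmp/vars
# plain sourcing sets the variable only in the current shell
. /tmp/vars
sh -c 'echo "child sees: $FOO"'   # child sees nothing
# set -a marks every assignment for export, so child processes inherit it
set -a
. /tmp/vars
set +a
sh -c 'echo "child sees: $FOO"'   # child sees: bar
This matters here because puma runs as a child process of the Upstart script, so only exported variables reach it.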
Try 'set -a' before sourcing your environment file as you can see in the following example:
# /etc/init/puma.conf - Puma config
# This example config should work with Ubuntu 12.04+. It
# allows you to manage multiple Puma instances with
# Upstart, Ubuntu's native service management tool.
#
# See puma-manager.conf for how to manage all Puma instances at once.
#
# Save this config as /etc/init/puma.conf then manage puma with:
# sudo start puma app=PATH_TO_APP
# sudo stop puma app=PATH_TO_APP
# sudo status puma app=PATH_TO_APP
#
# or use the service command:
# sudo service puma {start,stop,restart,status}
#
description "Puma Background Worker"
# no "start on", we don't want to automatically start
start on runlevel [2345]
stop on runlevel [06]
# change apps to match your deployment user if you want to use this as a less privileged user (recommended!)
setuid deploy
setgid deploy
respawn
respawn limit 3 30
instance ${app}
script
# this script runs in /bin/sh by default
# respawn as bash so we can source in rbenv/rvm
# quoted heredoc to tell /bin/sh not to interpret
# variables
# source ENV variables manually as Upstart doesn't, eg:
#. /etc/environment
exec /bin/bash <<'EOT'
set -a
. /etc/environment
# set HOME to the setuid user's home, there doesn't seem to be a better, portable way
export HOME="$(eval echo ~$(id -un))"
if [ -d "/usr/local/rbenv/bin" ]; then
export PATH="/usr/local/rbenv/bin:/usr/local/rbenv/shims:$PATH"
elif [ -d "$HOME/.rbenv/bin" ]; then
export PATH="$HOME/.rbenv/bin:$HOME/.rbenv/shims:$PATH"
elif [ -f /etc/profile.d/rvm.sh ]; then
source /etc/profile.d/rvm.sh
elif [ -f /usr/local/rvm/scripts/rvm ]; then
source /usr/local/rvm/scripts/rvm
elif [ -f "$HOME/.rvm/scripts/rvm" ]; then
source "$HOME/.rvm/scripts/rvm"
elif [ -f /usr/local/share/chruby/chruby.sh ]; then
source /usr/local/share/chruby/chruby.sh
if [ -f /usr/local/share/chruby/auto.sh ]; then
source /usr/local/share/chruby/auto.sh
fi
# if you aren't using auto, set your version here
# chruby 2.0.0
fi
logger -t puma "Starting server: $app"
cd $app
exec bundle exec puma -C /home/deploy/brilliant/config/puma.rb
EOT
end script
We're using the BeagleBone Black running Angstrom Linux and the opkg package manager to power some of our systems. We need to ensure that we have consistent and reliable access to specific versions of opkg packages, so I've set up an in-house opkg repository. Is there any way to sync packages between repositories? E.g. I'd like to copy specific packages from public / not-always-accessible repositories to our internal repository, both for speed and for reliable access.
After some fooling around with various packages, I found a way of cloning (parts of) a repository using an Ubuntu system. Here are the steps I took:
# Install apache
sudo apt-get install apache2
# Install git
sudo apt-get install git
# Download the opkg-utils from the Yocto Project
git clone http://git.yoctoproject.org/git/opkg-utils
# Build the opkg-utils
cd opkg-utils && make; cd -
# Move them to a common directory
mv opkg-utils /usr/local/share
# Add them to my path
echo "PATH=\"\$PATH:/usr/local/share/opkg-utils\"" >> /etc/environment
# Update my environment
source /etc/environment
# Create the structure of my repository
mkdir -p /var/www/repositories/opkg/beaglebone
# Create an index for the packages
opkg-make-index -l Packages.filelist -p Packages /var/www/repositories/opkg/beaglebone
cd /var/www/repositories/opkg/beaglebone
gzip -c Packages > Packages.gz
On my client BeagleBone Blacks, to setup access to this repository:
echo "src/gz reponame http://myserver/repositories/opkg/beaglebone" > /etc/opkg/rms-feed.conf
chmod 666 /etc/opkg/reponame-feed.conf
opkg update
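After the update, packages from the internal feed can be listed and installed as usual (mypackage is a placeholder name):
opkg list | grep mypackage
opkg install mypackage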
On my developer machines, any time I need to backup a package:
#!/bin/bash
###############################################################################
#
# bbb_clone_package_to_internal_repo.sh
#
# Description:
# Clones an ipkg / opkg package to the internal repository server so that it can be deployed
# to BeagleBone Black clients on demand. This is so that we can have backups in
# the event that a public server becomes temporarily or permanently
# inaccessible.
#
# Pre-conditions:
# 1) The given package file must exist at the path specified.
#
# Post-conditions:
# 1) The given package file will be sent to the internal repository server.
# 2) The opkg repository indexes will all be updated
#
# Parameters:
# -p <file path.opk> : The package to be cloned
#
###############################################################################
PACKAGE_FILE_PATH=""
SERVER="myserver"
ERR_INVALID_PACKAGE_FILE_NAME=1
ERR_PACKAGE_FILE_NOT_ACCESSIBLE=2
ERR_FAILED_TO_COPY_PACKAGE_TO_SERVER=3
ERR_FAILED_TO_DEPLOY_PACKAGE_ON_SERVER=4
usage()
{
cat << EOF
usage: $0 [options]
This script copies a remote ipkg/opkg file to the $SERVER server for subsequent
deployment to BeagleBone Black boards.
OPTIONS:
-p <file path.[io]pk> The package file to be deployed
-h,? Show this message
EOF
}
while getopts "p:h?" OPTION
do
case $OPTION in
p)
PACKAGE_FILE_PATH="$OPTARG"
;;
h)
usage
exit
;;
?)
usage
exit
;;
esac
done
if [[ -z "$PACKAGE_FILE_PATH" || ! ( "$PACKAGE_FILE_PATH" =~ \.[io]pk$ ) ]]; then
echo "The package file must not be blank and must have an .ipk or .opk suffix"
exit $ERR_INVALID_PACKAGE_FILE_NAME
fi
# Retrieve the package
wget -q "$PACKAGE_FILE_PATH"
RESULT="$?"
if [[ $RESULT -ne 0 ]]; then
echo "Failed to retrieve file $PACKAGE_FILE_PATH with result $RESULT"
exit $ERR_PACKAGE_FILE_NOT_ACCESSIBLE
fi
# Deploy the package to myserver
PACKAGE_FILE_NAME="$(basename $PACKAGE_FILE_PATH)"
REPOSITORY_ROOT="/var/www/repositories/opkg/beaglebone"
scp "$PACKAGE_FILE_NAME" root#$SERVER:$REPOSITORY_ROOT
RESULT="$?"
if [[ $RESULT -ne 0 ]]; then
echo "Failed to copy file $PACKAGE_FILE_NAME to server with result $RESULT"
exit $ERR_FAILED_TO_COPY_PACKAGE_TO_SERVER
fi
ssh root@$SERVER "chmod 644 $REPOSITORY_ROOT/$PACKAGE_FILE_NAME; opkg-make-index -l $REPOSITORY_ROOT/Packages.filelist -p $REPOSITORY_ROOT/Packages -r $REPOSITORY_ROOT/Packages $REPOSITORY_ROOT && gzip -c $REPOSITORY_ROOT/Packages > $REPOSITORY_ROOT/Packages.gz"
RESULT="$?"
if [[ $RESULT -ne 0 ]]; then
echo "Failed to deploy file $PACKAGE_FILE_NAME in repository with result $RESULT"
exit $ERR_FAILED_TO_DEPLOY_PACKAGE_ON_SERVER
fi
exit 0
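For example, to mirror a single package from a public feed into the internal repository (the URL is purely illustrative):
./bbb_clone_package_to_internal_repo.sh -p http://feeds.example.org/beaglebone/mypackage_1.0-r0_armv7a.ipk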