I have the following docker file
FROM alpine:3.10 as builder
# CAMUNDA VARIABLES
ARG VERSION=7.12.0
ARG DISTRO=tomcat
ARG SNAPSHOT=false
ARG EE=true
#Understand difference between ARG and ENV:
#https://vsupalov.com/docker-arg-vs-env/
#AZURE SQL VARIABLES, DEFAULT SETTINGS ARE DEV DB Environment
#Be sure database and tables are created upfront
#Use docker build . -t xx/camundawithbasicdev
#Use docker build . -t xx/camundawithbasicauthqa --build-arg SQLSERVER=a_value --build-arg DBPASSWORD=a_value --build-arg DBUSERNAME=a_value
#Use docker build . -t xx/camundawithbasicauthprod --build-arg some_variable_name=a_value
#And replace variables as needed.
ARG SQLSERVER=xx
ARG DBPASSWORD=Hxx
ARG DBUSERNAME=xx
ARG DATABASENAME=xx
RUN apk add --no-cache \
ca-certificates \
maven \
tar \
wget \
xmlstarlet
COPY settings.xml download.sh camunda-tomcat.sh camunda-wildfly.sh /tmp/
RUN /tmp/download.sh
#Enable Basic AUTH
COPY web.xml /camunda/webapps/engine-rest/WEB-INF/web.xml
##### FINAL IMAGE #####
FROM alpine:3.10
ARG VERSION=7.12.0
ENV CAMUNDA_VERSION=${VERSION}
ENV DB_DRIVER=com.microsoft.sqlserver.jdbc.SQLServerDriver
ENV DB_URL=jdbc:sqlserver://${SQLSERVER}.database.windows.net:1433;database=${DATABASENAME};user=${DBUSERNAME};password={DBPASSWORD};encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;
ENV DB_USERNAME=${DBUSERNAME}
ENV DB_PASSWORD=${DBPASSWORD}
ENV DB_CONN_MAXACTIVE=20
ENV DB_CONN_MINIDLE=5
ENV DB_CONN_MAXIDLE=20
ENV DB_VALIDATE_ON_BORROW=true
ENV DB_VALIDATION_QUERY="SELECT 1"
ENV SKIP_DB_CONFIG=
ENV WAIT_FOR=
ENV WAIT_FOR_TIMEOUT=120
ENV TZ=UTC
ENV DEBUG=TRUE
ENV JAVA_OPTS="-Xmx768m -XX:MaxMetaspaceSize=256m"
ENV JMX_PROMETHEUS=false
EXPOSE 8080 8000
# Downgrading wait-for-it is necessary until this PR is merged
# https://github.com/vishnubob/wait-for-it/pull/68
RUN apk add --no-cache \
bash \
ca-certificates \
openjdk11-jre-headless \
tzdata \
tini \
xmlstarlet \
&& wget -O /usr/local/bin/wait-for-it.sh \
"https://raw.githubusercontent.com/vishnubob/wait-for-it/a454892f3c2ebbc22bd15e446415b8fcb7c1cfa4/wait-for-it.sh" --no-check-certificate \
&& chmod +x /usr/local/bin/wait-for-it.sh
RUN addgroup -g 1000 -S camunda && \
adduser -u 1000 -S camunda -G camunda -h /camunda -s /bin/bash -D camunda
# end ssh config
WORKDIR /camunda
USER camunda
#MSSQL SERVER JDBC DRIVER INSTALL
COPY mssql-jdbc-7.2.2.jre11.jar /camunda/lib/
ENTRYPOINT ["/sbin/tini", "--"]
CMD "./camunda.sh"
COPY --chown=camunda:camunda --from=builder /camunda .
However, when I run the image, it looks like the variables are not replaced in the connection string.
What am I missing?
You're setting the arguments in a different stage. An ARG is scoped to the build stage it's declared in, so as far as I know it's not possible to pass it between stages without re-declaring it. For example:
This won't work
FROM alpine:3.10 as builder
ARG TEST1=1234
FROM alpine:3.10
ENV TEST2=blablah-${TEST1}
if you print TEST2 you'll get blablah-
This will:
FROM alpine:3.10 as builder
ARG TEST1=1234
FROM alpine:3.10
ARG TEST1=98765
ENV TEST2=blablah-${TEST1}
if you print TEST2 you'll get blablah-98765
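Applied to your Dockerfile, that would mean re-declaring the ARGs after the second FROM so they're in scope when the ENV lines are evaluated (a minimal sketch using the ARG names from your file):
FROM alpine:3.10
# Re-declare every ARG the ENV lines below depend on
ARG VERSION=7.12.0
ARG SQLSERVER=xx
ARG DBPASSWORD=xx
ARG DBUSERNAME=xx
ARG DATABASENAME=xx
ENV CAMUNDA_VERSION=${VERSION}
ENV DB_URL=jdbc:sqlserver://${SQLSERVER}.database.windows.net:1433;database=${DATABASENAME};user=${DBUSERNAME};password=${DBPASSWORD};encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;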
However, I would strongly advise against this approach, as it will store your passwords and other secrets as default env vars in the Docker image you're building. I would pass the connection details when you run Docker instead, either with docker run -e TEST1=1234 or, even better, with a docker-compose file.
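For example, a runtime-only sketch (the variable names match the entrypoint approach shown below, and xx/camundawithbasicdev is just the tag from your build comments):
docker run -d -p 8080:8080 \
  -e SQLSERVER=myserver \
  -e DB_NAME=mydb \
  -e DB_USERNAME=db_user \
  -e DB_PASSWORD=mysecretpassword \
  xx/camundawithbasicdev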
Normally, when I need to construct variables like that, I do it in the entrypoint.sh and keep the Dockerfile very simplified (only the env vars).
So my entrypoint looks like:
#!/bin/bash
export DB_URL="jdbc:sqlserver://${SQLSERVER}.database.windows.net:1433;database=${DB_NAME};user=${DB_USERNAME};password=${DB_PASSWORD};encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=0;"
echo $DB_URL
echo "Running: '$#'"
exec $#
My Dockerfile looks like:
FROM alpine:3.10 as builder
ARG VERSION
ARG DISTRO
ARG SNAPSHOT
ARG EE
FROM alpine:3.10
ARG VERSION
ENV CAMUNDA_VERSION=${VERSION}
ENV DB_DRIVER=com.microsoft.sqlserver.jdbc.SQLServerDriver
ENV DB_CONN_MAXACTIVE=20
ENV DB_CONN_MINIDLE=5
ENV DB_CONN_MAXIDLE=20
ENV DB_VALIDATE_ON_BORROW=true
ENV DB_VALIDATION_QUERY="SELECT 1"
ENV SKIP_DB_CONFIG=
ENV WAIT_FOR=
ENV WAIT_FOR_TIMEOUT=120
ENV TZ=UTC
ENV DEBUG=TRUE
ENV JAVA_OPTS="-Xmx768m -XX:MaxMetaspaceSize=256m"
ENV JMX_PROMETHEUS=false
RUN apk update && apk add bash
ADD entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]
And my docker-compose.yml
version: '2.3'
services:
  so-example:
    image: stackoverflow-example
    build:
      context: .
      args:
        - VERSION
        - DISTRO
        - SNAPSHOT
    environment:
      - SQLSERVER
      - DB_USERNAME
      - DB_PASSWORD
      - DB_NAME
Then, to build the new image I run:
docker-compose build --build-arg VERSION=7.12.0 --build-arg DISTRO=tomcat --build-arg SNAPSHOT=false --build-arg EE=true
When I need to run it I export the values I need. For instance:
export SQLSERVER=myhost
export DB_USERNAME=db_user
export DB_PASSWORD=mysecretpassword
export DB_NAME=mydb
And then run it with compose. For example, if I run
docker-compose run so-example env
you should see that DB_URL is
jdbc:sqlserver://myhost.database.windows.net:1433;database=mydb;user=db_user;password=mysecretpassword;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=0;
I hope this helps
I have this Dockerfile
ARG FUNCTION_DIR="/opt/"
FROM node:10.13-alpine@sha256:22c8219b21f86dfd7398ce1f62c48a022fecdcf0ad7bf3b0681131bd04a023a2 AS BUILD_IMAGE
ARG FUNCTION_DIR
RUN apk --update add cmake autoconf automake libtool binutils libexecinfo-dev python2 gcc make g++ zlib-dev
ENV NODE_ENV=production
ENV PYTHON=/usr/bin/python2
RUN mkdir -p ${FUNCTION_DIR}
WORKDIR ${FUNCTION_DIR}
COPY package.json yarn.lock ./
RUN yarn --frozen-lockfile
RUN npm prune --production
RUN yarn cache clean
RUN npm cache clean --force
FROM node:10.13-alpine@sha256:22c8219b21f86dfd7398ce1f62c48a022fecdcf0ad7bf3b0681131bd04a023a2
ARG FUNCTION_DIR
ENV NODE_ENV=production
ENV NODE_OPTIONS=--max_old_space_size=4096
RUN apk update \
&& apk upgrade \
&& apk add mongodb-tools fontconfig dumb-init \
&& rm -rf /var/cache/apk/*
RUN mkdir -p ${FUNCTION_DIR}
WORKDIR ${FUNCTION_DIR}
COPY --from=BUILD_IMAGE ${FUNCTION_DIR}/node_modules ./node_modules
COPY . .
RUN if [ -f core/config/local.js ]; then rm core/config/local.js; fi
RUN cp core/config/local.js.aws.readonly core/config/local.js
USER node
EXPOSE 8080
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["node", "app.js", "--app=search", "--env=production"]
I use this Dockerfile to generate an image (called core-a) that runs our application in K8s. I've added some code inside the application to handle the case where it is launched from a Lambda function, and I've created another Dockerfile like the one above but with a custom ENTRYPOINT and CMD set to these values:
ENTRYPOINT [ "/usr/local/bin/npx", "aws-lambda-ric" ]
CMD [ "apps/search/index.handler" ]
Then I deployed this image, called core-b, to ECR, used it as the container image for a Lambda function, and everything works as expected.
After that I thought I could use the ability to override the ENTRYPOINT and CMD in order to use the same Docker image for both environments, so I pointed the Lambda function's image at core-a and set the entrypoint and cmd overrides to the values I used in the core-b Dockerfile, but doing so I get an error:
Couldn't find valid bootstrap(s): [\"/usr/local/bin/npx\"]
Does anyone have any suggestions?
Try removing the quotation marks (" ") when entering the override values in the Lambda console's override form.
The AWS docs unfortunately contain an incorrect note that says to wrap each string in quotation marks.
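For reference, this is roughly what the override fields would contain in this case, entered without surrounding quotes (values taken from the core-b Dockerfile above):
ENTRYPOINT override: /usr/local/bin/npx, aws-lambda-ric
CMD override: apps/search/index.handler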
Using DockerToolbox, I've been trying for the past few days to update my container to run in Heroku.
I can't seem to update the code in the container.
Here are some of the things I've tried:
In the Dockerfile, changed COPY . /app to ADD . /app
Removed the Docker machine and created a new VirtualBox machine:
`docker-machine rm default`
`docker-machine create --driver virtualbox default`
Built/ran the Docker image:
`docker build --no-cache -t appname`
`docker run -it -p 8888:8080 appname`
Also tried docker build --no-cache .
Dockerfile
FROM python:3.6
# create and set working directory
RUN mkdir /app
WORKDIR /app
# Add current directory code to working directory
ADD . /app/
# set default environment variables
ENV PYTHONUNBUFFERED 1
ENV LANG C.UTF-8
ENV DEBIAN_FRONTEND=noninteractive
ENV PORT 8080
RUN wget http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz && \
tar -xvzf ta-lib-0.4.0-src.tar.gz && \
cd ta-lib/ && \
./configure --prefix=/usr && \
make && \
make install
COPY . /app
WORKDIR /app
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
RUN pip install ta-lib
EXPOSE 8080
CMD gunicorn appname.wsgi:application --bind 0.0.0.0:$PORT
I am trying to build a jenkins docker image from official jenkins git repo:
https://github.com/jenkinsci/docker.
But when I try to run a container from the image using docker run -it -dP jenkins, it exits immediately, and when I check the Docker logs I get the following error:
: invalid option
I read that the error could be because the PID of tini is not 1. I looked at the docs and saw that either of the following should solve the issue:
Passing the -s argument to Tini (tini -s -- ...)
Setting the environment variable TINI_SUBREAPER (e.g. export TINI_SUBREAPER=).
But neither of them solved anything.
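In Dockerfile terms, those two suggestions amount to something like this (a rough sketch, not the actual file, which follows below):
# Option 1: pass -s so tini acts as a subreaper even when its PID is not 1
ENTRYPOINT ["/bin/tini", "-s", "--", "/usr/local/bin/jenkins.sh"]
# Option 2: set the TINI_SUBREAPER environment variable instead
ENV TINI_SUBREAPER=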
The following is the exact copy of the Dockerfile being built with docker build -t jenkins .:
FROM openjdk:8-jdk
RUN apt-get update && apt-get install -y git curl && rm -rf /var/lib/apt/lists/*
ARG user=jenkins
ARG group=jenkins
ARG uid=1000
ARG gid=1000
ARG http_port=8080
ARG agent_port=50000
ENV JENKINS_HOME /var/jenkins_home
ENV JENKINS_SLAVE_AGENT_PORT ${agent_port}
ENV TINI_SUBREAPER=
# Jenkins is run with user `jenkins`, uid = 1000
# If you bind mount a volume from the host or a data container,
# ensure you use the same uid
RUN groupadd -g ${gid} ${group} \
&& useradd -d "$JENKINS_HOME" -u ${uid} -g ${gid} -m -s /bin/bash ${user}
# Jenkins home directory is a volume, so configuration and build history
# can be persisted and survive image upgrades
VOLUME /var/jenkins_home
# `/usr/share/jenkins/ref/` contains all reference configuration we want
# to set on a fresh new installation. Use it to bundle additional plugins
# or config file with your custom jenkins Docker image.
RUN mkdir -p /usr/share/jenkins/ref/init.groovy.d
ENV TINI_VERSION 0.14.0
ENV TINI_SHA 6c41ec7d33e857d4779f14d9c74924cab0c7973485d2972419a3b7c7620ff5fd
# Use tini as subreaper in Docker container to adopt zombie processes
RUN curl -fsSL https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini-static-amd64 -o /bin/tini && chmod +x /bin/tini \
&& echo "$TINI_SHA /bin/tini" | sha256sum -c -
COPY init.groovy /usr/share/jenkins/ref/init.groovy.d/tcp-slave-agent-port.groovy
# jenkins version being bundled in this docker image
ARG JENKINS_VERSION
ENV JENKINS_VERSION ${JENKINS_VERSION:-2.60.1}
# jenkins.war checksum, download will be validated using it
ARG JENKINS_SHA=34fde424dde0e050738f5ad1e316d54f741c237bd380bd663a07f96147bb1390
# Can be used to customize where jenkins.war get downloaded from
ARG JENKINS_URL=https://repo.jenkins-ci.org/public/org/jenkins-ci/main/jenkins-war/${JENKINS_VERSION}/jenkins-war-${JENKINS_VERSION}.war
# could use ADD but this one does not check Last-Modified header neither does it allow to control checksum
# see https://github.com/docker/docker/issues/8331
RUN curl -fsSL ${JENKINS_URL} -k -o /usr/share/jenkins/jenkins.war \
&& echo "${JENKINS_SHA} /usr/share/jenkins/jenkins.war" | sha256sum -c -
ENV JENKINS_UC https://updates.jenkins.io
RUN chown -R ${user} "$JENKINS_HOME" /usr/share/jenkins/ref
# for main web interface:
EXPOSE ${http_port}
# will be used by attached slave agents:
EXPOSE ${agent_port}
ENV COPY_REFERENCE_FILE_LOG $JENKINS_HOME/copy_reference_file.log
USER ${user}
COPY jenkins-support /usr/local/bin/jenkins-support
COPY jenkins.sh /usr/local/bin/jenkins.sh
ENTRYPOINT ["/bin/tini", "--", "/usr/local/bin/jenkins.sh"]
# from a derived Dockerfile, can use `RUN plugins.sh active.txt` to setup /usr/share/jenkins/ref/plugins from a support bundle
COPY plugins.sh /usr/local/bin/plugins.sh
COPY install-plugins.sh /usr/local/bin/install-plugins.sh
The problem was the Docker version. My Docker version was old. I'm not sure which instruction was not supported, but the newer Docker built the Dockerfile fine.
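If you are unsure which Docker version you're running, you can check it before upgrading:
docker --version
# or, for the daemon version only:
docker version --format '{{.Server.Version}}'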
Newbie in docker here.
I want to build a project using the Go language, and my docker-compose.yml file has the following:
go:
  image: golang:1.7-alpine
  volumes:
    - ./:/server/http
  ports:
    - "80:8080"
  links:
    - postgres
    - mongodb
    - redis
  environment:
    DEBUG: 'true'
    PORT: '8080'
When I run docker-compose up -d in terminal, it returns the following error:
`ERROR: for go Cannot create container for service go: No command specified`
How should I fix it?
golang:1.7-alpine is just a base image for building a Go container; it does not have a CMD or an ENTRYPOINT, so the container exits immediately.
Use an image that actually does something, like printing hello world every 45 seconds.
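For example, a minimal sketch that keeps golang:1.7-alpine but gives the service a long-running command in the compose file:
go:
  image: golang:1.7-alpine
  command: /bin/sh -c "while true; do echo hello world; sleep 45; done"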
I solved it by using golang:1.7 instead of golang:1.7-alpine.
You should run your container with an argument, which will be passed to the default ENTRYPOINT and executed as a command.
But the best practice these days is to use a multi-stage build, in order to generate a smaller image containing just your application.
Or you can define your ENTRYPOINT to be your built Go application.
See 'Using Docker Multi-Stage Builds with Go', which uses the new AS build-stage keyword.
Your Dockerfile would be:
# build stage
ARG GO_VERSION=1.8.1
FROM golang:${GO_VERSION}-alpine AS build-stage
MAINTAINER fbgrecojr@me.com
WORKDIR /go/src/github.com/frankgreco/gobuild/
COPY ./ /go/src/github.com/frankgreco/gobuild/
RUN apk add --update --no-cache \
wget \
curl \
git \
&& wget "https://github.com/Masterminds/glide/releases/download/v0.12.3/glide-v0.12.3-`go env GOHOSTOS`-`go env GOHOSTARCH`.tar.gz" -O /tmp/glide.tar.gz \
&& mkdir /tmp/glide \
&& tar --directory=/tmp/glide -xvf /tmp/glide.tar.gz \
&& rm -rf /tmp/glide.tar.gz \
&& export PATH=$PATH:/tmp/glide/`go env GOHOSTOS`-`go env GOHOSTARCH` \
&& glide update -v \
&& glide install \
&& CGO_ENABLED=0 GOOS=`go env GOHOSTOS` GOARCH=`go env GOHOSTARCH` go build -o foo \
&& go test $(go list ./... | grep -v /vendor/) \
&& apk del wget curl git
# production stage
FROM alpine:3.5
MAINTAINER fbgrecojr@me.com
COPY --from=build-stage /go/src/github.com/frankgreco/gobuild/foo .
ENTRYPOINT ["/foo"]
I am trying to set up a customised Jenkins 2 server from a Dockerfile.
I use the official image, and I want to be able to add the things I need, like custom jobs and an admin user.
This is my Dockerfile so far:
FROM openjdk:8-jdk
RUN apt-get update && apt-get install -y git curl && rm -rf /var/lib/apt/lists/*
ENV JENKINS_HOME /var/jenkins_home
ENV JENKINS_SLAVE_AGENT_PORT 50000
ARG user=jenkins
ARG group=jenkins
ARG uid=1000
ARG gid=1000
# Jenkins is run with user `jenkins`, uid = 1000
# If you bind mount a volume from the host or a data container,
# ensure you use the same uid
RUN groupadd -g ${gid} ${group} \
&& useradd -d "$JENKINS_HOME" -u ${uid} -g ${gid} -m -s /bin/bash ${user}
# Jenkins home directory is a volume, so configuration and build history
# can be persisted and survive image upgrades
VOLUME /var/jenkins_home
# `/usr/share/jenkins/ref/` contains all reference configuration we want
# to set on a fresh new installation. Use it to bundle additional plugins
# or config file with your custom jenkins Docker image.
RUN mkdir -p /usr/share/jenkins/ref/init.groovy.d
ENV TINI_VERSION 0.9.0
ENV TINI_SHA fa23d1e20732501c3bb8eeeca423c89ac80ed452
# Use tini as subreaper in Docker container to adopt zombie processes
RUN curl -fsSL https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini-static -o /bin/tini && chmod +x /bin/tini \
&& echo "$TINI_SHA /bin/tini" | sha1sum -c -
COPY init.groovy /usr/share/jenkins/ref/init.groovy.d/tcp-slave-agent-port.groovy
# jenkins version being bundled in this docker image
ARG JENKINS_VERSION
ENV JENKINS_VERSION ${JENKINS_VERSION:-2.19.2}
# jenkins.war checksum, download will be validated using it
ARG JENKINS_SHA=32b8bd1a86d6d4a91889bd38fb665db4090db081
# Can be used to customize where jenkins.war get downloaded from
ARG JENKINS_URL=https://repo.jenkins-ci.org/public/org/jenkins-ci/main/jenkins-war/${JENKINS_VERSION}/jenkins-war-${JENKINS_VERSION}.war
# could use ADD but this one does not check Last-Modified header neither does it allow to control checksum
# see https://github.com/docker/docker/issues/8331
RUN curl -fsSL ${JENKINS_URL} -o /usr/share/jenkins/jenkins.war \
&& echo "${JENKINS_SHA} /usr/share/jenkins/jenkins.war" | sha1sum -c -
ENV JENKINS_UC https://updates.jenkins.io
RUN chown -R ${user} "$JENKINS_HOME" /usr/share/jenkins/ref
# for main web interface:
EXPOSE 8080
# will be used by attached slave agents:
EXPOSE 50000
ENV COPY_REFERENCE_FILE_LOG $JENKINS_HOME/copy_reference_file.log
USER ${user}
COPY jenkins-support /usr/local/bin/jenkins-support
COPY jenkins.sh /usr/local/bin/jenkins.sh
ENTRYPOINT ["/bin/tini", "--", "/usr/local/bin/jenkins.sh"]
# from a derived Dockerfile, can use `RUN plugins.sh active.txt` to setup /usr/share/jenkins/ref/plugins from a support bundle
COPY plugins.txt /usr/share/jenkins/plugins.txt
COPY plugins.sh /usr/local/bin/plugins.sh
COPY install-plugins.sh /usr/local/bin/install-plugins.sh
# Add the command line tools
COPY jenkins-cli.jar "$JENKINS_HOME"
# Create jobs
ARG job_name_1="my_super_job"
#ARG job_name_2="my_ultra_job"
# create the jobs folder recursively
RUN mkdir -p "$JENKINS_HOME"/jobs/${job_name_1}
RUN mkdir -p "$JENKINS_HOME"/jobs/${job_name_1}/workspace/
RUN mkdir -p "$JENKINS_HOME"/jobs/${job_name_1}/builds
RUN mkdir -p "$JENKINS_HOME"/jobs/${job_name_1}/builds/lastFailedBuild
RUN mkdir -p "$JENKINS_HOME"/jobs/${job_name_1}/builds/lastStableBuild
RUN mkdir -p "$JENKINS_HOME"/jobs/${job_name_1}/builds/lastSuccessfulBuild
RUN mkdir -p "$JENKINS_HOME"/jobs/${job_name_1}/builds/lastUnstableBuild
RUN mkdir -p "$JENKINS_HOME"/jobs/${job_name_1}/builds/lastUnsuccessfulBuild
RUN mkdir -p "$JENKINS_HOME"/jobs/${job_name_1}/builds/legacyIds
#RUN mkdir -p "$JENKINS_HOME"/jobs/${job_name_2}
## add the custom configs to the container
COPY ${job_name_1}_config.xml "$JENKINS_HOME"/jobs/${job_name_1}/config.xml
USER root
#RUN chmod 600 "$JENKINS_HOME"/jobs/${job_name_1}/config.xml
RUN java -jar /var/jenkins_home/jenkins-cli.jar -s http://localhost:8080 create-job my_super_job < /var/jenkins_home/jobs/my_super_job/config.xml
#COPY ${job_name_2}_config.xml "$JENKINS_HOME"/jobs/${job_name_2}/config.xml
# --Install plugins--
# Notice: Deprecated method which however works with a 'plugins.txt' file
#USER root
#RUN chmod 600 /usr/share/jenkins/plugins.txt
#RUN chmod 600 /usr/local/bin/install-plugins.sh
#RUN /usr/local/bin/plugins.sh /usr/share/jenkins/plugins.txt
# Notice: Recommended method with open case on Github [https://github.com/jenkinsci/docker/issues/348]
# Notice: Select whichever plugins you want
#RUN /usr/local/bin/install-plugins.sh \
#dashboard-view:2.9.10 \
#pipeline-stage-view:2.2 \
#parameterized-trigger:2.32 \
#bitbucket:1.1.5 \
#git:3.0.0 \
#github:1.22.4
# --Install plugins--
I have tried to create a job at build time by first launching a container, creating the job manually, saving the config.xml file, and then copying it into the image from the Dockerfile. I am also trying to replicate the file/folder structure that Jenkins creates when a job is added.
But it is not working. The job is not appearing in Jenkins.
I also tried to use jenkins-cli.jar, but as I understand it, there must be a live Jenkins server to connect to in order to execute anything, which is not the case at build time.
Finally, I suppose creating an admin user at build time must be way more complicated than creating a job...
So, does anyone have any experience with this?