I have the following Dockerfile (I've removed what is not relevant):
# Legacy PHP/Apache image; CentOS 6 is EOL — pinned for compatibility only.
FROM centos:centos6
ENV TERM=xterm
# Optional tooling toggles consumed by the (elided) install steps below.
ARG INSTALL_WKHTMLTOPDF=no
ARG WKHTMLTOPDF_VERSION=latest
ARG INSTALL_PDFTK=no
ARG PDFTK_VERSION=latest
ARG PHP_VERSION=default
...
COPY container-files /
...
EXPOSE 80 9001
WORKDIR /var/www/html
# Bug fix: the original shell form `ENTRYPOINT bash -C '/entrypoint.sh';'bash'`
# used -C (noclobber) instead of -c, and the trailing 'bash' was dead code
# that could only run after httpd exited. Exec (JSON) form runs the script
# directly, so its final `exec httpd` becomes PID 1 and receives SIGTERM
# from `docker stop`.
ENTRYPOINT ["/entrypoint.sh"]
The entrypoint.sh is as follow:
#!/bin/bash
# Container entrypoint: resolve a runtime uid/gid, prepare the xdebug log
# file, then replace this shell with Apache in the foreground.
set -e

# $UID is a readonly bash builtin and is therefore always set; when the
# container runs as root (0), fall back to the default unprivileged uid.
if [ "$UID" == 0 ]; then
    uid=1000
else
    uid=${UID}
fi

# GID is NOT set by bash, so an empty value means "use the default".
if [ -z "${GID}" ]; then
    gid=1000
else
    gid=${GID}
fi

echo "UID: $uid"
echo "GID: $gid"

# Pre-create the xdebug log so the apache user can append to it.
# NOTE(review): this fails when /var/log is mounted read-only — create the
# file at image-build time or log to a writable path instead.
touch /var/log/xdebug.log
chown apache:root /var/log/xdebug.log

# Remove a stale pid file left by an unclean shutdown.
rm -f /var/run/apache2/apache2.pid

# exec makes httpd PID 1 so it receives container signals.
# Bug fix: "$#" expanded to the argument COUNT; "$@" forwards the actual
# arguments given to the entrypoint.
exec httpd -DFOREGROUND "$@"
And finally the docker-compose.yml file:
# Compose file for the arx_dev image (indentation lost in the paste).
version: '3.4'
services:
erx:
image: arx_dev
ports:
- "80:80"
environment:
VHOST_DOCUMENT_ROOT: /var/www/html
volumes:
# NOTE(review): the :ro flag makes the whole /var/log mount read-only, so
# the entrypoint's `touch /var/log/xdebug.log` fails at startup — create
# the file before mounting, or mount only a writable subpath read-only.
- ./server_logs:/var/log/:ro
After building the image and running docker-compose up -d, the container does not start because touch can't create the file on a read-only filesystem.
PS F:\Development\docker\rx> docker logs rx_erx_1
UID: 1000
GID: 1000
touch: cannot touch `/var/log/xdebug.log': Read-only file system
PS F:\Development\docker\rx>
How I can create the file and then mount the /var/log as read only? I would like to check some logs from the host directly and avoid bash into the container. Any ideas?
Related
What I wanted to do is use "COPY script.sh script.sh" (copy the script from the host into the container and execute it), but when the script runs in the container, it seems to also modify files on the host machine.
Below is Dockerfile :
# Single-container Confluent Community 6.1.1 stack (Zookeeper, Kafka,
# Schema Registry, Kafka Connect) on AlmaLinux with OpenJDK 8.
FROM almalinux/almalinux:latest
RUN mkdir /opt/confluent
RUN mkdir /opt/confluent-hub
# Confluent Home
ENV CONFLUENT_HOME=/opt/confluent
# NOTE(review): the ENV FOO=$FOO lines below expand at *build* time; no
# matching ARG is declared, so they all resolve to empty strings. The real
# values are injected by docker-compose `environment:` at run time — these
# lines only document the expected variable names.
ENV KAFKA_CONFIG=$KAFKA_CONFIG
ENV ZOOKEEPER_CONFIG=$ZOOKEEPER_CONFIG
# Bug fix: was `=$ZOOKEEPER_CONFIG` (copy-paste slip).
ENV SCHEMA_REGISTRY_CONFIG=$SCHEMA_REGISTRY_CONFIG
ENV CONNECT_CONFIG=$CONNECT_CONFIG
# Zookeeper
ENV ZOOKEEPER_DATA_DIR=$ZOOKEEPER_DATA_DIR
ENV ZOOKEEPER_CLIENT_PORT=$ZOOKEEPER_CLIENT_PORT
# Kafka
ENV BOOTSTRAP_SERVERS=$BOOTSTRAP_SERVERS
ENV KAFKA_SERVER_BROKER_ID=$KAFKA_SERVER_BROKER_ID
ENV ZOOKEEPER_CONNECT_IP_PORT=$ZOOKEEPER_CONNECT_IP_PORT
ENV KAFKA_SERVER_LOG_DIR=$KAFKA_SERVER_LOG_DIR
# Schema registry
ENV KAFKASTORE_TOPIC=$KAFKASTORE_TOPIC
ENV PROTOCOL_BOOTSTRAP_SERVERS=$PROTOCOL_BOOTSTRAP_SERVERS
ENV SCHEMA_REGISTRY_GROUP_ID=$SCHEMA_REGISTRY_GROUP_ID
ENV SCHEMA_REGISTRY_LEADER_ELIGIBILITY=$SCHEMA_REGISTRY_LEADER_ELIGIBILITY
# Kafka connect
ENV CONNECT_REST_PORT=$CONNECT_REST_PORT
ENV CONNECT_OFFSETS=$CONNECT_OFFSETS
ENV CONNECT_KEY_CONVERTER=$CONNECT_KEY_CONVERTER
ENV SCHEMA_REGISTRY_URL=$SCHEMA_REGISTRY_URL
ENV CONNECT_VALUE_CONVERTER=$CONNECT_VALUE_CONVERTER
ENV SCHEMA_REGISTRY_LISTENER=$SCHEMA_REGISTRY_LISTENER
ENV CONNECT_PLUGIN_PATH=/usr/share/java/,$CONFLUENT_HOME/share/confluent-hub-components/
# install openjdk8
RUN dnf update -y && dnf install epel-release -y
RUN dnf install wget zip moreutils gettext unzip java-1.8.0-openjdk.x86_64 -y && dnf clean all
# install confluent
WORKDIR $CONFLUENT_HOME
# Download, unpack and clean up in ONE layer so the tarball is not baked
# into the image (the original `RUn` casing typo is also fixed here).
RUN wget https://packages.confluent.io/archive/6.1/confluent-community-6.1.1.tar.gz -P . \
 && tar -xzf confluent-community-6.1.1.tar.gz \
 && mv confluent-6.1.1/* . \
 && rm -rf confluent-6.1.1 confluent-community-6.1.1.tar.gz
# install confluent hub
RUN wget http://client.hub.confluent.io/confluent-hub-client-latest.tar.gz -P /opt/confluent-hub
WORKDIR /opt/confluent-hub
RUN tar -xzf confluent-hub-client-latest.tar.gz && rm confluent-hub-client-latest.tar.gz
ENV CONFLUENT_HUB=/opt/confluent-hub/bin
# Export path
ENV PATH=$PATH:$CONFLUENT_HOME:$CONFLUENT_HUB
# install jdbc connector
COPY confluentinc-kafka-connect-jdbc-10.1.0.zip $CONFLUENT_HOME/share/confluent-hub-components/
# Bug fix: the original unzipped into the current directory
# (/opt/confluent-hub) and then ran `rm -rf` on a relative path that did
# not exist there; extract into the plugin dir (-d) and delete the archive.
RUN unzip $CONFLUENT_HOME/share/confluent-hub-components/confluentinc-kafka-connect-jdbc-10.1.0.zip \
      -d $CONFLUENT_HOME/share/confluent-hub-components/ \
 && rm -f $CONFLUENT_HOME/share/confluent-hub-components/confluentinc-kafka-connect-jdbc-10.1.0.zip
# Copy confluent config into the image
WORKDIR $CONFLUENT_HOME
COPY config/* config/
# startup
COPY startup.sh ./startup.sh
RUN chmod +x ./startup.sh
# Exec form so the script is PID 1 and receives container signals
# (was shell-form `CMD ./startup.sh`).
CMD ["./startup.sh"]
Below is startup.sh which replaces environment variables in config files and starts kafka service but this script when run in container is replacing values on host config file :
#!/bin/bash
# Substitute environment variables into the Confluent config files, then
# start Zookeeper, Kafka, Schema Registry and Connect, and idle forever so
# the container stays alive.
set -e

# Render ${VARS} in each properties file in place (sponge lets us write
# back to the same file we read from).
# NOTE(review): when $CONFLUENT_HOME/config is bind-mounted from the host,
# these in-place rewrites modify the HOST files too — render from a
# templates directory into a non-mounted config directory to avoid that.
for cfg in zookeeper.properties server.properties schema-registry.properties connect-avro-standalone.properties; do
    envsubst < "$CONFLUENT_HOME/config/$cfg" | sponge "$CONFLUENT_HOME/config/$cfg"
done

# start zookeeper
"$CONFLUENT_HOME/bin/zookeeper-server-start" -daemon "$ZOOKEEPER_CONFIG"
sleep 2
# start kafka broker
"$CONFLUENT_HOME/bin/kafka-server-start" -daemon "$KAFKA_CONFIG"
sleep 2
# start schema registry
"$CONFLUENT_HOME/bin/schema-registry-start" -daemon "$SCHEMA_REGISTRY_CONFIG"
sleep 2
# start kafka connect
"$CONFLUENT_HOME/bin/connect-standalone" -daemon "$CONNECT_CONFIG" "$CONFLUENT_HOME/etc/kafka/connect-file-sink.properties"
sleep 2

# Everything above was started with -daemon, so keep a foreground process
# alive as PID 1; without this loop the container would exit immediately.
while :
do
    echo "Confluent Running "
    sleep 5
done
docker-compose :
version: "3.9"
services:
  confluent-community:
    build: ./
    environment:
      # Bug fix: docker-compose interpolates $VARIABLES on the HOST before
      # the container starts; $CONFLUENT_HOME is not set on the host, so
      # every such value expanded to an empty string. Use the literal
      # container path (CONFLUENT_HOME=/opt/confluent in the Dockerfile).
      - KAFKA_CONFIG=/opt/confluent/config/server.properties
      - ZOOKEEPER_CONFIG=/opt/confluent/config/zookeeper.properties
      - SCHEMA_REGISTRY_CONFIG=/opt/confluent/config/schema-registry.properties
      - CONNECT_CONFIG=/opt/confluent/config/connect-avro-standalone.properties
      - CONNECT_REST_PORT=8083
      - CONNECT_OFFSETS=/opt/confluent/data/connect/connect.offsets
      - CONNECT_KEY_CONVERTER=io.confluent.connect.avro.AvroConverter
      - SCHEMA_REGISTRY_URL=http://localhost:8081
      - CONNECT_VALUE_CONVERTER=io.confluent.connect.avro.AvroConverter
      - SCHEMA_REGISTRY_LISTENER=http://0.0.0.0:8081
      - KAFKASTORE_TOPIC=_schemas
      - SCHEMA_REGISTRY_GROUP_ID=SCHEMA_REGISTRY_A
      - SCHEMA_REGISTRY_LEADER_ELIGIBILITY=true
      - PROTOCOL_BOOTSTRAP_SERVERS=PLAINTEXT://localhost:9092
      - ZOOKEEPER_DATA_DIR=/opt/confluent/data/zookeeper
      - ZOOKEEPER_CLIENT_PORT=2181
      - BOOTSTRAP_SERVERS=localhost:9092
      - KAFKA_SERVER_BROKER_ID=0
      - ZOOKEEPER_CONNECT_IP_PORT=localhost:2181
      - KAFKA_SERVER_LOG_DIR=/opt/confluent/data/kafka-logs
    # Host networking makes published ports redundant, hence commented out.
    # ports:
    #   - "9092:9092"
    #   - "8081:8081"
    #   - "8083:8083"
    network_mode: "host"
    volumes:
      - ~/Documents/confluent/docker-logs:/opt/confluent/logs
      # WARNING: this bind mount is why envsubst rewrote the host configs —
      # the container path IS the host directory, not a copy.
      - ~/Documents/confluent/config:/opt/confluent/config
      - ~/Documents/confluent/docker-data:/opt/confluent/data
When you bind-mount configuration files into a container
volumes:
- ~/Documents/confluent/config:/opt/confluent/config
the files in the container are the files on the host. When your startup script uses envsubst to rewrite the configuration files, there's not a separate copy in the container, so it rewrites the files on the host as well.
If you use a separate directory instead:
volumes:
- ~/Documents/confluent/config:/opt/confluent/config-templates
Then your script can read the files in that directory, and write to a non-volume directory:
for f in "$CONFLUENT_HOME/config-templates/*"; do
ff=$(basename "$f")
envsubst <$f >"$CONFLUENT_HOME/config/$ff"
done
(Run the four processes in four separate containers, without using a -daemon option so they're the single foreground process in their respective containers. You shouldn't need to configure any of the filesystem paths or inject them at run time; the *_CONFIG environment variables, for example, can be safely left at their default values, or if they must be set, set them only in the Dockerfile).
I'm attempting to SSHFS from the container to a remote server, with the mount created during the Dockerfile build.
The mount command works if executed in the already running container, and will work if I make the command the entrypoint (but then I have to string on the real entrypoint script on the end with a ; which feels too klugy.)
If I put the command in the Dockerfile with a RUN, it fails with a fuse: device not found, try 'modprobe fuse' first error.
Here's the files...
install.sh
#!/bin/bash
# Launch the compose stack, forwarding the directory to parse and the file
# extension as environment variables for docker-compose interpolation.
USAGE="install.sh <dir_to_parse> <filetype_to_parse>"

# Require both positional arguments.
if [ $# -lt 2 ]
then
    echo "$USAGE"
    exit 1
fi

# Bug fix: quote the arguments so directories or extensions containing
# spaces survive word splitting.
REMOTE_DIR="$1" FILE_EXTENSION="$2" docker-compose -p '' -f docker-compose.yml up -d --build
docker-compose.yml
version: "3"
services:
  source.test:
    build:
      context: .
      dockerfile: ./Dockerfile
    image: test.source
    container_name: test.source
    environment:
      # Bug fix: the original used curly "smart quotes" (“…”) around this
      # value; YAML treats them as literal characters, so they became part
      # of the URL. Plain ASCII double quotes are required.
      ELASTIC_HOST: "http://<redacted>:<redacted>"
      REMOTE_SERVER: <redacted>
      REMOTE_USER: <redacted>
      REMOTE_KEY: /etc/ssl/certs/<redacted>
      FEEDER_URL: http://<redacted>/api
      MONGOHOST: mongo
      WALKDIRS: <redacted>
      REMOTE_DIR: ${REMOTE_DIR}
      FILE_EXTENSION: ${FILE_EXTENSION}
    volumes:
      - /etc/ssl/certs/:/etc/ssl/certs/
    ports:
      - 127.0.0.1:6000:80
    # FUSE needs SYS_ADMIN, the /dev/fuse device, and a relaxed apparmor
    # profile — all of which exist only at RUN time, never during build.
    cap_add:
      - SYS_ADMIN
    devices:
      - "/dev/fuse:/dev/fuse"
    security_opt:
      - "apparmor:unconfined"
networks:
  default:
    external:
      name: test
Dockerfile
FROM ubuntu:18.04
# Install FUSE + sshfs; --no-install-recommends and removing the apt lists
# in the same layer keep the image small.
RUN apt-get update && apt-get -y install --no-install-recommends \
      fuse \
      sshfs \
    && rm -rf /var/lib/apt/lists/*
COPY <redacted> /etc/ssl/certs/<redacted>
COPY fuse.conf /etc/fuse.conf
RUN chown root:root /etc/fuse.conf && chmod 644 /etc/fuse.conf
RUN mkdir /mnt/filestobeparsed
# Bug fix: the original `RUN sshfs ...` can NEVER succeed — `docker build`
# runs without --cap-add SYS_ADMIN and without /dev/fuse, hence the
# "fuse: device not found" error. A FUSE mount only exists at RUN time, so
# perform it in an entrypoint script instead (note: user@host, not the
# paste-mangled user#host):
#   sshfs username@<xxx.xxx.xxx.xxx>:/remote/path /mnt/filestobeparsed \
#     -o StrictHostKeyChecking=no,IdentityFile=/etc/ssl/certs/<redacted>,auto_cache,reconnect,transform_symlinks,follow_symlinks,allow_other
# Exec form so tail is PID 1 and `docker stop` works promptly.
ENTRYPOINT ["tail", "-f", "/dev/null"]
I'm trying to build docker-compose, but I'm getting this error:
ERROR: for indicaaquicombrold_mysqld_1 Cannot start service mysqld:
oci runtime error: container_linux.go:247: starting container process
caused "exec: \"/docker-entrypoint.sh\": permission denied"
ERROR: for mysqld Cannot start service mysqld: oci runtime error:
container_linux.go:247: starting container process caused "exec:
\"/docker-entrypoint.sh\": permission denied"
ERROR: Encountered errors while bringing up the project.
docker-compose.yml
# Compose file for the PHP + MariaDB stack (indentation lost in the paste).
version: '3'
services:
php:
build:
context: ./docker/php
image: indicaaqui.com.br:tag
volumes:
- ./src:/var/www/html/
- ./config/apache-config.conf:/etc/apache2/sites-enabled/000-default.conf
ports:
- "80:80"
- "443:443"
mysqld:
build:
context: ./docker/mysql
environment:
- MYSQL_DATABASE=db_indicaaqui
- MYSQL_USER=indicaqui
- MYSQL_PASSWORD=secret
- MYSQL_ROOT_PASSWORD=docker
volumes:
# NOTE(review): this bind-mounts the HOST script over /docker-entrypoint.sh,
# shadowing the image's copy and its chmod — the host file itself must be
# executable (chmod 755 config/docker-entrypoint.sh) or the container
# fails with "exec: /docker-entrypoint.sh: permission denied".
- ./config/docker-entrypoint.sh:/docker-entrypoint.sh
- ./database/db_indicaaqui.sql:/docker-entrypoint-initdb.d/db_indicaaqui.sql
Dockerfile (php)
FROM php:5.6-apache
# MAINTAINER is deprecated (hadolint DL4000) — use a LABEL instead.
# (The "#" in the original address was a paste artifact for "@".)
LABEL maintainer="Limup <limup@outlook.com>"
# Bug fix: the original had `CMD [ "php" ]` here; only the LAST CMD in a
# Dockerfile takes effect, so it was dead code and has been removed.
RUN docker-php-ext-install pdo_mysql
# Enable apache mods.
# RUN a2enmod php5.6
RUN a2enmod rewrite
# Expose apache.
EXPOSE 80
EXPOSE 443
# Use the default production configuration
# RUN mv "$PHP_INI_DIR/php.ini-production" "$PHP_INI_DIR/php.ini"
RUN mv "$PHP_INI_DIR/php.ini-development" "$PHP_INI_DIR/php.ini"
# Override with custom opcache settings
# COPY ./../../config/php.ini $PHP_INI_DIR/conf.d/
# Manually set up the apache environment variables
ENV APACHE_RUN_USER=www-data
ENV APACHE_RUN_GROUP=www-data
ENV APACHE_LOG_DIR=/var/log/apache2
ENV APACHE_LOCK_DIR=/var/lock/apache2
ENV APACHE_PID_FILE=/var/run/apache2.pid
# Update the PHP.ini file, enable <? ?> tags and quieten logging.
RUN sed -i "s/short_open_tag = Off/short_open_tag = On/" "$PHP_INI_DIR/php.ini"
RUN sed -i "s/error_reporting = .*$/error_reporting = E_ERROR | E_WARNING | E_PARSE/" "$PHP_INI_DIR/php.ini"
RUN a2dissite 000-default.conf
# Least privilege: Apache only needs to read site configs (was 777).
RUN chmod -R 755 /etc/apache2/sites-enabled/
WORKDIR /var/www/html/
# By default start up apache in the foreground; override with /bin/bash for interactive use.
CMD ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"]
Dockerfile (Mysql)
FROM mariadb:latest
# NOTE(review): this chmod only affects the copy of the script baked into
# the image; the compose file bind-mounts ./config/docker-entrypoint.sh
# over this path, so the HOST file's permissions win at run time — chmod
# the file on the host instead.
RUN chmod -R 777 /docker-entrypoint.sh
# ENTRYPOINT/CMD below restate the mariadb base image defaults.
ENTRYPOINT ["/docker-entrypoint.sh"]
EXPOSE 3306
CMD ["mysqld"]
Please, help me solve this problem!
Any ideas?
That is most likely a Linux file permission issue on config/docker-entrypoint.sh. If your host is Linux/Mac, you can run:
chmod 755 config/docker-entrypoint.sh
For more on linux permissions, here's a helpful article: https://www.linux.com/learn/understanding-linux-file-permissions
First, copy the entrypoint.sh file into a different directory from your source code (e.g. /home/entrypoint.sh), then grant permission to execute the entrypoint script:
# Exec-form RUN that marks the relocated entrypoint script executable.
RUN ["chmod", "+x", "/home/entrypoint.sh"]
Solution
# Environment variables consumed by the chown/WORKDIR below.
# NOTE(review): ENV USER only sets a variable — it does NOT change the
# build user; that is the separate USER instruction.
ENV USER root
ENV WORK_DIR_PATH /home
# Create the working directory and hand ownership to $USER.
RUN mkdir -p $WORK_DIR_PATH && chown -R $USER:$USER $WORK_DIR_PATH
WORKDIR $WORK_DIR_PATH
Info
The USER instruction sets the user name (or UID) and optionally the user group (or GID) to use when running the image and for any RUN, CMD and ENTRYPOINT instructions that follow it in the Dockerfile.
The WORKDIR instruction sets the working directory for any RUN, CMD, ENTRYPOINT, COPY and ADD instructions that follow it in the Dockerfile. If the WORKDIR doesn’t exist, it will be created even if it’s not used in any subsequent Dockerfile instruction.
Links
chown command
docker builder reference
A pretty common solution if nothing works is to re-install Docker.. That's what ended up working for me after trying for like 5 hours everything under the sun in terms of permissions etc.
I am using docker on Mac and trying to get a persistent container of postgresql database using nfs volume.
I put the line /Users/me/db -alldirs *(rw,sync,no_subtree_check,no_root_squash) in /etc/exports and restarted nfsd. I think (correct me if I am wrong) the key point is no_root_squash, which allows the client root user to remain root. Then in my docker-compose.yml, I declare the NFS mount points as follows:
# Compose v2 file: two local-driver NFS volumes plus a postgres service
# that mounts them for init scripts and data (indentation lost in paste).
version: '2'
volumes:
nfsmountdbdata:
driver: local
driver_opts:
type: nfs
# host.docker.internal resolves to the macOS host from inside Docker Desktop.
o: addr=host.docker.internal,rw,nolock,hard,nointr,nfsvers=3
device: ":/Users/me/db/data"
nfsmountdbinit:
driver: local
driver_opts:
type: nfs
o: addr=host.docker.internal,rw,nolock,hard,nointr,nfsvers=3
device: ":/Users/me/db/initdb"
services:
## POSTGRES DATABASE
db:
image: postgres:9.6
privileged: true
volumes:
#- ./services/db/initdb:/docker-entrypoint-initdb.d
#- ./services/db/app:/var/lib/postgresql/data
- nfsmountdbinit:/docker-entrypoint-initdb.d
# NOTE(review): the postgres entrypoint chowns this directory at startup;
# the reported "Operation not permitted" presumably means the NFS server
# still rejects the ownership change despite no_root_squash — verify the
# export options actually apply to Docker Desktop's VM client.
- nfsmountdbdata:/var/lib/postgresql/data
ports:
- 5432:5432
But when the db container starts, it complains repeatedly about chown: changing ownership of '/var/lib/postgresql/data/base/**/**': Operation not permitted. This is confusing, since I configured no_root_squash in the NFS exports specifically to prevent this, but it just does not work. What's wrong with my understanding here? I am using macOS Mojave and Docker Desktop for Mac 2.0.0.0 stable.
You don't need to create a new image to change the user, you can just run the postgres image as a different user instead:
# Run the stock postgres image as the invoking host user/group so files
# written to the volume carry that ownership — no custom image required.
postgres:
image: postgres:9.6
environment:
# Point PGDATA at a subdirectory so the data dir is not the mount root.
- PGDATA=/var/lib/postgresql/data/pgdata
# ${VAR:?msg} aborts compose with the message when VAR is unset; UID/GID
# are shell-local in bash, so the user must export them explicitly.
user: "${UID:?You must do 'export UID' to launch}:${GID:?You must do 'export GID' to launch}"
volumes:
- nfsmountdbdata:/var/lib/postgresql/data
ports:
- 5432:5432
I believe I solved this...
Dockerfile
FROM postgres:9.6
# Build args identifying the NFS-side group/user. NOTE(review): the string
# defaults ('groupname'/'groupid') are placeholders — GID must be overridden
# with a NUMERIC id via --build-arg or `groupadd -g` will fail.
ARG GNAME='groupname'
ARG GID='groupid'
ARG USERID=999
# fix permissions so it can persist data on the host nfs file system
# Re-map the image's postgres user/group to the ids that own the NFS export.
RUN groupadd -g $GID $GNAME \
&& usermod -g $GNAME postgres \
&& usermod -u $USERID postgres
# go get the entrypoint script from their git hub link and details to follow
COPY ./docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]
CMD ["postgres"]
Get the Postgres entrypoint script here
Make edits to it commenting out lines 34, 35, 36 52, 53, 54. Basically where it tries to chmod and chown the NFS folders.
...
if [ "$1" = 'postgres' ] && [ "$(id -u)" = '0' ]; then
#mkdir -p "$PGDATA"
#chown -R postgres "$PGDATA"
#chmod 700 "$PGDATA"
...
if [ "$1" = 'postgres' ]; then
#mkdir -p "$PGDATA"
#chown -R "$(id -u)" "$PGDATA" 2>/dev/null || :
#chmod 700 "$PGDATA" 2>/dev/null || :
...
Now build the image...
docker build -t postgres9.6:nfs --build-arg GID=<NFS GROUP ID> --build-arg GNAME=<NFS GROUP NAME> --build-arg USERID=<NFS USER ID> .
What I mean by NFS GROUP ID, USER ID, and GROUP NAME is the user/group that has read/write access to the NFS folders.
Now you should have a Postgres Docker image that is capable of using NFS Host Volumes to store the database data.
Hope this helps..
Something that worked easiest for me was to add a Dockerfile that did the following:
FROM postgres:11.2
ENV TZ=America/Los_Angeles
# Make us the same gid/id as the nfs mount.
# NOTE(review): rewriting /etc/group and /etc/passwd with sed assumes the
# postgres uid/gid is 999 in this image tag and that no OTHER entry
# contains ':999:' — usermod/groupmod would be the safer equivalent; verify
# before reusing with a different base tag.
RUN sed -i 's/:999:/:5081:/g' /etc/group
RUN sed -i 's/:999:999:/:5081:5081:/g' /etc/passwd
CMD [ "postgres", "-c", "max_connections=10000"]
I am building an app with Go and Glide in docker. I also have to use reflex to trigger the compiling automatically.
I can not figure out how to make Glide work out with docker.
Dockerfile
# Go build container with reflex for rebuild-on-save during development.
FROM golang:1.8.1-alpine
ENV GOBINARIES=/go/bin
ENV BUILDPATH=/code
ENV REFLEXURL=http://s3.amazonaws.com/wbm-raff/bin/reflex1.8a
ENV REFLEXSHA=19bdbbb68c869f85ee22a6b7fa9c73f8e5b46d0fe7a73df37e028555a6ba03e8
WORKDIR $GOBINARIES
# Bug fix: REFLEXSHA was declared but never checked. Download, verify the
# binary against the pinned sha256, and mark it executable in one layer.
# (The stray `RUN rm -rf /var/cache/apk/*` was removed — nothing is
# installed via apk, and a later rm cannot shrink earlier layers anyway.)
RUN wget -q "$REFLEXURL" -O reflex \
 && echo "$REFLEXSHA  reflex" | sha256sum -c - \
 && chmod +x /go/bin/reflex
ENV TOOLS=/go/_tools
RUN mkdir -p $BUILDPATH
ENV PORT=5000
EXPOSE $PORT
RUN mkdir -p $TOOLS
# COPY, not ADD: plain local files need no tar/URL handling (DL3020).
COPY build.sh reflex.conf $TOOLS/
RUN chown root $TOOLS/build.sh && chmod +x $TOOLS/build.sh
WORKDIR $BUILDPATH
# Exec form: reflex is PID 1 and receives container signals.
CMD ["reflex","-c","/go/_tools/reflex.conf"]
build.sh
#!/bin/sh
# build.sh — install vendored deps with glide, compile the Go service, and
# launch it. Invoked by reflex whenever the .build sentinel file changes.
# Bug fix: the original had no shebang; /bin/sh is used because the
# golang:alpine base image has no bash.
set -e
echo "[build.sh:building binary]"
cd "$BUILDPATH"
# -s strips VCS metadata, -v removes nested vendor directories.
glide install -s -v
go build -o /servicebin && rm -rf /tmp/*
echo "[build.sh:launching binary]"
# exec replaces the shell so the service receives signals directly.
exec /servicebin
reflex.conf
-sr '\.build$' -- sh -c '/go/_tools/build.sh'
docker-compose.yaml
# Dev compose file: builds the reflex image and mounts the project source
# (indentation lost in the paste).
version: '3'
services:
logen:
build:
context: ./Docker
dockerfile: Dockerfile
ports:
- "5000:5000"
volumes:
# Mount the project root at the image's BUILDPATH so reflex sees host
# edits immediately.
- .:/code
Atom on-save plugin configuration file
[
{
"srcDir": ".",
"destDir": ".",
"files": "**/*.go",
"command": "echo $(date) - ${srcFile} > .build"
}
]
main.go
// Minimal HTTP server used to exercise the reflex/glide rebuild loop.
package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

// hello writes a fixed greeting to every request.
func hello(w http.ResponseWriter, r *http.Request) {
	io.WriteString(w, "Hello world!1")
}

func main() {
	log.SetOutput(os.Stdout)
	// PORT is supplied by the image (ENV PORT 5000 in the Dockerfile).
	port := ":" + os.Getenv("PORT")
	http.HandleFunc("/", hello)
	log.Printf("\n Application is listening on %v\n", port)
	// Bug fix: the unused "github.com/astaxie/beego" import was removed
	// (unused imports are a compile error in Go), and ListenAndServe's
	// error is now reported instead of silently discarded.
	log.Fatal(http.ListenAndServe(port, nil))
}
Actually, I do not need to install Glide in the container! Just map the vendor folder on the host machine to $GOPATH/src in docker-compose.yml. Then the build compiles fine.
# Dev compose file: same as before, plus the vendor mapping that removes
# the need for glide inside the container (indentation lost in the paste).
version: '3'
services:
logen:
build:
context: ./Docker
dockerfile: Dockerfile
ports:
- "5000:5000"
volumes:
- .:/code
# Map the host's vendor/ tree onto GOPATH src so `go build` resolves the
# vendored packages without running glide in the container.
- ./vendor:/go/src