I have 4 containers configured as follows (docker-compose.yml):
version: '3'
networks:
  my-ntwk:
    ipam:
      config:
        - subnet: 172.20.0.0/24
services:
  f-app:
    image: f-app
    tty: true
    container_name: f-app
    hostname: f-app.info.my
    ports:
      - "22:22"
    networks:
      my-ntwk:
        ipv4_address: 172.20.0.5
    extra_hosts:
      - "f-db.info.my:172.20.0.6"
      - "p-app.info.my:172.20.0.7"
      - "p-db.info.my:172.20.0.8"
    depends_on:
      - f-db
      - p-app
      - p-db
  f-db:
    image: f-db
    tty: true
    container_name: f-db
    hostname: f-db.info.my
    networks:
      my-ntwk:
        ipv4_address: 172.20.0.6
  p-app:
    image: p-app
    tty: true
    container_name: p-app
    hostname: p-app.info.my
    networks:
      my-ntwk:
        ipv4_address: 172.20.0.7
  p-db:
    image: p-db
    tty: true
    container_name: prod-db
    hostname: p-db.info.my
    networks:
      my-ntwk:
        ipv4_address: 172.20.0.8
Each image is built from the same Dockerfile:
FROM openjdk:8
RUN apt-get update && \
apt-get install -y openssh-server
EXPOSE 22
RUN useradd -s /bin/bash -p $(openssl passwd -1 myuser) -d /home/nf2/ -m myuser
ENTRYPOINT service ssh start && bash
Now I want to be able to connect from f-app to any of the other machines without typing a password, e.g. by running ssh myuser@f-db.info.my.
I know that I need to exchange SSH keys between the servers (that's not a problem). My question is how to do that with Docker containers, and when (build time or runtime).
For passwordless SSH you need a user and SSH keys configured in the containers: the private key must be available in the source container, and the corresponding public key must be added to authorized_keys in the destination container.
Here is a working Dockerfile:
FROM openjdk:7
RUN apt-get update && \
apt-get install -y openssh-server vim
EXPOSE 22
RUN useradd -rm -d /home/nf2/ -s /bin/bash -g root -G sudo -u 1001 ubuntu
USER ubuntu
WORKDIR /home/ubuntu
RUN mkdir -p /home/nf2/.ssh/ && \
chmod 0700 /home/nf2/.ssh && \
touch /home/nf2/.ssh/authorized_keys && \
chmod 600 /home/nf2/.ssh/authorized_keys
COPY ssh-keys/ /keys/
RUN cat /keys/ssh_test.pub >> /home/nf2/.ssh/authorized_keys
USER root
ENTRYPOINT service ssh start && bash
The docker-compose file stays the same; here is a test script you can try:
#!/bin/bash
set -e
echo "start docker-compose"
docker-compose up -d
echo "list of containers"
docker-compose ps
echo "starting ssh test from f-db to f-app"
docker exec -it f-db sh -c "ssh -i /keys/ssh_test ubuntu@f-app"
For further detail, you can try the complete working example, docker-container-ssh:
git clone git@github.com:Adiii717/docker-container-ssh.git
cd docker-container-ssh;
./test.sh
You can replace the keys; the ones in the repo were added for testing purposes only.
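If you do swap in your own keys, a minimal way to generate a fresh pair on the host (using the ssh-keys/ folder and ssh_test file names that the Dockerfile and test script above expect) is:
mkdir -p ssh-keys
ssh-keygen -t rsa -b 4096 -N "" -f ssh-keys/ssh_test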
If you are using Docker Compose, an easy option is to forward your SSH agent like this:
something:
  container_name: something
  volumes:
    - $SSH_AUTH_SOCK:/ssh-agent # Forward local machine SSH agent to the container
  environment:
    SSH_AUTH_SOCK: /ssh-agent
SSH agent forwarding on macOS hosts: instead of mounting the path in $SSH_AUTH_SOCK, you have to mount /run/host-services/ssh-auth.sock.
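For example, on a macOS host the same service would look roughly like this (Docker Desktop exposes the host agent at that fixed path):
something:
  container_name: something
  volumes:
    - /run/host-services/ssh-auth.sock:/ssh-agent # macOS: Docker Desktop's agent socket
  environment:
    SSH_AUTH_SOCK: /ssh-agent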
It's a harder problem if you need to use SSH at build time, for example if you're using git clone, or in my case pip and npm, to download from a private repository.
The solution I found is to pass the keys in using the --build-arg flag. Then you can use the experimental --squash flag (added in Docker 1.13) to merge the layers so that the keys are no longer available after removal. Here's my solution:
Build command
$ docker build -t example --build-arg ssh_prv_key="$(cat ~/.ssh/id_rsa)" --build-arg ssh_pub_key="$(cat ~/.ssh/id_rsa.pub)" --squash .
Dockerfile
FROM openjdk:8
ARG ssh_prv_key
ARG ssh_pub_key
RUN apt-get update && \
apt-get install -y \
git \
openssh-server \
libmysqlclient-dev
# Authorize SSH Host
RUN mkdir -p /root/.ssh && \
chmod 0700 /root/.ssh && \
ssh-keyscan github.com > /root/.ssh/known_hosts
# Add the keys and set permissions
RUN echo "$ssh_prv_key" > /root/.ssh/id_rsa && \
echo "$ssh_pub_key" > /root/.ssh/id_rsa.pub && \
chmod 600 /root/.ssh/id_rsa && \
chmod 600 /root/.ssh/id_rsa.pub
RUN apt-get update && \
apt-get install -y openssh-server && \
apt-get install -y openssh-client
EXPOSE 22
RUN useradd -s /bin/bash -p $(openssl passwd -1 myuser) -d /home/nf2/ -m myuser
ENTRYPOINT service ssh start && bash
If you're using Docker 1.13+ and/or have experimental features enabled, you can append --squash to the build command, which will merge the layers, removing the SSH keys and hiding them from docker history.
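As a sanity check (not part of the original recipe), you can inspect the final image's metadata to confirm the key material no longer shows up in the layer history:
docker history --no-trunc example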
Related
I can access the target with an SSH password and with the private key from the Jenkins container's shell. I configured SSH Sites on Jenkins with the same host, user and private key, but I get the following error:
Docker logs:
2022-09-23 05:06:52.357+0000 [id=71] SEVERE o.j.h.p.SSHBuildWrapper$DescriptorImpl#doLoginCheck: Auth fail
2022-09-23 05:06:52.367+0000 [id=71] SEVERE o.j.h.p.SSHBuildWrapper$DescriptorImpl#doLoginCheck: Can't connect to server
Docker-compose:
version: '3'
services:
  jenkins:
    container_name: jenkins
    image: jenkins/jenkins
    ports:
      - "8080:8080"
    volumes:
      - $PWD/jenkins_home:/var/jenkins_home
    networks:
      - net
  remote_host:
    container_name: remote-host
    image: remote-host
    build:
      context: fedora
      dockerfile: Dockerfile
    networks:
      - net
  db_host:
    container_name: db
    image: mysql:5.7
    environment:
      - "MYSQL_ROOT_PASSWORD=PASSWORD"
    volumes:
      - "$PWD/db:/var/lib/mysql"
    networks:
      - net
networks:
  net:
Dockerfile:
FROM fedora
RUN yum update -y
RUN yum -y install unzip
RUN yum -y install openssh-server
RUN useradd RemoteUser && \
echo "RemoteUser:Password"| chpasswd && \
mkdir /home/madchabelo/.ssh && \
chmod 700 /home/madchabelo/.ssh
COPY remote-ki.pub /home/madchabelo/.ssh/authorized_keys
RUN chown madchabelo:madchabelo -R /home/madchabelo/.ssh/ && \
chmod 600 /home/madchabelo/.ssh/authorized_keys
RUN ssh-keygen -A
RUN yum -y install mysql
RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
unzip awscliv2.zip && \
sudo ./aws/install
RUN yum -y install vim
CMD /usr/sbin/sshd -D
I tried with the IP and I get the same error.
Regards
When creating the private key, on Ubuntu 20.04 and later you should generate it with the following command:
ssh-keygen -t ecdsa -m PEM -f remote-key
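The freshly generated public key then has to replace the one baked into the remote-host image; with the build context from the compose file above, roughly:
cp remote-key.pub fedora/remote-ki.pub
docker-compose build remote_host
docker-compose up -d remote_host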
For a more detailed explanation, see the link below:
https://community.jenkins.io/t/ssh-connection-auth-fail/4121/7
I have a program that builds servers automatically whenever we want stakeholders to test a new feature.
Currently I have the following setup:
Container 1 - all (contains nodejs, php and other dependencies)
Container 2 - db (contains the mysql database)
I'm aware that container 1 should be split, but that would add unnecessary complexity at this stage of development.
Whenever a new feature is completed and ready to be deployed to a stage server we run: yarn run create:server --branchName=new-feature. This will create all of the configuration necessary to bring up our newly created server.
My problem is that whenever I run the command above, I need to create a database in the db container from the all container:
mysql -u root -pxxxx -e "CREATE DATABASE IF NOT EXISTS `xxxx`"
The script main.ts runs in the context of the all container, so all needs to be able to communicate with db.
export const createDatabase = (subdomain: string) => {
  const username = process.env.DB_USERNAME;
  const password = process.env.DB_PASSWORD;

  console.log(`[INFO] Creating database with name \`${subdomain}\``);

  // triple backslash is necessary to avoid `command substitution` in some shells
  if (isLocalEnviroment()) {
    execSync(`docker run -it stage-manager-db mysql -u ${username} -p${password} -e "CREATE DATABASE IF NOT EXISTS \\\`${subdomain}\\\`"`)
  } else {
    execSync(`mysql -u ${username} -p${password} -e "CREATE DATABASE IF NOT EXISTS \\\`${subdomain}\\\`"`)
  }

  console.log(`[INFO] Database \`${subdomain}\` created successfully`);
}
In the local environment we would like to use Docker, while in production everything will sit on the same machine (db, frontend app and API).
When trying to run the following command from the all container:
docker run -it stage-manager-db mysql -u root -ppassword -e "CREATE DATABASE IF NOT EXISTS master"
I get
docker: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?.
I have tried restarting the service with:
service docker restart
which gives
[ ok ] Starting Docker: docker.
but trying to communicate with db from all still produces the same error. When I try service docker stop I get:
[....] Stopping Docker: dockerstart-stop-daemon: warning: failed to kill 825: No such process
No process in pidfile '/var/run/docker-ssd.pid' found running; none killed.
failed!
So far I have tried the following links to fix this issue:
https://github.com/docker/for-linux/issues/52#issuecomment-333563492
https://askubuntu.com/questions/1146634/how-to-remove-docker-from-windows-subsystem
Cannot connect to the Docker daemon at unix:///var/run/docker.sock
Cant uninstall Docker from Ubuntu on WSL
How can I communicate from all container to db container?
Dockerfile
FROM php:7.4-fpm
# Install dependencies
RUN apt-get update && apt-get install -y \
build-essential \
libpng-dev \
libjpeg62-turbo-dev \
libfreetype6-dev \
locales \
zip \
jpegoptim optipng pngquant gifsicle \
vim \
unzip \
git \
curl \
libzip-dev \
libfontconfig1 \
libxrender1 \
libpng-dev \
make \
nginx \
apt-transport-https \
gnupg2 \
wget \
procps \
docker.io
# Install nodejs
RUN apt -y install curl dirmngr apt-transport-https lsb-release ca-certificates
RUN curl -sL https://deb.nodesource.com/setup_12.x | bash -
RUN apt -y install nodejs
# Install extensions
RUN docker-php-ext-install pdo_mysql exif zip pcntl gd
RUN docker-php-ext-configure gd --with-freetype --with-jpeg
RUN docker-php-ext-install -j$(nproc) gd
# Install composer
RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer
RUN apt-get update && \
apt-get upgrade -y && \
apt-get install -y git
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
# Install Yarn
RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
RUN apt update && apt install yarn
# Install dependencies for this project
RUN yarn global add ts-node typescript
RUN useradd -m forge
# Copy existing application directory contents
COPY . /var/www
# Copy existing application directory permissions
COPY --chown=forge:forge . /var/forge
# Copy ssh keys
COPY ./config/ssh /home/forge/.ssh/
# Give right permissions to `ssh` keys
RUN chmod 600 /home/forge/.ssh/config
RUN chmod 600 /home/forge/.ssh/back_end_deploy_key
RUN chmod 600 /home/forge/.ssh/frontend_deploy_key
RUN chmod 644 /home/forge/.ssh/back_end_deploy_key.pub
RUN chmod 644 /home/forge/.ssh/frontend_deploy_key.pub
RUN chown forge:forge /home/forge/.ssh/*
# Up Docker
RUN service docker start
RUN usermod -aG docker forge
# Create folder for stage servers
RUN mkdir -p /var/www/stage-servers
# Give correct permissions to `stage-servers` folder
RUN chown forge:www-data /var/www/stage-servers
RUN chmod g+s /var/www/stage-servers
RUN chmod o-rwx /var/www/stage-servers
# Change current user to forge
USER forge
# Expose port 9000 and start php-fpm server
EXPOSE 9000
CMD ["php-fpm"]
docker-compose.yml
version: '3.7'
services:
  all:
    working_dir: /var/www/stage-manager
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "8080:80"
    volumes:
      - "./:/var/www/stage-manager"
      - "./config/ssh:/root/.ssh"
    networks:
      - main

  #MySQL Service
  db:
    image: mysql:5.7.22
    container_name: stage-manager-db
    restart: unless-stopped
    tty: true
    ports:
      - "3306:3306"
    environment:
      MYSQL_DATABASE: whatever
      MYSQL_ROOT_PASSWORD: password
      SERVICE_TAGS: dev
      SERVICE_NAME: mysql
    volumes:
      - dbdata:/var/lib/mysql/
    networks:
      - main

volumes:
  project:
    driver: local
    driver_opts:
      type: none
      device: $PWD/
      o: bind
  dbdata:
    driver: local

networks:
  main:
I'm fairly new to Docker, so if there is anything I'm doing wrong, please let me know. I have a feeling this could be done much better, so feel free to suggest improvements.
Update
** DO NOT DO THIS **
Instead of deleting this answer I will leave it here so others can see that this is not a secure/valid solution to this problem.
From David Maze's comment:
Remember that anyone who can access the Docker socket has unrestricted root-level access over the whole host system. I would not add the Docker socket in casually here.
I was able to make it work by sharing the Docker socket between my host OS and the all container.
docker-compose.yml
all:
  working_dir: /var/www/stage-manager
  build:
    context: .
    dockerfile: Dockerfile
  ports:
    - "8080:80"
  volumes:
    - "./:/var/www/stage-manager"
    - "./config/ssh:/root/.ssh"
    - "/var/run/docker.sock:/var/run/docker.sock" # <- important part
  networks:
    - main
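For what it's worth, the container-to-container part does not require the Docker socket at all: over the shared main network the db container is reachable by its service name db (or its container_name, stage-manager-db). A sketch, assuming a MySQL client is installed in the all image and using the root credentials from the compose file:
# run inside the `all` container
mysql -h db -u root -ppassword -e "CREATE DATABASE IF NOT EXISTS \`new-feature\`"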
I have created 2 Docker containers: one running Jenkins and one running an OpenSSH server on an Ubuntu base.
I configured the Jenkins container by pulling the Jenkins image, and for the OpenSSH server I created the image using a Dockerfile.
I generated a key pair and copied the public key to authorized_keys on the OpenSSH server.
Then, when I try to log in from the Jenkins container to the OpenSSH server using
ssh -i remote-key remote_user@remote-host
it prompts me for a password.
Here are my Dockerfile and docker-compose.
Dockerfile:
From ubuntu
Run apt-get update
Run apt-get install -y openssh-server
Run useradd -m -d /home/remote_user remote_user && \
echo "remote_user:1234" |chpasswd && \
mkdir -p /home/remote_user/.ssh && \
chmod 700 /home/remote_user/.ssh
Copy remote-key.pub /home/remote_user/.ssh/authorised_keys
Run chown remote_user:remote_user -R /home/remote_user/.ssh && \
chmod 600 /home/remote_user/.ssh/authorised_keys
RUN service ssh start
EXPOSE 22
CMD ["/usr/sbin/sshd","-D"]`
docker-compose
version: '3'
services:
  jenkins:
    container_name: jenkins
    image: jenkins/jenkins
    ports:
      - "8080:8080"
    volumes:
      - "/home/sarthak/jenkins/jenkins1/jenkins1_home:/var/jenkins_home"
    networks:
      - net
  remote_host:
    container_name: remote-host
    image: remote-host
    build:
      context: /home/sarthak/jenkins/jenkins1/
    networks:
      - net
networks:
  net:
Login via password is successful, but I need passwordless login. Please help.
You should change authorised_keys to authorized_keys in your Dockerfile; two lines have the same issue:
COPY remote-key.pub /home/remote_user/.ssh/authorized_keys
and
RUN chown remote_user:remote_user -R /home/remote_user/.ssh && \
chmod 600 /home/remote_user/.ssh/authorized_keys
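After fixing the spelling and rebuilding remote-host, a quick way to confirm the key is actually offered and accepted is to run ssh verbosely from inside the Jenkins container (the key path here is illustrative; use wherever you stored the private key):
docker exec -it jenkins ssh -v -i /var/jenkins_home/remote-key remote_user@remote-host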
I have a docker-compose file that runs a few services.
services:
  cli:
    build:
      context: .
      dockerfile: docker/cli/Dockerfile
    volumes:
      - ./drupal8site:/var/www/html/drupal8site

  drupal:
    container_name: drupal
    build:
      context: .
      dockerfile: docker/DockerFile.drupal
      args:
        DOC_ROOT: /var/www/html/drupal8site
    ports:
      - 80:80
    volumes:
      - ./drupal8site:/var/www/html/drupal8site
    restart: always
    environment:
      APACHE_DOCUMENT_ROOT: /var/www/html/drupal8site/web

  mysql:
    image: mysql:5.7
    ports:
      - 3306:3306
    volumes:
      - ./mysql:/var/lib/mysql
    environment:
      MYSQL_ROOT_PASSWORD: root
I would like to add another service: a container in which I could run CLI commands (Composer, Drush for Drupal, PHP, etc.).
The following Dockerfile is how I initially defined the cli service, but the container stops right after it starts. How do I define it so that it is part of my docker-compose setup, shares my mounted volume, and lets me connect interactively and run CLI commands?
FROM php:7.2-cli
#various programs
RUN apt-get update \
&& apt-get install vim --assume-yes \
&& apt-get install git --assume-yes \
&& apt-get install mysql-client --assume-yes
CMD ["bash"]
Thanks,
Yaron
If you want to run automated scripts against Docker images, that is really a job for a CI pipeline; you could use Cloud Foundry or OpenStack for that.
But there are several other questions in this post:
1) How can I share my mounted volume?
You can pass a volume to a container with the -v option, e.g.:
docker run -it -d -v $(pwd)/localFolder:/exposedFolderFromDocker mydockerhub/myawesomeimage
2) Can I interactively connect to it and run CLI commands on it?
docker exec -it docker_cli_1 bash
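Note that docker exec only works while the container is running; because the cli image ends with CMD ["bash"], the container exits immediately under docker-compose up unless it gets a terminal. One way to keep it alive (a sketch, not from the original compose file) is to give the service a TTY:
cli:
  build:
    context: .
    dockerfile: docker/cli/Dockerfile
  volumes:
    - ./drupal8site:/var/www/html/drupal8site
  stdin_open: true # equivalent of docker run -i
  tty: true        # equivalent of docker run -t, keeps bash from exiting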
I recommend implementing the features of a Docker image in that image's own Dockerfile, for example by copying in and running a prepared shell script:
# your Dockerfile
FROM php:7.2-cli
#various programs
RUN apt-get update \
&& apt-get install vim --assume-yes \
&& apt-get install git --assume-yes \
&& apt-get install mysql-client --assume-yes
# individual changes
COPY your_script.sh /
RUN chown root:root /your_script.sh && \
chmod 0755 /your_script.sh
CMD ["/your_script.sh"]
# a folder to expose
VOLUME /exposedFolderFromDocker
CMD ["bash"]
I would like to have a shared directory between my containers ftp and s3fs. To do so, I have created a volume called s3 in my docker-compose file.
If I stop s3fs from running in my s3fs container, then I can create files in the ftp container and they will show up inside s3fs under /home/files.
However, when s3fs is running, the directory /home/files remains empty even while I create files in the ftp container.
This is what my /proc/mounts file looks like:
/dev/sda2 /home/files ext4 rw,relatime,data=ordered 0 0
s3fs /home/files fuse.s3fs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
I believe FUSE may be overriding my Docker volume; has anyone encountered this problem before?
docker-compose.yml
version: "3"
services:
ftp:
image: app/proftpd:latest
volumes:
- s3:/home/files
ports:
- 2222:2222
s3fs:
image: app/s3fs:latest
command: start
env_file:
- s3fs/aws.env
volumes:
- s3:/home/files
cap_add:
- SYS_ADMIN
devices:
- "/dev/fuse"
environment:
ENVIRONMENT: "dev"
volumes:
s3:
s3fs - Dockerfile
FROM ubuntu:16.04
RUN apt-get update -qq
RUN apt-get install -y \
software-properties-common
RUN apt-get update -qq
RUN apt-get install -y \
automake \
autotools-dev \
fuse \
g++ \
git \
libcurl4-openssl-dev \
libfuse-dev \
libssl-dev \
libxml2-dev \
make \
pkg-config \
curl
RUN curl -L https://github.com/s3fs-fuse/s3fs-fuse/archive/v1.84.tar.gz | tar zxv -C /usr/src
RUN cd /usr/src/s3fs-fuse-1.84 && ./autogen.sh && ./configure --prefix=/usr --with-openssl && make && make install
COPY entrypoint.sh /opt/s3fs/bin/entrypoint.sh
RUN mkdir -p /home/files
WORKDIR /opt/s3fs/bin
ENTRYPOINT ["/bin/sh", "./entrypoint.sh"]
s3fs - entrypoint.sh
#!/usr/bin/env bash

case $1 in
  start)
    echo "Starting S3Fs: "
    s3fs mybucket /home/files -o allow_other,nonempty -d -d
    ;;
esac
ftp - Dockerfile
FROM ubuntu:16.04
RUN apt-get update && apt-get install -y \
openssh-server \
proftpd-basic \
proftpd-mod-mysql
COPY proftpd.conf /etc/proftpd/proftpd.conf
COPY sftp.conf /etc/proftpd/conf.d/sftp.conf
COPY setup.sh /etc/proftpd/setup.sh
RUN chmod 500 /etc/proftpd/setup.sh && /etc/proftpd/setup.sh
EXPOSE 2222
ENTRYPOINT ["/bin/sh", "/etc/proftpd/entrypoint.sh"]
You can mount S3 in your Docker container in the following way.
1. Add this to your Dockerfile:
RUN apt-get install -y fuse s3fs
RUN mkdir /root/.aws
RUN touch /root/.aws/.passwd-s3fs && chmod 600 /root/.aws/.passwd-s3fs
COPY entrypoint.sh ./
RUN chmod 700 entrypoint.sh
ENTRYPOINT entrypoint.sh
2. Create entrypoint.sh with the following script:
#!/bin/sh
echo "$AWS_CREDS" > /root/.aws/.passwd-s3fs
echo "$BUCKET_NAME /srv/files fuse.s3fs _netdev,allow_other,passwd_file=/root/.aws/.passwd-s3fs 0 0" > /etc/fstab
mount -a
<your old CMD or ENTRYPOINT>
3. In docker-compose.yml, add:
<your-container-name>:
  image: ...
  build: ...
  environment:
    - AWS_ID="AKI..."
    - AWS_KEY="omIE..."
    - AWS_CREDS=AKI...:2uMZ...
    - BUCKET_NAME=<YOUR bucket name>
  devices:
    - "/dev/fuse"
  cap_add:
    - SYS_ADMIN
  security_opt:
    - seccomp:unconfined
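Once the container is up, it is worth checking that the bucket actually mounted where the fstab entry points (/srv/files is the mount point used in the entrypoint above):
docker-compose exec <your-container-name> sh -c "mount | grep s3fs && ls /srv/files"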