I have been using the Vagrantfile below to create virtual machine and the same time install docker on the vms by simply running vagrant up.
Now this script fails whenever i make subsequent attempt to create virtual machine and install docker.
# -*- mode: ruby -*-
# vi: set ft=ruby :
boxes = [
{
:name => "manager1",
:eth1 => "192.168.205.10",
:mem => "512",
:cpu => "1"
},
{
:name => "manager2",
:eth1 => "192.168.205.11",
:mem => "512",
:cpu => "2"
},
{
:name => "manager3",
:eth1 => "192.168.205.12",
:mem => "512",
:cpu => "2"
},
{
:name => "worker1",
:eth1 => "192.168.205.13",
:mem => "1024",
:cpu => "2"
},
{
:name => "worker2",
:eth1 => "192.168.205.14",
:mem => "1024",
:cpu => "2"
},
{
:name => "worker3",
:eth1 => "192.168.205.15",
:mem => "1024",
:cpu => "2"
}
]
Vagrant.configure(2) do |config|
config.vm.box = "ubuntu/xenial64"
config.vm.boot_timeout = 2000
config.vm.provider "vmware_fusion" do |v, override|
override.vm.box = "base"
end
# Turn off shared folders
config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true
boxes.each do |opts|
config.vm.define opts[:name] do |config|
config.vm.hostname = opts[:name]
config.vm.provider "vmware_fusion" do |v|
v.vmx["memsize"] = opts[:mem]
v.vmx["numvcpus"] = opts[:cpu]
end
config.vm.provider "virtualbox" do |v|
v.customize ["modifyvm", :id, "--name", opts[:name]]
v.customize ["modifyvm", :id, "--memory", opts[:mem]]
v.customize ["modifyvm", :id, "--cpus", opts[:cpu]]
end
config.vm.network :private_network, ip: opts[:eth1]
end
config.vm.provision "shell", inline: <<-SHELL
apt-get update
apt-get -y upgrade
apt-get -y install \
apt-transport-https \
ca-certificates \
curl \
software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update
apt-get -y install docker-ce docker-ce-cli containerd.io
getent group docker || groupadd docker
usermod -aG docker vagrant
service docker restart
SHELL
end
end
I keep having this error
manager1: E
manager1: :
manager1: Failed to fetch https://download.docker.com/linux/ubuntu/dists/focal/pool/stable/amd64/containerd.io_1.6.8-1_amd64.deb Hash Sum mismatch
manager1:
manager1: Hashes of expected file:
manager1:
manager1: - SHA512:6eb0014637ad22d560eba5178b5d5cc54cdcc9ed3b272aebee3985f96738f1e7d40ed69894619b151255683489c22fd9ea70b162ace56798e298d9a0d8da67e2
manager1:
manager1: - SHA256:46f1029a59484e58f21ecc45ae58bbe90620e1616d73048aaa61169f9ef957af
manager1: - SHA1:7a18fb24a5cd293cdd0f2f7e1c2ffd2703f2e035 [weak]
manager1: - MD5Sum:85967f85ea77756f14fc57523a7c29d0 [weak]
manager1: - Filesize:28140650 [weak]
manager1: Hashes of received file:
manager1: - SHA512:d9ba9dc20038dc0310d3d61bdc55fca7ce7123caf1f1f4dde8a06ea47ec917cb64a94d49b9cf5dd918058a1a41dc501a8953b8aa2a2401b2a00329b9cd0d1b99
manager1: - SHA256:689afaad3eef20f553ffe1191e2fa0e7946da274968f3811d2cb21b50e73002c
manager1: - SHA1:f529a30f513ac3f896ad9d0f63a6e6e050095071 [weak]
manager1: - MD5Sum:43891c37db034c6a084ea04abb463f5a [weak]
manager1: - Filesize:28140650 [weak]
manager1: Last modification reported: Thu, 08 Sep 2022 20:43:28 +0000
manager1: E: Failed to fetch https://download.docker.com/linux/ubuntu/dists/focal/pool/stable/amd64/docker-ce-cli_20.10.18~3-0~ubuntu-focal_amd64.deb Hash Sum mismatch
manager1: Hashes of expected file:
manager1: - SHA512:be66b6bef0f9a64ba6669b2360882db0f5ed5f5d32f6a4bf0421b49e7dd7432cedd44e26d74dffa13f30ec3f70d4db96350f8411c6ee2523f7195848e7baeac2
manager1: - SHA256:995152b8e8ad73cff59ad2756c82ea40ab5634b862bcca1037ee945a811b99cf
manager1: - SHA1:be8ffe23e1f5ac53b559784376cea29e748a2998 [weak]
manager1: - MD5Sum:b0431ee9f86ad0d415d153a6ece57145 [weak]
manager1: - Filesize:41490986 [weak]
manager1: Hashes of received file:
manager1: - SHA512:1ecbac81e9eb947fe04fe9f61614af476e1a4893c4a31283af7e4c8ba89afe45bf27598a6cf36703cbd43cfe774748ec09b23baf0f78eebb6f9f70e4e42c82f7
manager1: - SHA256:037606a8f7a734f743759d1d5c36f3e81c1bd7e7409d38e5322cddf3a20f9621
manager1: - SHA1:be8ffe23e1f5ac53b559784376cea29e748a2998 [weak]
manager1: - MD5Sum:b0431ee9f86ad0d415d153a6ece57145 [weak]
manager1: - Filesize:41490986 [weak]
manager1: Last modification reported: Fri, 09 Sep 2022 09:01:03 +0000
manager1: E: Unable to fetch some archives, maybe run apt-get update or try with --fix-missing?
manager1: Failed to restart docker.service: Unit docker.service not found.
The SSH command responded with a non-zero exit status. Vagrant
assumes that this means the command failed. The output for this command
should be in the log above. Please read the output to determine what
went wrong.
And I have modified the Vagrantfile to something like this below
# -*- mode: ruby -*-
# vi: set ft=ruby :
boxes = [
{
:name => "manager1",
:eth1 => "192.168.205.10",
:mem => "512",
:cpu => "1"
},
{
:name => "manager2",
:eth1 => "192.168.205.11",
:mem => "512",
:cpu => "2"
},
{
:name => "manager3",
:eth1 => "192.168.205.12",
:mem => "512",
:cpu => "2"
},
{
:name => "worker1",
:eth1 => "192.168.205.13",
:mem => "1024",
:cpu => "2"
},
{
:name => "worker2",
:eth1 => "192.168.205.14",
:mem => "1024",
:cpu => "2"
},
{
:name => "worker3",
:eth1 => "192.168.205.15",
:mem => "1024",
:cpu => "2"
}
]
Vagrant.configure(2) do |config|
config.vm.box = "ubuntu/focal64"
config.vm.boot_timeout = 2000
config.vm.provider "vmware_fusion" do |v, override|
override.vm.box = "base"
end
# Turn off shared folders
config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true
boxes.each do |opts|
config.vm.define opts[:name] do |config|
config.vm.hostname = opts[:name]
config.vm.provider "vmware_fusion" do |v|
v.vmx["memsize"] = opts[:mem]
v.vmx["numvcpus"] = opts[:cpu]
end
config.vm.provider "virtualbox" do |v|
v.customize ["modifyvm", :id, "--name", opts[:name]]
v.customize ["modifyvm", :id, "--memory", opts[:mem]]
v.customize ["modifyvm", :id, "--cpus", opts[:cpu]]
end
config.vm.network :private_network, ip: opts[:eth1]
end
config.vm.provision "shell", inline: <<-SHELL
apt-get clean
apt-get update
apt-get -y upgrade
apt-get -y install \
apt-transport-https \
ca-certificates \
curl \
gnupg-agent \
software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
echo "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable" > /etc/apt/sources.list.d/docker.list
apt-get clean
apt-get update
curl -k -O -L https://download.docker.com/linux/ubuntu/dists/focal/pool/stable/amd64/docker-scan-plugin_0.17.0~ubuntu-focal_amd64.deb
curl -k -O -L https://download.docker.com/linux/ubuntu/dists/focal/pool/stable/amd64/docker-ce-rootless-extras_20.10.18~3-0~ubuntu-focal_amd64.deb
curl -k -O -L https://download.docker.com/linux/ubuntu/dists/focal/pool/stable/amd64/containerd.io_1.6.8-1_amd64.deb
curl -k -O -L https://download.docker.com/linux/ubuntu/dists/focal/pool/stable/amd64/docker-ce-cli_20.10.18~3-0~ubuntu-focal_amd64.deb
curl -k -O -L https://download.docker.com/linux/ubuntu/dists/focal/pool/stable/amd64/docker-ce_20.10.18~3-0~ubuntu-focal_amd64.deb
sudo apt install ./docker-scan-plugin_0.17.0~ubuntu-focal_amd64.deb
sudo apt install ./docker-ce-rootless-extras_20.10.18~3-0~ubuntu-focal_amd64.deb
sudo apt install ./containerd.io_1.6.8-1_amd64.deb
sudo apt install ./docker-ce-cli_20.10.18~3-0~ubuntu-focal_amd64.deb
sudo apt install ./docker-ce_20.10.18~3-0~ubuntu-focal_amd64.deb
getent group docker || groupadd docker
usermod -aG docker vagrant
service docker restart
SHELL
end
end
Yet I am still having errors such as this in different positions of the installation progress display messages
manager1: Get:1 https://download.docker.com/linux/ubuntu focal/stable amd64 docker-scan-plugin amd64 0.17.0~ubuntu-focal [3521 kB]
manager1: dpkg-preconfigure: unable to re-open stdin: No such file or directory
..........................................................................
..........................................................................
..........................................................................
..........................................................................
manager1: Unpacking docker-scan-plugin (0.17.0~ubuntu-focal) ...
manager1: Setting up docker-scan-plugin (0.17.0~ubuntu-focal) ...
manager1: WARNING:
manager1: apt
manager1:
manager1: does not have a stable CLI interface.
manager1: Use with caution in scripts.
..........................................................................
..........................................................................
..........................................................................
manager1: - Filesize:8392456 [weak]
manager1: Last modification reported: Fri, 09 Sep 2022 09:01:04 +0000
manager1: Fetched 8392 kB in 14s (592 kB/s)
manager1: E
manager1: :
manager1: Failed to fetch https://download.docker.com/linux/ubuntu/dists/focal/pool/stable/amd64/docker-ce-rootless-extras_20.10.18~3-0~ubuntu-focal_amd64.deb Hash Sum mismatch
manager1:
manager1: Hashes of expected file:
manager1: - SHA512:a157e61be464612c83b5739792beec6bbf4ecffe581d26be30c1abbdf78f325569abea87ad2d7a1ed1ec0a7df3dd1c5209b6998368730d4de03b1ba9429dffa0
manager1: - SHA256:e7e3f7d874ef3b6a81a2bb453b013a8b902f44020cf98fb903f79643125da0e2
manager1: - SHA1:b6baad11031fbb46f452b26acd50b919409f66da [weak]
manager1: - MD5Sum:9e4d78d39526a215879bea52b869c65c [weak]
manager1: - Filesize:8392456 [weak]
manager1: Hashes of received file:
manager1: - SHA512:a157e61be464612c83b5739792beec6bbf4ecffe581d26be30c1abbdf78f325569abea87ad2d7a1ed1ec0a7df3dd1c5209b6998368730d4de03b1ba9429dffa0
manager1: - SHA256:abcaa85388bc4f8416101cf4141c1f5f262798206f12ac66501a4f75c16a7d6b
manager1: - SHA1:b6baad11031fbb46f452b26acd50b919409f66da [weak]
manager1: - MD5Sum:9e4d78d39526a215879bea52b869c65c [weak]
manager1: - Filesize:8392456 [weak]
manager1: Last modification reported: Fri, 09 Sep 2022 09:01:04 +0000
manager1: E: Unable to fetch some archives, maybe run apt-get update or try with --fix-missing?
manager1: WARNING:
manager1: apt
manager1:
manager1: does not have a stable CLI interface.
manager1: Use with caution in scripts.
manager1: Reading package lists...
manager1: E
manager1: :
manager1: Invalid archive member header
manager1: E
manager1: :
manager1: Could not read meta data from /home/vagrant/containerd.io_1.6.8-1_amd64.deb
manager1: E
manager1: : The package lists or status file could not be parsed or opened.
manager1: WARNING:
manager1: apt
manager1:
manager1: does not have a stable CLI interface.
manager1: Use with caution in scripts.
......................................................
.....................................................
....................................................
manager1: - SHA1:ad6d369560655b309a9108bc218e49ae54dfa90c [weak]
manager1: - MD5Sum:b0431ee9f86ad0d415d153a6ece57145 [weak]
manager1: - Filesize:41490986 [weak]
manager1: Last modification reported: Fri, 09 Sep 2022 09:01:03 +0000
manager1: Fetched 41.5 MB in 56s (742 kB/s)
manager1: E
manager1: :
manager1: Failed to fetch https://download.docker.com/linux/ubuntu/dists/focal/pool/stable/amd64/docker-ce-cli_20.10.18~3-0~ubuntu-focal_amd64.deb Hash Sum mismatch
manager1:
manager1: Hashes of expected file:
manager1:
manager1: - SHA512:be66b6bef0f9a64ba6669b2360882db0f5ed5f5d32f6a4bf0421b49e7dd7432cedd44e26d74dffa13f30ec3f70d4db96350f8411c6ee2523f7195848e7baeac2
manager1:
manager1: - SHA256:995152b8e8ad73cff59ad2756c82ea40ab5634b862bcca1037ee945a811b99cf
manager1:
manager1: - SHA1:be8ffe23e1f5ac53b559784376cea29e748a2998 [weak]
manager1:
manager1: - MD5Sum:b0431ee9f86ad0d415d153a6ece57145 [weak]
manager1:
manager1: - Filesize:41490986 [weak]
manager1:
manager1: Hashes of received file:
manager1:
manager1: - SHA512:d1c450b1bc78c44e2da788129b3bf4812811b40c176fe7069f7ec2928d29aee8027a758266e360e1c232846902379b7516cb8dde6ce721d2f07b15e642461f84
manager1:
manager1: - SHA256:be60933dfbca1778c175423cad6efcf018532ec515705fef105c1c0ee7b2c3da
manager1:
manager1: - SHA1:ad6d369560655b309a9108bc218e49ae54dfa90c [weak]
manager1:
manager1: - MD5Sum:b0431ee9f86ad0d415d153a6ece57145 [weak]
manager1:
manager1: - Filesize:41490986 [weak]
manager1:
manager1: Last modification reported: Fri, 09 Sep 2022 09:01:03 +0000
manager1: E
manager1: :
manager1: Unable to fetch some archives, maybe run apt-get update or try with --fix-missing?
manager1: WARNING:
manager1: apt
manager1:
manager1: does not have a stable CLI interface.
manager1: Use with caution in scripts.
...............................
..............................
manager1: Do you want to continue? [Y/n] Abort.
manager1: Failed to restart docker.service: Unit docker.service not found.
The SSH command responded with a non-zero exit status. Vagrant
assumes that this means the command failed. The output for this command
should be in the log above. Please read the output to determine what
went wrong.
Please I need help to enable me create vms and install docker.
Put the following in your provisioning script:
export DEBIAN_FRONTEND=noninteractive
before any apt-get calls.
See also: https://serverfault.com/questions/500764/dpkg-reconfigure-unable-to-re-open-stdin-no-file-or-directory?newreg=ab3bcf217d7e4a42bbbd82c3ce852392
Related
I created my own dockerfile (ubuntu:xenial) using environment variables. This dockerfile uses php7.0-fpm php7.0-xml php7.0-mbstring php-mysql
The dockerfiles contains:
ENV MYSQL_HOST=192.168.0.2
ENV MYSQL_DBNAME=dbname_xyz
ENV MYSQL_USERNAME=username_xyz
ENV MYSQL_PASSWORD=password_xyz
...
RUN echo "clear_env = no" >> /etc/php/7.0/fpm/pool.d/www.conf
in the server.php I'm trying to use those variables, but they aren't known obviously
$host = $_SERVER["MYSQL_HOST"];
$dbname = $_SERVER["MYSQL_DBNAME"];
$username = $_SERVER["MYSQL_USERNAME"];
$password = $_SERVER["MYSQL_PASSWORD"];
$pdo = new \PDO("mysql:host=$host;dbname=$dbname", $username, $password);
Running that shows error:
FastCGI sent in stderr: "PHP message: PHP Notice: Undefined index: MYSQL_HOST in /var/webdav/server.php on line 4
Executing on: container-shell shows correct value
php -r "echo getenv('MYSQL_HOST');"
env | grep MYSQL
Any suggestions what I've to change?
UPDATE 20211215 after AymDEV's feedback
UPDATE 20211215 after piotrekkr's feedback
Full dockerfile:
FROM ubuntu:xenial
MAINTAINER me#whatever.us
# Changing WEBDAV_PASSWORD doesn't work
# MYSQL_x aren't known to server.php
ENV WEBDAV_USERNAME=admin
ENV WEBDAV_PASSWORD=admin
ENV MYSQL_HOST=192.168.0.2
ENV MYSQL_DBNAME=dbname_xyz
ENV MYSQL_USERNAME=username_xyz
ENV MYSQL_PASSWORD=password_xyz
# Defaults
WORKDIR /var/webdav
VOLUME /var/webdav/public
VOLUME /var/webdav/data
# Install zip
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y zip unzip php-zip
# Install nginx with php7 support
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y nginx php7.0-fpm php7.0-xml php7.0-mbstring php-mysql && \
rm -rf /var/lib/apt/lists/*
# Install SabreDAV
RUN php -r "readfile('http://getcomposer.org/installer');" > composer-setup.php && \
php composer-setup.php --install-dir=/usr/bin --filename=composer && \
php -r "unlink('composer-setup.php');" && \
composer require sabre/dav ~3.2.2 && \
rm /usr/bin/composer
# Set up entrypoint
COPY /scripts/install.sh /install.sh
# Configure nginx
COPY /config/nginx/default /etc/nginx/sites-enabled/default
COPY /config/nginx/fastcgi_params /etc/nginx/fastcgi_params
# forward request and error logs to docker log collector
RUN ln -sf /dev/stdout /var/log/nginx/access.log && \
ln -sf /dev/stderr /var/log/nginx/error.log
# copy server.php for client -- sabredav communication
COPY /web/server.php /var/webdav/server.php
#make environment variables available to php
RUN echo "clear_env = no" >> /etc/php/7.0/fpm/pool.d/www.conf
#nginx will be process with PID=1
RUN echo "daemon off;" >> /etc/nginx/nginx.conf
CMD /install.sh && service php7.0-fpm start && nginx
full server.php
<?php
date_default_timezone_set('Europe/Berlin');
$baseUri = '/';
$host = $_ENV["MYSQL_HOST"];
$dbname = $_ENV["MYSQL_DBNAME"];
$username = $_ENV["MYSQL_USERNAME"];
$password = $_ENV["MYSQL_PASSWORD"];
$pdo = new \PDO("mysql:host=$host;dbname=$dbname", $username, $password);
$pdo->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);
require_once 'vendor/autoload.php';
$authBackend = new \Sabre\DAV\Auth\Backend\PDO($pdo);
$principalBackend = new \Sabre\DAVACL\PrincipalBackend\PDO($pdo);
$carddavBackend = new \Sabre\CardDAV\Backend\PDO($pdo);
$caldavBackend = new \Sabre\CalDAV\Backend\PDO($pdo);
$nodes = [
new \Sabre\CalDAV\Principal\Collection($principalBackend),
new \Sabre\CalDAV\CalendarRoot($principalBackend, $caldavBackend),
new \Sabre\CardDAV\AddressBookRoot($principalBackend, $carddavBackend),
];
$server = new \Sabre\DAV\Server($nodes);
if (isset($baseUri)) $server->setBaseUri($baseUri);
$server->addPlugin(new \Sabre\DAV\Auth\Plugin($authBackend));
$server->addPlugin(new \Sabre\DAV\Browser\Plugin());
$server->addPlugin(new \Sabre\CalDAV\Plugin());
$server->addPlugin(new \Sabre\CardDAV\Plugin());
$server->addPlugin(new \Sabre\DAV\Sync\Plugin());
$server->exec();
Actually - everything is working as designed. There are several reasons why you are not seeing the ENV variables and the approach you are using is a bit flawed.
TLDR: use docker-compose and split the FPM process into a separate container, with fpm as the entrypoint.
Why it doesn't work
Since this case is a bit complex - I will try to go step by step by the reasons why it's not working for you. Hopefully this will help.
Your PHP scripts are executed within the FPM workers. Each of these workers passes specific data (context) into PHP. That context you can see, for example, inside the $_SERVER variable:
Array
(
[LANGUAGE] =>
[LC_TIME] =>
[LC_CTYPE] =>
[LC_MONETARY] =>
[TERM] => xterm
[LC_COLLATE] =>
[PATH] => /sbin:/usr/sbin:/bin:/usr/bin
[LC_ADDRESS] =>
[LANG] =>
[LC_TELEPHONE] =>
[LC_MESSAGES] =>
[LC_NAME] =>
[LC_MEASUREMENT] =>
[LC_IDENTIFICATION] =>
[LC_ALL] =>
[PWD] => /
[LC_NUMERIC] =>
[LC_PAPER] =>
[USER] => www-data
[HOME] => /var/www
[HTTP_ACCEPT] => */*
[HTTP_USER_AGENT] => curl/7.47.0
[HTTP_HOST] => localhost
[REDIRECT_STATUS] => 200
[SERVER_NAME] => _
[SERVER_PORT] => 80
[SERVER_ADDR] => 127.0.0.1
[REMOTE_PORT] => 35542
[REMOTE_ADDR] => 127.0.0.1
[SERVER_SOFTWARE] => nginx/1.10.3
[GATEWAY_INTERFACE] => CGI/1.1
[REQUEST_SCHEME] => http
[SERVER_PROTOCOL] => HTTP/1.1
[DOCUMENT_ROOT] => /var/www/html
[DOCUMENT_URI] => /print.php
[REQUEST_URI] => /print.php
[SCRIPT_NAME] => /print.php
[CONTENT_LENGTH] =>
[CONTENT_TYPE] =>
[REQUEST_METHOD] => GET
[QUERY_STRING] =>
[SCRIPT_FILENAME] => /var/www/html/print.php
[PATH_INFO] =>
[FCGI_ROLE] => RESPONDER
[PHP_SELF] => /print.php
[REQUEST_TIME_FLOAT] => 1639586759.7522
[REQUEST_TIME] => 1639586759
)
If you print out the same variable from the CLI - the result will be quite different (I guess you already observed that):
Array
(
[HOSTNAME] => 580747313ddc
[TERM] => xterm
[MYSQL_PASSWORD] => password_xyz
[LS_COLORS] => rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:
[WEBDAV_USERNAME] => admin
[MYSQL_DBNAME] => dbname_xyz
[PATH] => /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
[PWD] => /var/www/html
[WEBDAV_PASSWORD] => admin
[MYSQL_USERNAME] => username_xyz
[SHLVL] => 1
[HOME] => /root
[MYSQL_HOST] => 192.168.0.2
[_] => /usr/bin/php
[OLDPWD] => /etc/nginx
[PHP_SELF] => print.php
[SCRIPT_NAME] => print.php
[SCRIPT_FILENAME] => print.php
[PATH_TRANSLATED] => print.php
[DOCUMENT_ROOT] =>
[REQUEST_TIME_FLOAT] => 1639586851.762
[REQUEST_TIME] => 1639586851
[argv] => Array
(
[0] => print.php
)
[argc] => 1
)
The ENV is properly passed into the container and if you execute processes in the container - the ENV is available to them. This is seen in the second example above. But, if you look at the processes running in your container:
root#580747313ddc:/var/www/html# ps -aux
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
root 1 0.0 0.0 1872 420 ? Ss 16:20 0:00 /bin/sh -c service php7.0-fpm start && nginx
root 37 0.0 0.4 45560 9208 ? S 16:20 0:00 nginx: master process nginx
root 43 0.0 0.1 3628 2908 pts/0 Ss 16:21 0:01 bash
www-data 3154 0.0 0.1 45932 4032 ? S 16:38 0:00 nginx: worker process
www-data 3155 0.0 0.1 45932 3232 ? S 16:38 0:00 nginx: worker process
www-data 3156 0.0 0.1 45932 3232 ? S 16:38 0:00 nginx: worker process
www-data 3157 0.0 0.1 45932 3232 ? S 16:38 0:00 nginx: worker process
www-data 3158 0.0 0.1 45932 3232 ? S 16:38 0:00 nginx: worker process
root 3217 0.0 0.5 121364 10324 ? Ss 16:41 0:00 php-fpm: master process (/etc/php/7.0/fpm/php-fpm.conf)
www-data 3218 0.0 0.4 121708 9920 ? S 16:41 0:00 php-fpm: pool www
www-data 3219 0.0 0.4 121708 9512 ? S 16:41 0:00 php-fpm: pool www
root 3233 0.0 0.1 5472 2380 pts/0 R+ 16:51 0:00 ps -aux
you can examine the environment of each one of them, see process with PID=1 (the entrypoint of the container):
root#580747313ddc:/var/www/html# cat /proc/1/environ
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binHOSTNAME=580747313ddcWEBDAV_USERNAME=adminWEBDAV_PASSWORD=adminMYSQL_HOST=192.168.0.2MYSQL_DBNAME=dbname_xyzMYSQL_USERNAME=username_xyzMYSQL_PASSWORD=password_xyzHOME=/root
the environment has the ENVs that you created. All good here. But if you look at the FPM process:
root#580747313ddc:/var/www/html# cat /proc/3217/environ
root#580747313ddc:/var/www/html#
it's empty!
This is because you are running the FPM as a service (as in a systemd service), which means that you would have to define these environment variables within the service's configuration file, in this case /lib/systemd/system/php7.0-fpm.service.
It can be done, however - it wouldn't be very clean to do so.
What you can do to fix it
You can work around the problems above by simply using php-fpm as the entrypoint to your container. That way - it will have access to the ENV. This is already being done by the official php-fpm images and we can try to use them. Here's a minimal workable example, using docker-compose:
docker-compose.yml:
version: '3'
services:
web:
image: nginx
container_name: web
depends_on:
- php
links:
- php
volumes:
- ./html:/var/www/html
- ./conf:/etc/nginx/conf.d/
ports:
- "8080:80"
php:
image: php:7.4-fpm-alpine
environment:
- MYSQL_PASS=pass123
volumes:
- ./html:/var/www/html
Now, in the same folder create
html/print.php:
<?php print_r($_SERVER);?>
and conf/default.conf:
server {
listen 80;
listen [::]:80;
server_name localhost;
root /var/www/html;
location ~ \.php$ {
try_files $uri = 404;
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass php:9000;
fastcgi_index index.php;
include fastcgi_params;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
include fastcgi_params;
}
}
with this - simply run docker-compose up -d and then curl localhost:8080/print.php.
This should give you the expected env variable MYSQL_PASS inside your $_SERVER array:
Array
(
[HOSTNAME] => 84a4d1e174d3
[PHP_INI_DIR] => /usr/local/etc/php
[SHLVL] => 1
[HOME] => /home/www-data
[PHP_LDFLAGS] => -Wl,-O1 -pie
[PHP_CFLAGS] => -fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
[PHP_VERSION] => 7.4.26
[GPG_KEYS] => 42670A7FE4D0441C8E4632349E4FDC074A4EF02D 5A52880781F755608BF815FC910DEB46F53EA312
[PHP_CPPFLAGS] => -fstack-protector-strong -fpic -fpie -O2 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
[PHP_ASC_URL] => https://www.php.net/distributions/php-7.4.26.tar.xz.asc
[MYSQL_PASS] => pass123
[PHP_URL] => https://www.php.net/distributions/php-7.4.26.tar.xz
[PATH] => /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
[WEBDAV_USER] => test
[PHPIZE_DEPS] => autoconf dpkg-dev dpkg file g++ gcc libc-dev make pkgconf re2c
[PWD] => /var/www/html
[PHP_SHA256] => e305b3aafdc85fa73a81c53d3ce30578bc94d1633ec376add193a1e85e0f0ef8
[USER] => www-data
[HTTP_ACCEPT] => */*
[HTTP_USER_AGENT] => curl/7.77.0
[HTTP_HOST] => localhost:8080
[PATH_INFO] =>
[SCRIPT_FILENAME] => /var/www/html/print.php
[REDIRECT_STATUS] => 200
[SERVER_NAME] => localhost
[SERVER_PORT] => 80
[SERVER_ADDR] => 172.18.0.3
[REMOTE_PORT] => 55288
[REMOTE_ADDR] => 172.18.0.1
[SERVER_SOFTWARE] => nginx/1.21.4
[GATEWAY_INTERFACE] => CGI/1.1
[REQUEST_SCHEME] => http
[SERVER_PROTOCOL] => HTTP/1.1
[DOCUMENT_ROOT] => /var/www/html
[DOCUMENT_URI] => /print.php
[REQUEST_URI] => /print.php
[SCRIPT_NAME] => /print.php
[CONTENT_LENGTH] =>
[CONTENT_TYPE] =>
[REQUEST_METHOD] => GET
[QUERY_STRING] =>
[FCGI_ROLE] => RESPONDER
[PHP_SELF] => /print.php
[REQUEST_TIME_FLOAT] => 1639591114.4143
[REQUEST_TIME] => 1639591114
[argv] => Array
(
)
[argc] => 0
)
I'm trying debug CLI script, and Xdebug can't connect to PhpStorm.
I see error Operation now in progress (29). in Xdebug remote log.
I'm sure Xdebug configured right, but I don't know how to debug PhpStorm.
Summary from phpinfo() generated by https://xdebug.org/wizard.php
Tailored Installation Instructions
Summary
Xdebug installed: 2.6.1
Server API: Command Line Interface
Windows: no
Zend Server: no
PHP Version: 7.1.24
Zend API nr: 320160303
PHP API nr: 20160303
Debug Build: no
Thread Safe Build: no
OPcache Loaded: no
Configuration File Path: /usr/local/etc/php
Configuration File: /usr/local/etc/php/php.ini
Extensions directory: /usr/local/lib/php/extensions/no-debug-non-zts-20160303
You're already running the latest Xdebug version
Xdebug log
Log opened at 2019-02-19 11:59:37
I: Connecting to configured address/port: 46.201.50.194:9000.
W: Creating socket for '46.201.50.194:9000', poll success, but error: Operation now in progress (29).
E: Could not connect to client. :-(
Log closed at 2019-02-19 11:59:37
Xdebug config
xdebug
xdebug support => enabled
Version => 2.6.1
IDE Key => PHPSTORM
Supported protocols
DBGp - Common DeBuGger Protocol
Directive => Local Value => Master Value
xdebug.auto_trace => Off => Off
xdebug.cli_color => 0 => 0
xdebug.collect_assignments => Off => Off
xdebug.collect_includes => On => On
xdebug.collect_params => 0 => 0
xdebug.collect_return => Off => Off
xdebug.collect_vars => Off => Off
xdebug.coverage_enable => On => On
xdebug.default_enable => On => On
xdebug.dump.COOKIE => no value => no value
xdebug.dump.ENV => no value => no value
xdebug.dump.FILES => no value => no value
xdebug.dump.GET => no value => no value
xdebug.dump.POST => no value => no value
xdebug.dump.REQUEST => no value => no value
xdebug.dump.SERVER => no value => no value
xdebug.dump.SESSION => no value => no value
xdebug.dump_globals => On => On
xdebug.dump_once => On => On
xdebug.dump_undefined => Off => Off
xdebug.extended_info => On => On
xdebug.file_link_format => no value => no value
xdebug.filename_format => no value => no value
xdebug.force_display_errors => Off => Off
xdebug.force_error_reporting => 0 => 0
xdebug.gc_stats_enable => Off => Off
xdebug.gc_stats_output_dir => /tmp => /tmp
xdebug.gc_stats_output_name => gcstats.%p => gcstats.%p
xdebug.halt_level => 0 => 0
xdebug.idekey => PHPSTORM => PHPSTORM
xdebug.max_nesting_level => 256 => 256
xdebug.max_stack_frames => -1 => -1
xdebug.overload_var_dump => 2 => 2
xdebug.profiler_aggregate => Off => Off
xdebug.profiler_append => Off => Off
xdebug.profiler_enable => Off => Off
xdebug.profiler_enable_trigger => Off => Off
xdebug.profiler_enable_trigger_value => no value => no value
xdebug.profiler_output_dir => /tmp => /tmp
xdebug.profiler_output_name => cachegrind.out.%p => cachegrind.out.%p
xdebug.remote_addr_header => no value => no value
xdebug.remote_autostart => On => On
xdebug.remote_connect_back => Off => Off
xdebug.remote_cookie_expire_time => 3600 => 3600
xdebug.remote_enable => On => On
xdebug.remote_handler => dbgp => dbgp
xdebug.remote_host => 127.0.0.1 => localhost
xdebug.remote_log => /app/xdebug.log => no value
xdebug.remote_mode => req => req
xdebug.remote_port => 9000 => 9000
xdebug.remote_timeout => 200 => 200
xdebug.scream => Off => Off
xdebug.show_error_trace => Off => Off
xdebug.show_exception_trace => Off => Off
xdebug.show_local_vars => Off => Off
xdebug.show_mem_delta => Off => Off
xdebug.trace_enable_trigger => Off => Off
xdebug.trace_enable_trigger_value => no value => no value
xdebug.trace_format => 0 => 0
xdebug.trace_options => 0 => 0
xdebug.trace_output_dir => /tmp => /tmp
xdebug.trace_output_name => trace.%c => trace.%c
xdebug.var_display_max_children => 128 => 128
xdebug.var_display_max_data => 512 => 512
xdebug.var_display_max_depth => 3 => 3
Xdebug can't connect to PhpStorm because PhpStorm was not reachable by my external host ip.
How I debug it.
First I checked from docker container that PhpStorm listen port (9000 in my case)
nc -vz external_ip 9000
It got (tcp) failed: Connection refused
I try the same from the host and also got the error
Then I tried from host
nc -vz localhost 9000
And I got [tcp/*] succeeded!
So the problem in xdebug.remote_host not in PhpStorm probably
I found host ip in container
netstat -nr | grep '^0\.0\.0\.0' | awk '{print $2}'
Put it in xdebug.remote_host and now it works correctly
Thanks for #LazyOne
In my case I simply set the following xdebug conf:
xdebug.remote_connect_back=0
xdebug.remote_host=host.docker.internal
host.docker.internal should 'magically' find the host's IP (cf. documentation). Note that this requires Docker v18.03+ and currently only works on Mac and Windows hosts.
May be your xdebug.remote_port 9000 is using by php-fpm. You try changing other port. Example xdebug.remote_port=9001 and in launch.json file still port=9000
If someone doesn't want to hardcode or modify their docker, Dockerfile, or compose file (maybe someone is using a 2.x xdebug, it doesn't support environment variable). Adding the header X-Forwarded-For by using a header modifying extension on the browser so as to set $_SERVER['HTTP_X_FORWARDED_FOR'] is a very convenient approach. Don't forget the configuration xdebug.remote_connect_back=1.
Dockerfile
FROM php:7.1-apache
RUN yes | pecl install xdebug-2.5.5 \
&& echo "zend_extension=$(find /usr/local/lib/php/extensions/ -name xdebug.so)" > /usr/local/etc/php/conf.d/xdebug.ini \
&& echo "xdebug.remote_port=9000" >> /usr/local/etc/php/conf.d/xdebug.ini \
&& echo "xdebug.remote_enable=1" >> /usr/local/etc/php/conf.d/xdebug.ini \
&& echo "xdebug.remote_connect_back=1" >> /usr/local/etc/php/conf.d/xdebug.ini \
&& echo "xdebug.remote_log=/tmp/xdebug.log" >> /usr/local/etc/php/conf.d/xdebug.ini \
&& echo "xdebug.remote_autostart=off" >> /usr/local/etc/php/conf.d/xdebug.ini
ModHeader extension
xdebug.log
Log opened at 2021-07-06 16:06:59
I: Checking remote connect back address.
I: Checking header 'HTTP_X_FORWARDED_FOR'.
I: Checking header 'REMOTE_ADDR'.
I: Remote address found, connecting to 172.17.0.1:9000.
W: Creating socket for '172.17.0.1:9000', poll success, but error: Operation now in progress (29).
E: Could not connect to client. :-(
Log closed at 2021-07-06 16:06:59
Log opened at 2021-07-06 16:07:40
I: Checking remote connect back address.
I: Checking header 'HTTP_X_FORWARDED_FOR'.
I: Remote address found, connecting to 172.30.112.1:9000.
I: Connected to client. :-)
For me the fix to this error was simply clicking the phone icon in PhpStorm twice to disable and then re-enable listening for debug connections.
It shouldn't change anything but it worked because before that I fixed a bunch of other issues, one of them being that port 9003 was blocked by something else. Apparently PhpStorm doesn't warn when it can't use the specified port, so after resolving other issues it might be necessary to re-initiate listening for debug connections.
Have you configured the mapping from phpstorm in PHP->Server, you have to map the project file to the absolute path of the server eg. src -> "/var/www/html/test/src"
Also the xdebug file from the server that contains the ip address
I have opened "Windows Defender Firewall with Advanced Security" (Windows firewall settings) and have found that I have two rules, which appeared not clear from where, that block the phpStorm.
I disabled them - and the xDebugger started working for me.
I'm trying to install docker on Ubuntu 18.04-VM (via vagrant) using the setup below. Is there any way I can make docker installation succeed on vagrant ubuntu 18.04 VM using the Vagrantfile? Note: I need to know how to apply the suggested solution into the Vagrantfile.
Vagrantfile:
servers=[
{
:hostname => "manager",
:ip => "192.168.2.1",
:box => "ubuntu/bionic64",
:ram => 2048,
:cpu => 4
},
{
:hostname => "worker-1",
:ip => "192.168.2.2",
:box => "ubuntu/bionic64",
:ram => 2048,
:cpu => 4
},
{
:hostname => "worker-2",
:ip => "192.168.2.3",
:box => "ubuntu/bionic64",
:ram => 2048,
:cpu => 4
}
]
Vagrant.configure(2) do |config|
servers.each do |machine|
config.vm.define machine[:hostname] do |node|
node.vm.box = machine[:box]
node.vm.hostname = machine[:hostname]
node.vm.network "private_network", ip: machine[:ip]
if machine[:hostname] == "manager"
node.vm.provision "docker",
images: ["ubuntu/bionic64"]
else
node.vm.provision "docker"
end
node.vm.provider "virtualbox" do |vb|
vb.customize ["modifyvm", :id, "--memory", machine[:ram]]
end
end
end
end
Dockerfile:
FROM ubuntu:18.04
RUN apt-get install -y python python-pip --no-install-recommends
RUN apt-get install vim -y
RUN apt update -y
ADD app /home/app/
WORKDIR /home/app
EXPOSE 8080
Exception/Error Output Message:
The following SSH command responded with a non-zero exit status.
Vagrant assumes that this means the command failed!
curl -sSL https://get.docker.com/ | sh
Stdout from the command:
Executing docker install script, commit: 02d7c3c
Stderr from the command:
Either your platform is not easily detectable or is not supported by this
installer script.
Please visit the following URL for more detailed installation instructions:
https://docs.docker.com/engine/installation/
I finally figured it out spawning virtual servers with Ubuntu 18, using Vagrant. The link has all the simple instructions: Spawn virtual servers on the fly
Im trying to install docker-ce through puppet and i have a couple of questions.
1: Does apt::key automatically do a 'apt-get update' afterwards?
2: How can i use the apt:ppa module to add the docker-ce repository?
this is done with:
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
But how would i use the apt::ppa to include distribution and release?
This is the whole puppet block:
class docker {
$prerequisites = ['apt-transport-https', 'ca-certificates', 'curl']
package { $prerequisites: ensure => installed}
apt::key { 'docker-ce':
ensure => present,
id => '9DC858229FC7DD38854AE2D88D81803C0EBFCD88',
options => 'https://download.docker.com/linux/ubuntu/gpg',
}
apt::ppa {''}
package {'docker-ce': ensure => installed}
}
EDIT:
Ended up using the apt module with apt::source, hardcoded release because i know all my systems will run it.
class docker {
include apt
$prerequisites = ['apt-transport-https', 'ca-certificates']
package { $prerequisites: ensure => installed} ->
apt::key { 'docker-ce':
ensure => present,
id => '9DC858229FC7DD38854AE2D88D81803C0EBFCD88',
options => 'https://download.docker.com/linux/ubuntu/gpg',
} ->
apt::source {'docker-ce':
location => 'https://download.docker.com/linux/ubuntu',
release => 'xenial'
} ->
exec { 'apt-get-update':
command => '/usr/bin/apt-get update'
} ->
package {'docker-ce': ensure => installed}
}
Here's how I'm installing this:
apt::key { '9DC858229FC7DD38854AE2D88D81803C0EBFCD88':
source => 'https://download.docker.com/linux/ubuntu/gpg',
} ->
apt::source { 'docker-ce':
architecture => 'amd64',
location => 'https://download.docker.com/linux/ubuntu',
repos => 'stable',
release => $::lsbdistcodename,
} ->
package { 'docker-ce':
ensure => 'latest',
require => Exec['apt_update'],
}
When trying to run rspec spec/controllers/api_controller_spec.rb:406 --color command on linux machine. We already have redis server running at port 6379 but as below error Unable to load rspec spec_helper configuration & getting error:
Code:
REDIS_PID = "/var/run/redis.pid"
REDIS_CACHE_PATH = "tmp/cache/"
Dir.mkdir "#{Rails.root}/tmp" unless Dir.exists? "#{Rails.root}/tmp"
Dir.mkdir "#{Rails.root}/tmp/pids" unless Dir.exists? "#{Rails.root}/tmp/pids"
Dir.mkdir "#{Rails.root}/tmp/cache" unless Dir.exists? "#{Rails.root}/tmp/cache"
config.before(:suite) do
redis_options = {
"daemonize" => 'yes',
"pidfile" => 7528,
"port" => 6379,
"timeout" => 300,
"save 900" => 1,
"save 300" => 1,
"save 60" => 10000,
"dbfilename" => "dump.rdb",
"dir" => REDIS_CACHE_PATH,
"loglevel" => "debug",
"logfile" => "stdout",
"databases" => 16
}.map { |k, v| "#{k} #{v}" }.join("\n")
`echo '#{redis_options}' | redis-server -`
end
config.after(:suite) do
%x{
cat #{REDIS_PID} | xargs kill -QUIT
rm -f #{REDIS_CACHE_PATH}dump.rdb
}
end
Error:
------------
------------
------------
Finished searching in 0.09762763977050781 seconds.
Request#<ActionController::TestRequest:0x00000004139f58>
^[[?1;2c^[[?1;2c^[[?1;2c^[[?1;2c^[[?1;2c^[[?1;2cFcat: /var/run/redis.pid: No such file or directory
usage: kill [ -s signal | -p ] [ -a ] pid ...
kill -l [ signal ]
------------
------------
------------
Restart the redis server redis-server stop/start. The redis.pid file generated and rspec is able to access the redis configuration during background processing.