I generated the certificate using Certbot without any errors and configured nginx, but the site still cannot be reached over HTTPS.
My nginx.conf
server {
    listen 80;
    server_name cars4me.ru;
    server_tokens off;
    root /var/www/html;
    index index.php;

    #location /.well-known/acme-challenge/ {
    #    root /var/www/certbot;
    #}
    location ^~ /.well-known/acme-challenge {
        root /var/www/certbot;
        default_type "text/plain";
        try_files $uri =404;
    }

    #location / {
    #    try_files $uri $uri/ /index.php?$args;
    #}
    location / {
        return 301 https://$host$request_uri;
    }
}

server {
    listen 443 ssl;
    server_name cars4me.ru;
    server_tokens off;
    root /var/www/html;
    index index.php;

    ssl_certificate /etc/letsencrypt/live/cars4me.ru/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/cars4me.ru/privkey.pem;
    include /etc/letsencrypt/options-ssl-nginx.conf;
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

    location / {
        # First attempt to serve request as file, then
        # as directory, then fall back to displaying a 404.
        try_files $uri $uri/ /index.php?$args;
    }

    # pass the PHP scripts to FastCGI server listening on wordpress:9000
    location ~ \.php$ {
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_pass wordpress:9000;
        fastcgi_index index.php;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        fastcgi_param SCRIPT_NAME $fastcgi_script_name;
    }
}
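(For reference, the active configuration can be syntax-checked and dumped from inside the running container; this is only a quick check and assumes the nginx service name used in the compose file below.)
docker-compose exec nginx nginx -t
docker-compose exec nginx nginx -T | grep ssl_certificate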
My docker-compose.yml
version: '3'
services:
  wordpress-db:
    image: mysql:5.7
    volumes:
      - ../db:/var/lib/mysql
    environment:
      MYSQL_ROOT_PASSWORD: somewordpress
      MYSQL_DATABASE: wordpress
      MYSQL_USER: wordpress
      MYSQL_PASSWORD: wordpress
    restart: always
  wordpress:
    image: wordpress:php7.4-fpm-alpine
    volumes:
      - ../src:/var/www/html
      - ../conf:/usr/local/etc/php
    depends_on:
      - wordpress-db
    environment:
      WORDPRESS_DB_HOST: wordpress-db:3306
      WORDPRESS_DB_NAME: wordpress
      WORDPRESS_DB_USER: wordpress
      WORDPRESS_DB_PASSWORD: wordpress
    links:
      - wordpress-db
    restart: always
    user: "33:82"
  nginx:
    image: nginx:alpine
    volumes:
      - ../nginx:/etc/nginx/conf.d
      - ../src:/var/www/html
      - ./data/certbot/conf:/etc/letsencrypt
      - ./data/certbot/www:/var/www/certbot
    ports:
      - 80:80
      - 443:443
    links:
      - wordpress
    command: "/bin/sh -c 'while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g \"daemon off;\"'"
  certbot:
    image: certbot/certbot
    volumes:
      - ./data/certbot/conf:/etc/letsencrypt
      - ./data/certbot/www:/var/www/certbot
    entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"
My init-letsencrypt.sh
root@enid:/home/bitbucket/www/wordpress/docker3# ls
data  docker-compose.yml  init-letsencrypt.sh
root@enid:/home/bitbucket/www/wordpress/docker3# cat init-letsencrypt.sh
#!/bin/bash

if ! [ -x "$(command -v docker-compose)" ]; then
  echo 'Error: docker-compose is not installed.' >&2
  exit 1
fi

domains=(cars4me.ru www.cars4me.ru)
rsa_key_size=4096
data_path="./data/certbot"
email="deemax3x3@mail.ru" # Adding a valid address is strongly recommended
staging=1 # Set to 1 if you're testing your setup to avoid hitting request limits

if [ -d "$data_path" ]; then
  read -p "Existing data found for $domains. Continue and replace existing certificate? (y/N) " decision
  if [ "$decision" != "Y" ] && [ "$decision" != "y" ]; then
    exit
  fi
fi

if [ ! -e "$data_path/conf/options-ssl-nginx.conf" ] || [ ! -e "$data_path/conf/ssl-dhparams.pem" ]; then
  echo "### Downloading recommended TLS parameters ..."
  mkdir -p "$data_path/conf"
  curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot-nginx/certbot_nginx/_internal/tls_configs/options-ssl-nginx.conf > "$data_path/conf/options-ssl-nginx.conf"
  curl -s https://raw.githubusercontent.com/certbot/certbot/master/certbot/certbot/ssl-dhparams.pem > "$data_path/conf/ssl-dhparams.pem"
  echo
fi

echo "### Creating dummy certificate for $domains ..."
path="/etc/letsencrypt/live/$domains"
mkdir -p "$data_path/conf/live/$domains"
docker-compose run --rm --entrypoint "\
  openssl req -x509 -nodes -newkey rsa:$rsa_key_size -days 1 \
    -keyout '$path/privkey.pem' \
    -out '$path/fullchain.pem' \
    -subj '/CN=localhost'" certbot
echo

echo "### Starting nginx ..."
docker-compose up --force-recreate -d nginx
echo

echo "### Deleting dummy certificate for $domains ..."
docker-compose run --rm --entrypoint "\
  rm -Rf /etc/letsencrypt/live/$domains && \
  rm -Rf /etc/letsencrypt/archive/$domains && \
  rm -Rf /etc/letsencrypt/renewal/$domains.conf" certbot
echo

echo "### Requesting Let's Encrypt certificate for $domains ..."
# Join $domains to -d args
domain_args=""
for domain in "${domains[@]}"; do
  domain_args="$domain_args -d $domain"
done

# Select appropriate email arg
case "$email" in
  "") email_arg="--register-unsafely-without-email" ;;
  *) email_arg="--email $email" ;;
esac

# Enable staging mode if needed
if [ $staging != "0" ]; then staging_arg="--staging"; fi

docker-compose run --rm --entrypoint "\
  certbot certonly -v --webroot -w /var/www/certbot \
    $staging_arg \
    $email_arg \
    $domain_args \
    --rsa-key-size $rsa_key_size \
    --agree-tos \
    --force-renewal" certbot
echo

echo "### Reloading nginx ..."
docker-compose exec nginx nginx -s reload
When I run bash init-letsencrypt.sh, there are no errors:
Existing data found for cars4me.ru. Continue and replace existing certificate? (y/N) y
Creating dummy certificate for cars4me.ru ...
Creating docker3_certbot_run ... done
Generating a RSA private key
.....................++++....................................................................................................................................++++
writing new private key to '/etc/letsencrypt/live/cars4me.ru/privkey.pem'
Starting nginx ...
Recreating docker3_wordpress-db_1 ... done
Recreating docker3_wordpress_1 ... done
Recreating docker3_nginx_1 ... done
Deleting dummy certificate for cars4me.ru ...
Creating docker3_certbot_run ... done
Requesting Let's Encrypt certificate for cars4me.ru ...
Creating docker3_certbot_run ... done
Saving debug log to /var/log/letsencrypt/letsencrypt.log
Plugins selected: Authenticator webroot, Installer None
Requesting a certificate for cars4me.ru and www.cars4me.ru
Successfully received certificate.
Certificate is saved at: /etc/letsencrypt/live/cars4me.ru/fullchain.pem
Key is saved at: /etc/letsencrypt/live/cars4me.ru/privkey.pem
This certificate expires on 2023-02-13.
These files will be updated when the certificate renews.
NEXT STEPS:
The certificate will need to be renewed before it expires. Certbot can automatically renew the certificate in the background, but you may need to take steps to enable that functionality. See https://certbot.org/renewal-setup for instructions.
Reloading nginx ...
2022/11/15 09:34:02 [notice] 10#10: signal process started
I checked the SSL configuration on ssllabs.com and it reports the certificate as not trusted.
What could be the problem?
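One way to narrow this down is to look at which CA actually signed the certificate nginx is serving, for example to rule out a dummy or Let's Encrypt staging certificate left over from the script; a quick check from any machine:
openssl s_client -connect cars4me.ru:443 -servername cars4me.ru </dev/null 2>/dev/null | openssl x509 -noout -issuer -dates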
Related
I used the following procedure to install Elasticsearch and Kibana 8 RC2:
# create the data directory on host for persistence
mkdir -p /data/elasticsearch/data
chmod 777 -R /data/elasticsearch/data
# create the elastic network
docker network create elastic
# run elastic search in background with data persistence in a single node configuration and test password
docker run -d -it --name els01 --net elastic -p 0.0.0.0:9200:9200 -p 0.0.0.0:9300:9300 -v /data/elasticsearch/data:/data/elasticsearch/data -e "discovery.type=single-node" -e ELASTIC_PASSWORD="test" -e KIBANA_PASSWORD="test" docker.elastic.co/elasticsearch/elasticsearch:8.0.0-rc2
# run kibana in background
docker run -d -it --name kib01 --net elastic -p 0.0.0.0:5601:5601 -e KIBANA_PASSWORD="test" -e "ELASTICSEARCH_HOSTS=https://els01:9200" docker.elastic.co/kibana/kibana:8.0.0-rc2
On the webpage http://10.0.2.1:5601/, I get this error :
Kibana server is not ready yet.
docker logs --tail 50 --follow --timestamps f82efc804e9c returns this error:
Unable to retrieve version information from Elasticsearch nodes. self signed certificate in certificate chain
What do I need to change in order to have a functional Kibana on Elasticsearch?
Thanks
TLDR;
Elasticsearch 8 comes with SSL/TLS enabled by default.
Kibana has to have the CA certificate to verify and connect to Elasticsearch.
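If you want to keep the plain docker run setup from the question, the same idea applies: copy the CA that Elasticsearch generated out of the els01 container and point Kibana at it. This is only a rough sketch; the path of the auto-generated CA inside the container is an assumption, and the kibana_system password still has to be set via the Elasticsearch API first:
docker cp els01:/usr/share/elasticsearch/config/certs/http_ca.crt .
docker run -d -it --name kib01 --net elastic -p 0.0.0.0:5601:5601 \
  -v "$(pwd)/http_ca.crt:/usr/share/kibana/config/http_ca.crt" \
  -e "ELASTICSEARCH_HOSTS=https://els01:9200" \
  -e "ELASTICSEARCH_USERNAME=kibana_system" \
  -e "ELASTICSEARCH_PASSWORD=<kibana_system password>" \
  -e "ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=/usr/share/kibana/config/http_ca.crt" \
  docker.elastic.co/kibana/kibana:8.0.0-rc2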
To solve
You can use the docker-compose file provided by Elastic to set up a multi-node cluster; it takes care of generating the SSL/TLS certificates. To get back to your actual single-node setup, you will just need to:
delete the es02 and es03 services
update the volume paths
This could look like:
version: "2.2"
services:
setup:
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
volumes:
- certs:/usr/share/elasticsearch/config/certs
user: "0"
command: >
bash -c '
if [ x${ELASTIC_PASSWORD} == x ]; then
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
exit 1;
elif [ x${KIBANA_PASSWORD} == x ]; then
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
exit 1;
fi;
if [ ! -f certs/ca.zip ]; then
echo "Creating CA";
bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
unzip config/certs/ca.zip -d config/certs;
fi;
if [ ! -f certs/certs.zip ]; then
echo "Creating certs";
echo -ne \
"instances:\n"\
" - name: es01\n"\
" dns:\n"\
" - es01\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
> config/certs/instances.yml;
bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
unzip config/certs/certs.zip -d config/certs;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u elastic:${ELASTIC_PASSWORD} -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";
'
healthcheck:
test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
interval: 1s
timeout: 5s
retries: 120
es01:
depends_on:
setup:
condition: service_healthy
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
volumes:
- certs:/usr/share/elasticsearch/config/certs
- type: bind
source: /data/elasticsearch/data
target: /usr/share/elasticsearch/data
ports:
- ${ES_PORT}:9200
environment:
- node.name=es01
- cluster.name=${CLUSTER_NAME}
- cluster.initial_master_nodes=es01,es02,es03
- discovery.seed_hosts=es02,es03
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es01/es01.key
- xpack.security.http.ssl.certificate=certs/es01/es01.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es01/es01.key
- xpack.security.transport.ssl.certificate=certs/es01/es01.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
kibana:
depends_on:
es01:
condition: service_healthy
es02:
condition: service_healthy
es03:
condition: service_healthy
image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
volumes:
- certs:/usr/share/kibana/config/certs
- kibanadata:/usr/share/kibana/data
ports:
- ${KIBANA_PORT}:5601
environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=https://es01:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
mem_limit: ${MEM_LIMIT}
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
volumes:
certs:
driver: local
esdata01:
driver: local
kibanadata:
driver: local
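Note that this compose file reads its settings from an .env file next to it; a minimal sketch with placeholder values (the variable names are exactly the ones referenced above):
STACK_VERSION=8.0.0-rc2
CLUSTER_NAME=docker-cluster
ELASTIC_PASSWORD=changeme
KIBANA_PASSWORD=changeme
LICENSE=basic
ES_PORT=9200
KIBANA_PORT=5601
MEM_LIMIT=1073741824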
My version, for a single-node configuration and without env variables:
version: "2.2"
services:
setup:
image: docker.elastic.co/elasticsearch/elasticsearch:8.3.3
volumes:
- ./volumes/data/es/certs:/usr/share/elasticsearch/config/certs
user: "0"
command: >
bash -c '
if [ ! -f certs/ca.zip ]; then
echo "Creating CA";
bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
unzip config/certs/ca.zip -d config/certs;
fi;
if [ ! -f certs/certs.zip ]; then
echo "Creating certs";
echo -ne \
"instances:\n"\
" - name: es01\n"\
" dns:\n"\
" - es01\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
> config/certs/instances.yml;
bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
unzip config/certs/certs.zip -d config/certs;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u elastic:E0glIRkUbVak8f4aHZe -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"E0glIRkUbVak8f4aHZk\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";
'
healthcheck:
test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
interval: 1s
timeout: 5s
retries: 120
es01:
depends_on:
setup:
condition: service_healthy
image: docker.elastic.co/elasticsearch/elasticsearch:8.3.3
volumes:
- ./volumes/data/es/certs:/usr/share/elasticsearch/config/certs
- ./volumes/data/es/es01:/usr/share/elasticsearch/data
ports:
- 9200:9200
environment:
- node.name=es01
- discovery.type=single-node
- ELASTIC_PASSWORD=<ELASTIC_PASSWORD>
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es01/es01.key
- xpack.security.http.ssl.certificate=certs/es01/es01.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es01/es01.key
- xpack.security.transport.ssl.certificate=certs/es01/es01.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=basic
mem_limit: 1073741824
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
kibana:
depends_on:
es01:
condition: service_healthy
image: docker.elastic.co/kibana/kibana:8.3.3
volumes:
- ./volumes/data/es/certs:/usr/share/kibana/config/certs
- ./volumes/data/es/kibanadata:/usr/share/kibana/data
ports:
- 5601:5601
environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=https://es01:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=<KIBANA_SYSTEM_PASSWORD>
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
mem_limit: 1073741824
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
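Once this is up, the connection can be verified from the host against the generated CA; a sketch, using the bind-mounted certs path from the compose file above and whatever password was set for ELASTIC_PASSWORD:
docker-compose up -d
curl --cacert ./volumes/data/es/certs/ca/ca.crt -u elastic:<ELASTIC_PASSWORD> https://localhost:9200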
I'm trying to set up Hyperledger Sawtooth with 4 nodes running PoET consensus, a custom transaction processor connected to each validator, and an Express app acting as a proxy to the REST API, all in Docker containers, with metrics in Grafana. The validator is not generating a transaction receipt, so the payload is never processed by my custom transaction processor, and the batch_status API always returns PENDING.
Full project link: https://github.com/Shritesh99/Evidence-Management-System
Please Help!!!
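For context, this is how the PENDING result shows up when polling from the shell container; a sketch using the service names below, with <BATCH_ID> as a placeholder for the id returned by the POST to /batches:
docker exec ems-shell sawtooth batch status --url http://rest-api-0:8008 <BATCH_ID>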
Here is my sawtooth-poet.yaml docker-compose file.
version: "3.7"
volumes:
poet-shared:
services:
# -------------=== shell ===-------------
shell:
image: "hyperledger/sawtooth-shell:chime"
container_name: ems-shell
volumes:
- poet-shared:/poet-shared
stop_signal: SIGKILL
entrypoint: |
bash -c "
if [ ! -f /root/.sawtooth/keys/root.priv ]; then
sawtooth keygen
fi;
tail -f /dev/null
"
# -------------=== rest api proxy ===-------------
rest-api-proxy:
build: ./proxy
container_name: ems-rest-api-proxy
volumes:
- ./proxy:/proxy
- /proxy/node_modules
expose:
- 4000
ports:
- "4000:4000"
depends_on:
- rest-api-0
# -------------=== validators ===-------------
validator-0:
image: "hyperledger/sawtooth-validator:chime"
container_name: ems-validator-0
expose:
- 4004
- 5050
- 8800
volumes:
- ./metrices/validator.toml:/etc/sawtooth/validator.toml
- poet-shared:/poet-shared
entrypoint: "bash -c \"\
sawadm keygen --force && \
mkdir -p /poet-shared/validator-0 || true && \
cp -a /etc/sawtooth/keys /poet-shared/validator-0/ && \
while [ ! -f /poet-shared/poet-enclave-measurement ]; do sleep 1; done && \
while [ ! -f /poet-shared/poet-enclave-basename ]; do sleep 1; done && \
while [ ! -f /poet-shared/poet.batch ]; do sleep 1; done && \
cp /poet-shared/poet.batch / && \
sawset genesis \
-k /etc/sawtooth/keys/validator.priv \
-o config-genesis.batch && \
sawset proposal create \
-k /etc/sawtooth/keys/validator.priv \
sawtooth.consensus.algorithm.name=PoET \
sawtooth.consensus.algorithm.version=0.1 \
sawtooth.poet.report_public_key_pem=\
\\\"$$(cat /poet-shared/simulator_rk_pub.pem)\\\" \
sawtooth.poet.valid_enclave_measurements=$$(cat /poet-shared/poet-enclave-measurement) \
sawtooth.poet.valid_enclave_basenames=$$(cat /poet-shared/poet-enclave-basename) \
-o config.batch && \
sawset proposal create \
-k /etc/sawtooth/keys/validator.priv \
sawtooth.poet.target_wait_time=5 \
sawtooth.poet.initial_wait_time=25 \
sawtooth.publisher.max_batches_per_block=100 \
-o poet-settings.batch && \
sawadm genesis \
config-genesis.batch config.batch poet.batch poet-settings.batch && \
sawtooth-validator -v \
--bind network:tcp://eth0:8800 \
--bind component:tcp://eth0:4004 \
--bind consensus:tcp://eth0:5050 \
--peering static \
--endpoint tcp://validator-0:8800 \
--scheduler parallel \
--network-auth trust
\""
environment:
PYTHONPATH: "/project/sawtooth-core/consensus/poet/common:\
/project/sawtooth-core/consensus/poet/simulator:\
/project/sawtooth-core/consensus/poet/core"
stop_signal: SIGKILL
validator-1:
image: "hyperledger/sawtooth-validator:chime"
container_name: ems-validator-1
expose:
- 4004
- 5050
- 8800
volumes:
- ./metrices/validator.toml:/etc/sawtooth/validator.toml
- poet-shared:/poet-shared
entrypoint: |
bash -c "
sawadm keygen --force && \
mkdir -p /poet-shared/validator-1 || true && \
cp -a /etc/sawtooth/keys /poet-shared/validator-1/ && \
sawtooth-validator -v \
--bind network:tcp://eth0:8800 \
--bind component:tcp://eth0:4004 \
--bind consensus:tcp://eth0:5050 \
--peering static \
--endpoint tcp://validator-1:8800 \
--peers tcp://validator-0:8800 \
--scheduler parallel \
--network-auth trust
"
environment:
PYTHONPATH: "/project/sawtooth-core/consensus/poet/common:\
/project/sawtooth-core/consensus/poet/simulator:\
/project/sawtooth-core/consensus/poet/core"
stop_signal: SIGKILL
validator-2:
image: "hyperledger/sawtooth-validator:chime"
container_name: ems-validator-2
expose:
- 4004
- 5050
- 8800
volumes:
- ./metrices/validator.toml:/etc/sawtooth/validator.toml
- poet-shared:/poet-shared
entrypoint: |
bash -c "
sawadm keygen --force && \
mkdir -p /poet-shared/validator-2 && \
cp -a /etc/sawtooth/keys /poet-shared/validator-2/ && \
sawtooth-validator -v \
--bind network:tcp://eth0:8800 \
--bind component:tcp://eth0:4004 \
--bind consensus:tcp://eth0:5050 \
--peering static \
--endpoint tcp://validator-2:8800 \
--peers tcp://validator-0:8800,tcp://validator-1:8800 \
--scheduler parallel \
--network-auth trust
"
environment:
PYTHONPATH: "/project/sawtooth-core/consensus/poet/common:\
/project/sawtooth-core/consensus/poet/simulator:\
/project/sawtooth-core/consensus/poet/core"
stop_signal: SIGKILL
validator-3:
image: "hyperledger/sawtooth-validator:chime"
container_name: ems-validator-3
expose:
- 4004
- 5050
- 8800
volumes:
- ./metrices/validator.toml:/etc/sawtooth/validator.toml
- poet-shared:/poet-shared
entrypoint: |
bash -c "
sawadm keygen --force && \
mkdir -p /poet-shared/validator-3 && \
cp -a /etc/sawtooth/keys /poet-shared/validator-3/ && \
sawtooth-validator -v \
--bind network:tcp://eth0:8800 \
--bind component:tcp://eth0:4004 \
--bind consensus:tcp://eth0:5050 \
--peering static \
--endpoint tcp://validator-3:8800 \
--peers tcp://validator-0:8800,tcp://validator-1:8800,tcp://validator-2:8800 \
--scheduler parallel \
--network-auth trust
"
environment:
PYTHONPATH: "/project/sawtooth-core/consensus/poet/common:\
/project/sawtooth-core/consensus/poet/simulator:\
/project/sawtooth-core/consensus/poet/core"
stop_signal: SIGKILL
validator-4:
image: "hyperledger/sawtooth-validator:chime"
container_name: ems-validator-4
expose:
- 4004
- 5050
- 8800
volumes:
- ./metrices/validator.toml:/etc/sawtooth/validator.toml
- poet-shared:/poet-shared
entrypoint: |
bash -c "
sawadm keygen --force && \
mkdir -p /poet-shared/validator-4 && \
cp -a /etc/sawtooth/keys /poet-shared/validator-4/ && \
sawtooth-validator -v \
--bind network:tcp://eth0:8800 \
--bind component:tcp://eth0:4004 \
--bind consensus:tcp://eth0:5050 \
--peering static \
--endpoint tcp://validator-4:8800 \
--peers tcp://validator-0:8800,tcp://validator-1:8800,tcp://validator-2:8800,tcp://validator-3:8800 \
--scheduler parallel \
--network-auth trust
"
environment:
PYTHONPATH: "/project/sawtooth-core/consensus/poet/common:\
/project/sawtooth-core/consensus/poet/simulator:\
/project/sawtooth-core/consensus/poet/core"
stop_signal: SIGKILL
# -------------=== rest api ===-------------
rest-api-0:
image: "hyperledger/sawtooth-rest-api:chime"
container_name: ems-rest-api-0
volumes:
- ./metrices/rest_api.toml:/etc/sawtooth/rest_api.toml
depends_on:
- validator-0
entrypoint: "sawtooth-rest-api -C tcp://validator-0:4004 --bind rest-api-0:8008 -v"
stop_signal: SIGKILL
rest-api-1:
image: "hyperledger/sawtooth-rest-api:chime"
container_name: ems-rest-api-1
volumes:
- ./metrices/rest_api.toml:/etc/sawtooth/rest_api.toml
depends_on:
- validator-1
entrypoint: "sawtooth-rest-api -C tcp://validator-1:4004 --bind rest-api-1:8008 -v"
stop_signal: SIGKILL
rest-api-2:
image: "hyperledger/sawtooth-rest-api:chime"
container_name: ems-rest-api-2
volumes:
- ./metrices/rest_api.toml:/etc/sawtooth/rest_api.toml
depends_on:
- validator-2
entrypoint: "sawtooth-rest-api -C tcp://validator-2:4004 --bind rest-api-2:8008 -v"
stop_signal: SIGKILL
rest-api-3:
image: "hyperledger/sawtooth-rest-api:chime"
container_name: ems-rest-api-3
volumes:
- ./metrices/rest_api.toml:/etc/sawtooth/rest_api.toml
depends_on:
- validator-3
entrypoint: "sawtooth-rest-api -C tcp://validator-3:4004 --bind rest-api-3:8008 -v"
stop_signal: SIGKILL
rest-api-4:
image: "hyperledger/sawtooth-rest-api:chime"
container_name: ems-rest-api-4
volumes:
- ./metrices/rest_api.toml:/etc/sawtooth/rest_api.toml
depends_on:
- validator-4
entrypoint: "sawtooth-rest-api -C tcp://validator-4:4004 --bind rest-api-4:8008 -v"
stop_signal: SIGKILL
# -------------=== settings tp ===-------------
settings-tp-0:
image: "hyperledger/sawtooth-settings-tp:chime"
container_name: ems-settings-tp-0
depends_on:
- validator-0
entrypoint: "settings-tp -v -C tcp://validator-0:4004"
stop_signal: SIGKILL
settings-tp-1:
image: "hyperledger/sawtooth-settings-tp:chime"
container_name: ems-settings-tp-1
depends_on:
- validator-1
entrypoint: "settings-tp -v -C tcp://validator-1:4004"
stop_signal: SIGKILL
settings-tp-2:
image: "hyperledger/sawtooth-settings-tp:chime"
container_name: ems-settings-tp-2
depends_on:
- validator-2
entrypoint: "settings-tp -v -C tcp://validator-2:4004"
stop_signal: SIGKILL
settings-tp-3:
image: "hyperledger/sawtooth-settings-tp:chime"
container_name: ems-settings-tp-3
depends_on:
- validator-3
entrypoint: "settings-tp -v -C tcp://validator-3:4004"
stop_signal: SIGKILL
settings-tp-4:
image: "hyperledger/sawtooth-settings-tp:chime"
container_name: ems-settings-tp-4
depends_on:
- validator-4
entrypoint: "settings-tp -v -C tcp://validator-4:4004"
stop_signal: SIGKILL
# -------------=== poet engines ===-------------
poet-engine-0:
image: "hyperledger/sawtooth-poet-engine:chime"
container_name: ems-poet-engine-0
volumes:
- poet-shared:/poet-shared
depends_on:
- validator-0
entrypoint: |
bash -c "
if [ ! -f /poet-shared/poet-enclave-measurement ]; then
poet enclave measurement >> /poet-shared/poet-enclave-measurement; \
fi &&
if [ ! -f /poet-shared/poet-enclave-basename ]; then
poet enclave basename >> /poet-shared/poet-enclave-basename;
fi &&
if [ ! -f /poet-shared/simulator_rk_pub.pem ]; then
cp /etc/sawtooth/simulator_rk_pub.pem /poet-shared;
fi &&
while [ ! -f /poet-shared/validator-0/keys/validator.priv ];
do sleep 1; done
cp -a /poet-shared/validator-0/keys /etc/sawtooth &&
poet registration create -k /etc/sawtooth/keys/validator.priv -o /poet-shared/poet.batch &&
poet-engine -C tcp://validator-0:5050 --component tcp://validator-0:4004
"
poet-engine-1:
image: "hyperledger/sawtooth-poet-engine:chime"
container_name: ems-poet-engine-1
volumes:
- poet-shared:/poet-shared
depends_on:
- validator-1
entrypoint: |
bash -c "
while [ ! -f /poet-shared/validator-1/keys/validator.priv ];
do sleep 1; done
cp -a /poet-shared/validator-1/keys /etc/sawtooth &&
poet-engine -C tcp://validator-1:5050 --component tcp://validator-1:4004
"
poet-engine-2:
image: "hyperledger/sawtooth-poet-engine:chime"
container_name: ems-poet-engine-2
volumes:
- poet-shared:/poet-shared
depends_on:
- validator-2
entrypoint: |
bash -c "
while [ ! -f /poet-shared/validator-2/keys/validator.priv ];
do sleep 1; done
cp -a /poet-shared/validator-2/keys /etc/sawtooth &&
poet-engine -C tcp://validator-2:5050 --component tcp://validator-2:4004
"
poet-engine-3:
image: "hyperledger/sawtooth-poet-engine:chime"
container_name: ems-poet-engine-3
volumes:
- poet-shared:/poet-shared
depends_on:
- validator-3
entrypoint: |
bash -c "
while [ ! -f /poet-shared/validator-3/keys/validator.priv ];
do sleep 1; done
cp -a /poet-shared/validator-3/keys /etc/sawtooth &&
poet-engine -C tcp://validator-3:5050 --component tcp://validator-3:4004
"
poet-engine-4:
image: "hyperledger/sawtooth-poet-engine:chime"
container_name: ems-poet-engine-4
volumes:
- poet-shared:/poet-shared
depends_on:
- validator-4
entrypoint: |
bash -c "
while [ ! -f /poet-shared/validator-4/keys/validator.priv ];
do sleep 1; done
cp -a /poet-shared/validator-4/keys /etc/sawtooth &&
poet-engine -C tcp://validator-4:5050 --component tcp://validator-4:4004
"
# -------------=== poet validator registry ===-------------
poet-validator-registry-tp-0:
image: "hyperledger/sawtooth-poet-validator-registry-tp:chime"
container_name: ems-poet-validator-registry-tp-0
depends_on:
- validator-0
entrypoint: "poet-validator-registry-tp -C tcp://validator-0:4004 -v"
environment:
PYTHONPATH: /project/sawtooth-core/consensus/poet/common
stop_signal: SIGKILL
poet-validator-registry-tp-1:
image: "hyperledger/sawtooth-poet-validator-registry-tp:chime"
container_name: ems-poet-validator-registry-tp-1
depends_on:
- validator-1
entrypoint: "poet-validator-registry-tp -C tcp://validator-1:4004 -v"
environment:
PYTHONPATH: /project/sawtooth-core/consensus/poet/common
stop_signal: SIGKILL
poet-validator-registry-tp-2:
image: "hyperledger/sawtooth-poet-validator-registry-tp:chime"
container_name: ems-poet-validator-registry-tp-2
depends_on:
- validator-2
entrypoint: "poet-validator-registry-tp -C tcp://validator-2:4004 -v"
environment:
PYTHONPATH: /project/sawtooth-core/consensus/poet/common
stop_signal: SIGKILL
poet-validator-registry-tp-3:
image: "hyperledger/sawtooth-poet-validator-registry-tp:chime"
container_name: ems-poet-validator-registry-tp-3
depends_on:
- validator-3
entrypoint: "poet-validator-registry-tp -C tcp://validator-3:4004 -v"
environment:
PYTHONPATH: /project/sawtooth-core/consensus/poet/common
stop_signal: SIGKILL
poet-validator-registry-tp-4:
image: "hyperledger/sawtooth-poet-validator-registry-tp:chime"
container_name: ems-poet-validator-registry-tp-4
depends_on:
- validator-4
entrypoint: "poet-validator-registry-tp -C tcp://validator-4:4004 -v"
environment:
PYTHONPATH: /project/sawtooth-core/consensus/poet/common
stop_signal: SIGKILL
# -------------=== EMS tps ===-------------
processor-0:
image: processor
container_name: ems-processor-0
build: ./processor
volumes:
- ./processor:/processor
- /processor/node_modules
depends_on:
- validator-0
environment:
- VALIDATOR_URL=tcp://validator-0:4004
processor-1:
image: processor
container_name: ems-processor-1
build: ./processor
volumes:
- ./processor:/processor
- /processor/node_modules
depends_on:
- validator-1
environment:
- VALIDATOR_URL=tcp://validator-1:4004
processor-2:
image: processor
container_name: ems-processor-2
build: ./processor
volumes:
- ./processor:/processor
- /processor/node_modules
depends_on:
- validator-2
environment:
- VALIDATOR_URL=tcp://validator-2:4004
processor-3:
image: processor
container_name: ems-processor-3
build: ./processor
volumes:
- ./processor:/processor
- /processor/node_modules
depends_on:
- validator-3
environment:
- VALIDATOR_URL=tcp://validator-3:4004
processor-4:
image: processor
container_name: ems-processor-4
build: ./processor
volumes:
- ./processor:/processor
- /processor/node_modules
depends_on:
- validator-4
environment:
- VALIDATOR_URL=tcp://validator-4:4004
Here are my validator-0 logs.
writing file: /etc/sawtooth/keys/validator.priv
writing file: /etc/sawtooth/keys/validator.pub
Generated config-genesis.batch
Processing config-genesis.batch...
Processing config.batch...
Processing poet.batch...
Processing poet-settings.batch...
Generating /var/lib/sawtooth/genesis.batch
[2020-12-02 14:04:18.313 WARNING (unknown file)] [src/pylogger.rs: 40] Started logger at level INFO
[2020-12-02 14:04:19.347 INFO path] Skipping path loading from non-existent config file: /etc/sawtooth/path.toml
[2020-12-02 14:04:19.347 INFO validator] Loading validator information from config: /etc/sawtooth/validator.toml
[2020-12-02 14:04:19.354 INFO path] Skipping path loading from non-existent config file: /etc/sawtooth/path.toml
[2020-12-02 14:04:19.356 INFO keys] Loading signing key: /etc/sawtooth/keys/validator.priv
[2020-12-02 14:04:19.360 INFO cli] sawtooth-validator (Hyperledger Sawtooth) version 1.2.5
[2020-12-02 14:04:19.360 INFO cli] config [path]: config_dir = "/etc/sawtooth"; config [path]: key_dir = "/etc/sawtooth/keys"; config [path]: data_dir = "/var/lib/sawtooth"; config [path]: log_dir = "/var/log/sawtooth"; config [path]: policy_dir = "/etc/sawtooth/policy"
[2020-12-02 14:04:19.361 INFO cli] Adding metrics reporter: url=http://192.168.29.153:8086, db=metrics
[2020-12-02 14:04:19.407 INFO state_verifier] Skipping state verification: chain head's state root is present
[2020-12-02 14:04:19.410 INFO cli] Starting validator with parallel scheduler
[2020-12-02 14:04:19.491 INFO interconnect] Listening on tcp://eth0:4004
[2020-12-02 14:04:19.496 INFO genesis] Producing genesis block from /var/lib/sawtooth/genesis.batch
[2020-12-02 14:04:19.502 INFO processor_manager] waiting for processor type sawtooth_settings: 1.0 to register
[2020-12-02 14:04:29.473 WARNING influx] Cannot write to 192.168.29.153: Bad Request
[2020-12-02 14:04:30.070 WARNING processor_handlers] Max occupancy was not provided by transaction processor: 5ff1b5d1495bc765a3b0436db794658cfcf0ce6af8875ed448a2db8a37d8e8103aab52e10cc7ffb17b35e27c45dfa2df38548f5393aefce91b369ba4ef622bad. Using default max occupancy: 10
[2020-12-02 14:04:30.070 INFO processor_handlers] registered transaction processor: connection_id=5ff1b5d1495bc765a3b0436db794658cfcf0ce6af8875ed448a2db8a37d8e8103aab52e10cc7ffb17b35e27c45dfa2df38548f5393aefce91b369ba4ef622bad, family=sawtooth_settings, version=1.0, namespaces=['000000'], max_occupancy=10
[2020-12-02 14:04:30.591 INFO processor_manager] waiting for processor type sawtooth_validator_registry: 1.0 to register
[2020-12-02 14:04:30.997 WARNING processor_handlers] Max occupancy was not provided by transaction processor: aaf4f8f70f7f076a594e63a7c3b81ca26f8658ceb76e763462a7edb8bcfdeb3eec095444c732628130ba13d2d0e73f4633c6e58be636c11c0fef25c016df27c3. Using default max occupancy: 10
[2020-12-02 14:04:31.001 INFO processor_handlers] registered transaction processor: connection_id=aaf4f8f70f7f076a594e63a7c3b81ca26f8658ceb76e763462a7edb8bcfdeb3eec095444c732628130ba13d2d0e73f4633c6e58be636c11c0fef25c016df27c3, family=sawtooth_validator_registry, version=1.0, namespaces=['6a4372'], max_occupancy=10
[2020-12-02 14:04:31.162 WARNING processor_handlers] Max occupancy was not provided by transaction processor: b0234249154584f591b355b68a48bc12ad73dfd0f08bd751bdcc640ed8fd0be3ccfbda04e723b3bb1db017533df7c49bbf64b9bfe3864da3c6e2f19d4adda2c9. Using default max occupancy: 10
[2020-12-02 14:04:31.163 INFO processor_handlers] registered transaction processor: connection_id=b0234249154584f591b355b68a48bc12ad73dfd0f08bd751bdcc640ed8fd0be3ccfbda04e723b3bb1db017533df7c49bbf64b9bfe3864da3c6e2f19d4adda2c9, family=evidence_management_system, version=0.0, namespaces=['d23299'], max_occupancy=10
[2020-12-02 14:04:32.595 INFO genesis] Genesis block created: 133d0d3c7d345c1f5e62de798fd698869a050b895f4df0531a69ddcbf3242eb820a08e4dd4329927c36ba0bf9aac605d511395e5ce51196e2737b26d0d7e061e (block_num:0, state:c8e91ecf6cebdc28b648429b585fba559368c7c1616e4e50ecbd35169ab94113, previous_block_id:0000000000000000)
[2020-12-02 14:04:32.679 INFO interconnect] Listening on tcp://eth0:5050
[2020-12-02 14:04:32.809 INFO interconnect] Listening on tcp://eth0:8800
[2020-12-02 14:04:32.872 INFO ffi] [src/journal/chain.rs: 946] Chain controller initialized with chain head: Block(id: 133d0d3c7d345c1f5e62de798fd698869a050b895f4df0531a69ddcbf3242eb820a08e4dd4329927c36ba0bf9aac605d511395e5ce51196e2737b26d0d7e061e, block_num: 0, state_root_hash: c8e91ecf6cebdc28b648429b585fba559368c7c1616e4e50ecbd35169ab94113, previous_block_id: 0000000000000000)
[2020-12-02 14:04:32.877 INFO ffi] [src/journal/publisher.rs: 172] Now building on top of block, Block(id: 133d0d3c7d345c1f5e62de798fd698869a050b895f4df0531a69ddcbf3242eb820a08e4dd4329927c36ba0bf9aac605d511395e5ce51196e2737b26d0d7e061e, block_num: 0, state_root_hash: c8e91ecf6cebdc28b648429b585fba559368c7c1616e4e50ecbd35169ab94113, previous_block_id: 0000000000000000)
[2020-12-02 14:04:39.123 INFO handlers] Consensus engine registered: PoET 0.1 (additional protocols: [name: "poet"
version: "0.1"
])
[2020-12-02 14:04:39.137 INFO proxy] Consensus engine activated: PoET 0.1
Here are my REST-API-0 logs.
[2020-12-02 14:04:36.477 INFO rest_api] Adding metrics reporter: url=http://192.168.29.153:8086, db=metrics
[2020-12-02 14:04:36.600 INFO messaging] Connecting to tcp://validator-0:4004
[2020-12-02 14:04:36.613 INFO rest_api] Creating handlers for validator at tcp://validator-0:4004
[2020-12-02 14:04:36.823 INFO rest_api] Starting REST API on rest-api-0:8008
[2020-12-02 14:04:40.635 INFO helpers] GET /batch_statuses?id=c965b70251bdb8005547812ae8655ebd43421e30b57602eb550f08cf119ba8c002084e89449c8130c94de8865a58eec2e7741e839168c570f9fb4a55659ed8be HTTP/1.1: 200 status, 594 size, in 0.018432 s
[2020-12-02 14:05:55.055 INFO helpers] OPTIONS /batches HTTP/1.1: 405 status, 231 size, in 0.000543 s
[2020-12-02 14:05:55.107 INFO helpers] POST /batches HTTP/1.1: 202 status, 367 size, in 0.020589 s
[2020-12-02 14:06:00.151 INFO helpers] GET /batch_statuses?id=05a44cae97e4356a104133391d1cb09c92c5c870dc50712738430107b505beec6baee39a0e2efabbc3d6dac2fea2b83c3edbcfce58ee2467e60555d81f0e8a09 HTTP/1.1: 200 status, 594 size, in 0.003084 s
[2020-12-02 14:06:05.166 INFO helpers] GET /batch_statuses?id=05a44cae97e4356a104133391d1cb09c92c5c870dc50712738430107b505beec6baee39a0e2efabbc3d6dac2fea2b83c3edbcfce58ee2467e60555d81f0e8a09 HTTP/1.1: 200 status, 594 size, in 0.003312 s
Here are my custom transaction processor's logs.
Initializing EMS handler for Evidence Management System
Connected to tcp://validator-0:4004
Registration of [evidence_management_system 0.0] succeeded
Received Ping
Received Ping
Edit - 1
Here are the PoET-Engine-0 logs:
Writing key state for PoET public key: 027cb2ab...e715c7dc
Generating /poet-shared/poet.batch
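As an additional check, whether the genesis settings and any later blocks actually committed can be queried from the shell container; a sketch using the container and service names above:
docker exec ems-shell sawtooth settings list --url http://rest-api-0:8008
docker exec ems-shell sawtooth block list --url http://rest-api-0:8008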
If you have a Docker container running certbot, but an nginx instance using those certificates running on the host, how do you restart the host nginx from inside the Docker container?
This is the running container
certbot:
  image: certbot/dns-ovh
  container_name: certbot
  volumes:
    - /etc/letsencrypt/:/etc/letsencrypt
    - /var/lib/letsencrypt:/var/lib/letsencrypt
    - /root/.secrets/certbot-ovh.ini:/root/.secrets/ovh-creds.ini
  entrypoint: /bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'
You have to add a --post-hook to the renew command, which uses ssh to send the nginx reload command to the host.
For this to work, the container needs to be run with network_mode: "host"
Then you need to install sshpass and openssh when starting/recreating the container. This is done with:
apk add openssh sshpass
Then, in the post-hook, you need to ssh into the host and reload nginx:
sshpass -p 'your password' ssh -o 'StrictHostKeyChecking no' root@localhost 'systemctl reload nginx'
This assumes you have root access. sshpass supplies the password to ssh, the StrictHostKeyChecking option skips the "do you want to add the fingerprint" prompt, and the command sends the reload to localhost.
Putting this all together in the docker-compose file looks like this:
certbot:
  image: certbot/dns-ovh
  container_name: certbot
  network_mode: "host"
  volumes:
    - /etc/letsencrypt/:/etc/letsencrypt
    - /var/lib/letsencrypt:/var/lib/letsencrypt
    - /root/.secrets/certbot-ovh.ini:/root/.secrets/ovh-creds.ini
  entrypoint: >
    /bin/sh -c
    'apk add openssh sshpass &&
    trap exit TERM; while :;
    do certbot renew --post-hook
    "sshpass -p '"'"'your password'"'"' ssh -o '"'"'StrictHostKeyChecking no'"'"' root@localhost '"'"'systemctl reload nginx'"'"'";
    sleep 12h & wait $${!}; done;'
The > here allows writing as many indented lines as I want without adding another layer of escaping; YAML later folds them into a single line.
The '"'"' sequence is used to escape the single ' inside the --post-hook quotes: it closes the first single quote, opens a double quote containing a single quote, and then opens a single quote again.
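To see that escaping trick in isolation, outside of YAML (a toy example):
$ echo 'post-hook says '"'"'hello'"'"''
post-hook says 'hello'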
I realised how badly written this question was, so I have rewritten the whole thing together with a solution.
TLDR: I wanted a solution or suggestion on how to get the Let's Encrypt certificates and keys retrieved by the docker certbot/certbot container to be readable by the nginx:latest container.
The reason they are not readable is that the certificates are stored in a folder, typically /etc/letsencrypt/archive/domain/, and the archive folder is owned by root:root with mode 0700. In addition, the private key is owned by root:root with mode 0600.
In the nginx container, the nginx master process runs as root, but it spawns worker processes which need to read the certificates and keys, and those workers run as an unprivileged user.
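The UID/GID that those workers run under (the NGINXUID value used in the post-hook further down) can be read straight from the container, assuming it is named nginx as in the compose file below:
docker exec nginx id nginx
docker exec nginx grep nginx /etc/passwd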
DOCKER-COMPOSE config
---
version: '3'
services:
  nginx:
    container_name: nginx
    image: nginx:latest
    ports:
      - 80:80
      - 443:443
    volumes:
      - ./data/nginx/conf:/etc/nginx/conf.d
      # The volume under is to provide the DHPARAM file.
      - ./data/nginx/tls:/etc/pki/tls
      - ./data/certbot/conf:/etc/letsencrypt
      - ./data/certbot/www:/var/www/certbot
    # This reloads the certificates every 24h as long as the container is running
    command: "/bin/sh -c 'while :; do sleep 24h & wait $${!}; nginx -s reload; done & nginx -g \"daemon off;\"'"
#  certbot:
#    container_name: certbot
#    image: certbot/certbot
#    volumes:
#      - ./data/certbot/conf:/etc/letsencrypt
#      - ./data/certbot/www:/var/www/certbot
#    depends_on:
#      - nginx
#    # This checks if the certificates need to be renewed every 12 hours.
#    entrypoint: "/bin/sh -c \"trap exit TERM; while :; do certbot renew; #sleep 12h & wait $${!}; done;\""
NGINX config
server {
    listen 80 default_server;
    server_name _;

    location /.well-known/acme-challenge/ {
        allow all;
        root /var/www/certbot;
    }

    location / {
        return 301 https://$host$request_uri;
    }
}
I have excluded unnecessary lines in the config. After doing the initial retrieval of the certificates I will remove the comments in the yaml file, so that the certbot container retrieves new certificates automatically the next time I do docker-compose up -d.
The command I ran after starting the nginx container.
docker run -it --rm \
-v /FQPN/certbot/conf:/etc/letsencrypt \
-v /FQPN/certbot/www:/var/www/certbot \
certbot/certbot certonly \
-m EMAILADDRESS \
--webroot \
--agree-tos \
--webroot-path=/var/www/certbot \
-d DOMAIN
With what you see above, I get valid certificates, but they are only readable by root.
I want this setup to retrieve new certificates when needed, but if I manually change the ownership and mode on the folders/files that restrict access to root only, those changes will be undone when new certificates are retrieved.
I want a solution so that the unprivileged nginx user can read those certificates and keys without having to do manual work whenever new certificates are retrieved.
I checked if there were options in certbot which could be useful. After doing certbot --help, I saw there exists a certbot -h all option which gives you every single option for certbot.
In there I found a post-hook option which is only run when new certificates are successfully retrieved.
The solution was to change the following line in the docker-compose yaml file.
entrypoint: "/bin/sh -c \"trap exit TERM; while :; do certbot renew; #sleep 12h & wait $${!}; done;\""
I changed this to the following.
entrypoint: "/bin/sh -c \"trap exit TERM; while :; do certbot renew --post-hook 'chown root:NGINXUID /etc/letsencrypt/live /etc/letsencrypt/archive && chmod 750 /etc/letsencrypt/live /etc/letsencrypt/archive && chown root:NGINXUID /etc/letsencrypt/archive/DOMAIN/privkey*.pem && chmod 640 /etc/letsencrypt/archive/DOMAIN/privkey*.pem'; sleep 12h & wait $${!}; done;\""
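After the next successful renewal, the effect of the hook can be verified from the host through the bind mount (DOMAIN as above; the numeric group shown by -ln should match the GID the nginx worker runs under):
ls -ln ./data/certbot/conf/live ./data/certbot/conf/archive
ls -ln ./data/certbot/conf/archive/DOMAIN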
I've configured Zookeeper and Kafka containers in a fig.yml file for Docker. Both containers start fine. But after sending a number of messages, my application /zk-client hangs. On checking zookeeper logs, I see the error:
Error Path:/brokers Error:KeeperErrorCode = NoNode for /brokers
My fig.yml is as follows:
zookeeper:
  image: wurstmeister/zookeeper
  ports:
    - "2181:2181"
  environment:
    ZK_ADVERTISED_HOST_NAME: xx.xx.x.xxx
    ZK_CONNECTION_TIMEOUT_MS: 6000
    ZK_SYNC_TIME_MS: 2000
    ZK_DATADIR: /path/to/data/zk/data/dir
kafka:
  image: wurstmeister/kafka:0.8.2.0
  ports:
    - "xx.xx.x.xxx:9092:9092"
  links:
    - zookeeper:zk
  environment:
    KAFKA_ADVERTISED_HOST_NAME: xx.xx.x.xxx
    KAFKA_LOG_DIRS: /home/svc_cis4/dl
  volumes:
    - /var/run/docker.sock:/var/run/docker.sock
I've searched for quite a while now, but I haven't got a solution yet. I've also tried setting the data directory in fig.yml using ZK_DATADIR: '/path/to/zk/data/dir' but it doesn't seem to help. Any assistance will be appreciated.
UPDATE
Content of /opt/kafka_2.10-0.8.2.0/config/server.properties:
broker.id=0
port=9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
num.partitions=1
num.recovery.threads.per.data.dir=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
log.cleaner.enable=false
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=6000
The problems you are having are not related to zookeeper's data directory. The error Error Path:/brokers Error:KeeperErrorCode = NoNode for /brokers means your application cannot find any broker znode in zookeeper's data. This is probably happening because the kafka container is not connecting correctly to zookeeper, and looking at wurstmeister's images I think the problem may be that the KAFKA_ADVERTISED_HOST_NAME variable is wrong. I don't know if there is a reason to assign that variable through an env variable that has to be passed in, but from my point of view this is not a good approach. There are multiple ways to configure kafka (in fact there is no need to set advertised.host.name; you can leave it commented out and kafka will take the default hostname, which can be set with docker), but a fast solution is editing start-kafka.sh and rebuilding the image:
#!/bin/bash
if [[ -z "$KAFKA_ADVERTISED_PORT" ]]; then
export KAFKA_ADVERTISED_PORT=$(docker port `hostname` 9092 | sed -r "s/.*:(.*)/\1/g")
fi
if [[ -z "$KAFKA_BROKER_ID" ]]; then
export KAFKA_BROKER_ID=$KAFKA_ADVERTISED_PORT
fi
if [[ -z "$KAFKA_LOG_DIRS" ]]; then
export KAFKA_LOG_DIRS="/kafka/kafka-logs-$KAFKA_BROKER_ID"
fi
if [[ -z "$KAFKA_ZOOKEEPER_CONNECT" ]]; then
export KAFKA_ZOOKEEPER_CONNECT=$(env | grep ZK.*PORT_2181_TCP= | sed -e 's|.*tcp://||' | paste -sd ,)
fi
if [[ -n "$KAFKA_HEAP_OPTS" ]]; then
sed -r -i "s/^(export KAFKA_HEAP_OPTS)=\"(.*)\"/\1=\"$KAFKA_HEAP_OPTS\"/g" $KAFKA_HOME/bin/kafka-server-start.sh
unset KAFKA_HEAP_OPTS
fi
for VAR in `env`
do
if [[ $VAR =~ ^KAFKA_ && ! $VAR =~ ^KAFKA_HOME ]]; then
kafka_name=`echo "$VAR" | sed -r "s/KAFKA_(.*)=.*/\1/g" | tr '[:upper:]' '[:lower:]' | tr _ .`
env_var=`echo "$VAR" | sed -r "s/(.*)=.*/\1/g"`
if egrep -q "(^|^#)$kafka_name=" $KAFKA_HOME/config/server.properties; then
sed -r -i "s#(^|^#)($kafka_name)=(.*)#\2=${!env_var}#g" $KAFKA_HOME/config/server.properties #note that no config values may contain an '#' char
else
echo "$kafka_name=${!env_var}" >> $KAFKA_HOME/config/server.properties
fi
fi
done
###NEW###
IP=$(hostname --ip-address)
sed -i -e "s/^advertised.host.name.*/advertised.host.name=$IP/" $KAFKA_HOME/config/server.properties
###END###
$KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties
If this doesn't solve your problem, you can get more information by starting a session inside the containers (i.e. docker exec -it kafkadocker_kafka_1 /bin/bash for kafka and docker exec -it kafkadocker_zookeeper_1 /bin/bash for zookeeper), and there check the kafka logs or the zookeeper console (/opt/zookeeper-3.4.6/bin/zkCli.sh).
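From that zookeeper console you can also list the broker registrations that the NoNode error is complaining about; a sketch (broker.id=0 matches the server.properties above):
/opt/zookeeper-3.4.6/bin/zkCli.sh -server localhost:2181
ls /brokers/ids
get /brokers/ids/0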
The configuration that's been working for me without any issues for the last two days involves specifying host addresses for both Zookeeper and Kafka. My fig.yml content is:
zookeeper:
  image: wurstmeister/zookeeper
  ports:
    - "xx.xx.x.xxx:2181:2181"
kafka:
  image: wurstmeister/kafka:0.8.2.0
  ports:
    - "9092:9092"
  links:
    - zookeeper:zk
  environment:
    KAFKA_ADVERTISED_HOST_NAME: xx.xx.x.xxx
    KAFKA_NUM_REPLICA_FETCHERS: 4
    ...other env variables...
  volumes:
    - /var/run/docker.sock:/var/run/docker.sock
validator:
  build: .
  volumes:
    - .:/host
  entrypoint: /bin/bash
  command: -c 'java -jar /host/app1.jar'
  links:
    - zookeeper:zk
    - kafka
analytics:
  build: .
  volumes:
    - .:/host
  entrypoint: /bin/bash
  command: -c 'java -jar /host/app2.jar'
  links:
    - zookeeper:zk
    - kafka
loader:
  build: .
  volumes:
    - .:/host
  entrypoint: /bin/bash
  command: -c 'java -jar /host/app3.jar'
  links:
    - zookeeper:zk
    - kafka
And the accompanying Dockerfile content:
FROM ubuntu:trusty
MAINTAINER Wurstmeister
RUN apt-get update; apt-get install -y unzip openjdk-7-jdk wget git docker.io
RUN wget -q http://apache.mirrors.lucidnetworks.net/kafka/0.8.2.0/kafka_2.10-0.8.2.0.tgz -O /tmp/kafka_2.10-0.8.2.0.tgz
RUN tar xfz /tmp/kafka_2.10-0.8.2.0.tgz -C /opt
VOLUME ["/kafka"]
ENV KAFKA_HOME /opt/kafka_2.10-0.8.2.0
ADD start-kafka.sh /usr/bin/start-kafka.sh
ADD broker-list.sh /usr/bin/broker-list.sh
CMD start-kafka.sh
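With the Dockerfile and fig.yml above in place, the stack is rebuilt and started the usual way with fig:
fig build
fig up -d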