Can't connect to Elasticsearch in OpenCTI stack Docker install

I'm trying to make a Docker installation of OpenCTI work.
I've followed the instructions here https://github.com/OpenCTI-Platform/docker and I'm able to successfully create the Docker stack on Windows Desktop Docker.
Here is my docker-compose.yml file
version: '3'
services:
  redis:
    image: redis:6.2.6
    restart: always
    volumes:
      - redisdata:/data
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.15.1
    volumes:
      - esdata:/usr/share/elasticsearch/data
    environment:
      - discovery.type=single-node
      - xpack.ml.enabled=false
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    ports:
      - "9200:9200"
      - "9300:9300"
  minio:
    image: minio/minio:RELEASE.2021-10-13T00-23-17Z
    volumes:
      - s3data:/data
    ports:
      - "9000:9000"
    environment:
      MINIO_ROOT_USER: ${MINIO_ROOT_USER}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
    command: server /data
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    restart: always
  rabbitmq:
    image: rabbitmq:3.9-management
    environment:
      - RABBITMQ_DEFAULT_USER=${RABBITMQ_DEFAULT_USER}
      - RABBITMQ_DEFAULT_PASS=${RABBITMQ_DEFAULT_PASS}
    volumes:
      - amqpdata:/var/lib/rabbitmq
    restart: always
  opencti:
    image: opencti/platform:5.0.3
    environment:
      - NODE_OPTIONS=--max-old-space-size=8096
      - APP__PORT=8080
      - APP__ADMIN__EMAIL=${OPENCTI_ADMIN_EMAIL}
      - APP__ADMIN__PASSWORD=${OPENCTI_ADMIN_PASSWORD}
      - APP__ADMIN__TOKEN=${OPENCTI_ADMIN_TOKEN}
      - APP__APP_LOGS__LOGS_LEVEL=error
      - REDIS__HOSTNAME=redis
      - REDIS__PORT=6379
      - ELASTICSEARCH__URL=http://elasticsearch:9200
      - MINIO__ENDPOINT=minio
      - MINIO__PORT=9000
      - MINIO__USE_SSL=false
      - MINIO__ACCESS_KEY=${MINIO_ROOT_USER}
      - MINIO__SECRET_KEY=${MINIO_ROOT_PASSWORD}
      - RABBITMQ__HOSTNAME=rabbitmq
      - RABBITMQ__PORT=5672
      - RABBITMQ__PORT_MANAGEMENT=15672
      - RABBITMQ__MANAGEMENT_SSL=false
      - RABBITMQ__USERNAME=${RABBITMQ_DEFAULT_USER}
      - RABBITMQ__PASSWORD=${RABBITMQ_DEFAULT_PASS}
      - SMTP__HOSTNAME=${SMTP_HOSTNAME}
      - SMTP__PORT=25
      - PROVIDERS__LOCAL__STRATEGY=LocalStrategy
    ports:
      - "8080:8080"
    depends_on:
      - redis
      - elasticsearch
      - minio
      - rabbitmq
    restart: always
    deploy:
      placement:
        constraints:
          - "node.role==manager"
  worker:
    image: opencti/worker:5.0.3
    environment:
      - OPENCTI_URL=http://opencti:8080
      - OPENCTI_TOKEN=${OPENCTI_ADMIN_TOKEN}
      - WORKER_LOG_LEVEL=info
    depends_on:
      - opencti
    deploy:
      mode: replicated
      replicas: 3
    restart: always
  connector-history:
    image: opencti/connector-history:5.0.3
    environment:
      - OPENCTI_URL=http://opencti:8080
      - OPENCTI_TOKEN=${OPENCTI_ADMIN_TOKEN}
      - CONNECTOR_ID=${CONNECTOR_HISTORY_ID} # Valid UUIDv4
      - CONNECTOR_TYPE=STREAM
      - CONNECTOR_NAME=History
      - CONNECTOR_SCOPE=history
      - CONNECTOR_CONFIDENCE_LEVEL=15 # From 0 (Unknown) to 100 (Fully trusted)
      - CONNECTOR_LOG_LEVEL=info
    restart: always
    depends_on:
      - opencti
  connector-export-file-stix:
    image: opencti/connector-export-file-stix:5.0.3
    environment:
      - OPENCTI_URL=http://opencti:8080
      - OPENCTI_TOKEN=${OPENCTI_ADMIN_TOKEN}
      - CONNECTOR_ID=${CONNECTOR_EXPORT_FILE_STIX_ID} # Valid UUIDv4
      - CONNECTOR_TYPE=INTERNAL_EXPORT_FILE
      - CONNECTOR_NAME=ExportFileStix2
      - CONNECTOR_SCOPE=application/json
      - CONNECTOR_CONFIDENCE_LEVEL=15 # From 0 (Unknown) to 100 (Fully trusted)
      - CONNECTOR_LOG_LEVEL=info
    restart: always
    depends_on:
      - opencti
  connector-export-file-csv:
    image: opencti/connector-export-file-csv:5.0.3
    environment:
      - OPENCTI_URL=http://opencti:8080
      - OPENCTI_TOKEN=${OPENCTI_ADMIN_TOKEN}
      - CONNECTOR_ID=${CONNECTOR_EXPORT_FILE_CSV_ID} # Valid UUIDv4
      - CONNECTOR_TYPE=INTERNAL_EXPORT_FILE
      - CONNECTOR_NAME=ExportFileCsv
      - CONNECTOR_SCOPE=text/csv
      - CONNECTOR_CONFIDENCE_LEVEL=15 # From 0 (Unknown) to 100 (Fully trusted)
      - CONNECTOR_LOG_LEVEL=info
    restart: always
    depends_on:
      - opencti
  connector-import-file-stix:
    image: opencti/connector-import-file-stix:5.0.3
    environment:
      - OPENCTI_URL=http://opencti:8080
      - OPENCTI_TOKEN=${OPENCTI_ADMIN_TOKEN}
      - CONNECTOR_ID=${CONNECTOR_IMPORT_FILE_STIX_ID} # Valid UUIDv4
      - CONNECTOR_TYPE=INTERNAL_IMPORT_FILE
      - CONNECTOR_NAME=ImportFileStix
      - CONNECTOR_SCOPE=application/json,text/xml
      - CONNECTOR_AUTO=false # Enable/disable auto-import of file
      - CONNECTOR_CONFIDENCE_LEVEL=15 # From 0 (Unknown) to 100 (Fully trusted)
      - CONNECTOR_LOG_LEVEL=info
    restart: always
    depends_on:
      - opencti
  connector-import-report:
    image: opencti/connector-import-report:5.0.3
    environment:
      - OPENCTI_URL=http://opencti:8080
      - OPENCTI_TOKEN=${OPENCTI_ADMIN_TOKEN}
      - CONNECTOR_ID=${CONNECTOR_IMPORT_REPORT_ID} # Valid UUIDv4
      - CONNECTOR_TYPE=INTERNAL_IMPORT_FILE
      - CONNECTOR_NAME=ImportReport
      - CONNECTOR_SCOPE=application/pdf,text/plain
      - CONNECTOR_AUTO=false # Enable/disable auto-import of file
      - CONNECTOR_ONLY_CONTEXTUAL=true # Only extract data related to an entity (a report, a threat actor, etc.)
      - CONNECTOR_CONFIDENCE_LEVEL=15 # From 0 (Unknown) to 100 (Fully trusted)
      - CONNECTOR_LOG_LEVEL=info
      - IMPORT_REPORT_CREATE_INDICATOR=false
    restart: always
    depends_on:
      - opencti
  connector-alienvault:
    image: opencti/connector-alienvault:5.0.3
    environment:
      - OPENCTI_URL=http://opencti:8080
      - OPENCTI_TOKEN=2cf0a4fe-4ded-4931-901b-20e599b7f013
      - CONNECTOR_ID=0c02a154-95a5-4624-b1cd-225582c12975
      - CONNECTOR_TYPE=EXTERNAL_IMPORT
      - CONNECTOR_NAME=AlienVault
      - CONNECTOR_SCOPE=alienvault
      - CONNECTOR_CONFIDENCE_LEVEL=15 # From 0 (Unknown) to 100 (Fully trusted)
      - CONNECTOR_UPDATE_EXISTING_DATA=false
      - CONNECTOR_LOG_LEVEL=info
      - ALIENVAULT_BASE_URL=https://otx.alienvault.com
      - ALIENVAULT_API_KEY=8f261df53d1d06c095edcd6a4b6677a6f46f72cdfcbbc4e2794758b7dca1a51d
      - ALIENVAULT_TLP=White
      - ALIENVAULT_CREATE_OBSERVABLES=true
      - ALIENVAULT_CREATE_INDICATORS=true
      - ALIENVAULT_PULSE_START_TIMESTAMP=2021-05-01T00:00:00 # BEWARE! Could be a lot of pulses!
      - ALIENVAULT_REPORT_TYPE=threat-report
      - ALIENVAULT_REPORT_STATUS=New
      - ALIENVAULT_GUESS_MALWARE=false # Use tags to guess malware.
      - ALIENVAULT_GUESS_CVE=true # Use tags to guess CVE.
      - ALIENVAULT_EXCLUDED_PULSE_INDICATOR_TYPES=FileHash-MD5,FileHash-SHA1 # Excluded Pulse indicator types.
      - ALIENVAULT_ENABLE_RELATIONSHIPS=true # Enable/Disable relationship creation between SDOs.
      - ALIENVAULT_ENABLE_ATTACK_PATTERNS_INDICATES=true # Enable/Disable "indicates" relationships between indicators and attack patterns
      - ALIENVAULT_INTERVAL_SEC=1800
    restart: always
volumes:
  esdata:
  s3data:
  redisdata:
  amqpdata:
and here is my .env file:
OPENCTI_ADMIN_EMAIL=admin@opencti.io
OPENCTI_ADMIN_PASSWORD=admin
OPENCTI_ADMIN_TOKEN=2cf0a4fe-4ded-4931-901b-20e599b7f013
MINIO_ROOT_USER=RootUser
MINIO_ROOT_PASSWORD=RootPassword
RABBITMQ_DEFAULT_USER=DefaultUser
RABBITMQ_DEFAULT_PASS=DefaultPass
CONNECTOR_HISTORY_ID=168b9e3f-cbb4-4d06-91d8-073a20ce2453
CONNECTOR_EXPORT_FILE_STIX_ID=a1fed5cb-0b60-4756-95b0-f62fb67204af
CONNECTOR_EXPORT_FILE_CSV_ID=3bbb35ef-2168-4031-832b-451767f6715c
CONNECTOR_IMPORT_FILE_STIX_ID=53bf4c37-f196-401e-af63-e4a0e7194c2b
CONNECTOR_IMPORT_REPORT_ID=11c03aeb-8b8a-4c95-a531-f82b0aebb2ad
SMTP_HOSTNAME=172.17.0.1
After creating the Docker stack I was unable to connect to localhost:8080, so I looked at the logs of the opencti/platform container and found the following error:
{"error":{"name":"ConfigurationError","_error":{},"_showLocations":false,"_showPath":false,"time_thrown":"2021-11-10T16:09:02.791Z","data":{"reason":"ElasticSearch seems down","http_status":500,"category":"technical","error":"connect ECONNREFUSED 172.22.0.3:9200"},"internalData":{}},"category":"APP","version":"5.0.3","level":"error","message":"[OPENCTI] Platform initialization fail","timestamp":"2021-11-10T16:09:02.793Z"}
/opt/opencti/build/src/config/errors.js:8
return new Exception();
^
ConfigurationError: A configuration error has occurred
at error (/opt/opencti/build/src/config/errors.js:8:10)
at ConfigurationError (/opt/opencti/build/src/config/errors.js:53:3)
at /opt/opencti/build/src/database/elasticSearch.js:190:15
at process.async (node:internal/process/task_queues:96:5)
at checkSystemDependencies (/opt/opencti/build/src/initialization.js:113:40)
at initialization (/opt/opencti/build/src/initialization.js:372:3)
at /opt/opencti/build/src/boot.js:7:16
This says that OpenCTI is unable to connect to the elasticsearch container.
Running curl on the host machine returns the following:
C:\Windows\system32>curl -X GET "localhost:9200/_cluster/health?pretty"
curl: (52) Empty reply from server
and running curl inside the elasticsearch container returns the following:
sh-4.4# curl -X GET "localhost:9200/_cluster/health?pretty"
curl: (7) Failed to connect to localhost port 9200: Connection refused
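Since the connection is refused even from inside the container, Elasticsearch itself is apparently not listening yet, so OpenCTI may simply be racing a slow Elasticsearch startup. A sketch of making OpenCTI wait for a healthy Elasticsearch (an assumption on my part, not a confirmed fix; the condition form of depends_on is honored by docker-compose 2.x files and by Docker Compose V2, but ignored by docker stack deploy):
  elasticsearch:
    # ...settings as above...
    healthcheck:
      test: ["CMD-SHELL", "curl -sf http://localhost:9200/_cluster/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 50
  opencti:
    # ...settings as above...
    depends_on:
      elasticsearch:
        condition: service_healthy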
And finally, here is the output of the Elasticsearch container log:
c:\Tools\OpenCTI\docker>docker logs docker_elasticsearch_1
WARNING: A terminally deprecated method in java.lang.System has been called
WARNING: System::setSecurityManager has been called by org.elasticsearch.bootstrap.Elasticsearch (file:/usr/share/elasticsearch/lib/elasticsearch-7.15.1.jar)
WARNING: Please consider reporting this to the maintainers of org.elasticsearch.bootstrap.Elasticsearch
WARNING: System::setSecurityManager will be removed in a future release
WARNING: A terminally deprecated method in java.lang.System has been called
WARNING: System::setSecurityManager has been called by org.elasticsearch.bootstrap.Security (file:/usr/share/elasticsearch/lib/elasticsearch-7.15.1.jar)
WARNING: Please consider reporting this to the maintainers of org.elasticsearch.bootstrap.Security
WARNING: System::setSecurityManager will be removed in a future release
{"type": "server", "timestamp": "2021-11-10T16:12:49,261Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "version[7.15.1], pid[8], build[default/docker/83c34f456ae29d60e94d886e455e6a3409bba9ed/2021-10-07T21:56:19.031608185Z], OS[Linux/5.10.16.3-microsoft-standard-WSL2/amd64], JVM[Eclipse Adoptium/OpenJDK 64-Bit Server VM/17/17+35]" }
{"type": "server", "timestamp": "2021-11-10T16:12:50,963Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "JVM home [/usr/share/elasticsearch/jdk], using bundled JDK [true]" }
{"type": "server", "timestamp": "2021-11-10T16:12:50,965Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "JVM arguments [-Xshare:auto, -Des.networkaddress.cache.ttl=60, -Des.networkaddress.cache.negative.ttl=10, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -XX:-OmitStackTraceInFastThrow, -XX:+ShowCodeDetailsInExceptionMessages, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dio.netty.allocator.numDirectArenas=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Djava.locale.providers=SPI,COMPAT, --add-opens=java.base/java.io=ALL-UNNAMED, -XX:+UseG1GC, -Djava.io.tmpdir=/tmp/elasticsearch-3486636715162729323, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, -Des.cgroups.hierarchy.override=/, -Xms3170m, -Xmx3170m, -XX:MaxDirectMemorySize=1661992960, -XX:G1HeapRegionSize=4m, -XX:InitiatingHeapOccupancyPercent=30, -XX:G1ReservePercent=15, -Des.path.home=/usr/share/elasticsearch, -Des.path.conf=/usr/share/elasticsearch/config, -Des.distribution.flavor=default, -Des.distribution.type=docker, -Des.bundled_jdk=true]" }
{"type": "server", "timestamp": "2021-11-10T16:24:45,864Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "loaded module [aggs-matrix-stats]" }
{"type": "server", "timestamp": "2021-11-10T16:24:45,937Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "loaded module [analysis-common]" }
{"type": "server", "timestamp": "2021-11-10T16:24:45,937Z", "level": "INFO", "component": "o.e.p.PluginsService", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "loaded module [constant-keyword]" }
{"type": "server", "timestamp": "2021-11-10T16:28:55,893Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [40.5s/40538938900ns] on relative clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:28:56,461Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [21.1s/21140ms] on absolute clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:29:09,291Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [21.1s/21189926100ns] on relative clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:29:09,730Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [13.2s/13272ms] on absolute clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:29:09,808Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [13.2s/13271423200ns] on relative clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:29:40,059Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [21s/21002ms] on absolute clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:29:41,083Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [21s/21002782100ns] on relative clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:31:25,568Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [1.3m/80947ms] on absolute clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:31:27,646Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [1.3m/80946985800ns] on relative clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:40:11,402Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [11.9s/11968ms] on absolute clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:40:16,415Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [11.9s/11967222000ns] on relative clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:40:17,438Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [6s/6050ms] on absolute clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:40:17,489Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [6s/6050442500ns] on relative clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:42:07,964Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [9.1s/9115ms] on absolute clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:42:08,922Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [9.1s/9115088600ns] on relative clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:44:45,356Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "execution of [ReschedulingRunnable{runnable=org.elasticsearch.watcher.ResourceWatcherService$ResourceMonitor#3eac944, interval=5s}] took [6225ms] which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:44:45,139Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [6.2s/6226ms] on absolute clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:44:45,809Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [6.2s/6225665000ns] on relative clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:44:57,124Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [6.7s/6719ms] on absolute clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:44:57,488Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "timer thread slept for [6.7s/6718734700ns] on relative clock which is above the warn threshold of [5000ms]" }
{"type": "server", "timestamp": "2021-11-10T16:44:57,181Z", "level": "WARN", "component": "o.e.t.ThreadPool", "cluster.name": "docker-cluster", "node.name": "329e6b6ab306", "message": "execution of [ReschedulingRunnable{runnable=org.elasticsearch.watcher.ResourceWatcherService$ResourceMonitor#4e9331e8, interval=1m}] took [6718ms] which is above the warn threshold of [5000ms]" }
My Docker version is the following:
Client:
 Cloud integration: 1.0.17
 Version:           20.10.8
 API version:       1.41
 Go version:        go1.16.6
 Git commit:        3967b7d
 Built:             Fri Jul 30 19:58:50 2021
 OS/Arch:           windows/amd64
 Context:           default
 Experimental:      true

Server: Docker Engine - Community
 Engine:
  Version:          20.10.8
  API version:      1.41 (minimum version 1.12)
  Go version:       go1.16.6
  Git commit:       75249d8
  Built:            Fri Jul 30 19:52:31 2021
  OS/Arch:          linux/amd64
  Experimental:     false
 containerd:
  Version:          1.4.9
  GitCommit:        e25210fe30a0a703442421b0f60afac609f950a3
 runc:
  Version:          1.0.1
  GitCommit:        v1.0.1-0-g4144b63
 docker-init:
  Version:          0.19.0
  GitCommit:        de40ad0
I don't have the faintest clue what's going on. Please help!
Thank you in advance!
P.S. I also tried Elasticsearch's own sample docker-compose.yml to start a separate stack:
version: '2.2'
services:
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.15.1
    container_name: es01
    environment:
      - node.name=es01
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data01:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
    networks:
      - elastic
  es02:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.15.1
    container_name: es02
    environment:
      - node.name=es02
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data02:/usr/share/elasticsearch/data
    networks:
      - elastic
  es03:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.15.1
    container_name: es03
    environment:
      - node.name=es03
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es02
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data03:/usr/share/elasticsearch/data
    networks:
      - elastic
volumes:
  data01:
    driver: local
  data02:
    driver: local
  data03:
    driver: local
networks:
  elastic:
    driver: bridge
and I receive the same error.
Here is the Diagnostics ID from Docker Desktop:
8A8E051B-440A-45B6-8AFC-1DCB7BB07FF3/20211110173108

Related

kube-Prometheus-stack - Grafana pod stuck at "CrashloopBackoff"

I'm trying to install kube-prometheus-stack (39.8.0) and everything went well; now there is a requirement that the Grafana pod needs a persistent volume with oci-fss as the storage class. Below is my values.yaml file:
grafana:
  initChownData:
    enabled: false
  persistence:
    enabled: true
    type: pvc
    storageClassName: oci-fss
    accessModes:
      - ReadWriteMany
    size: 50Gi
    finalizers:
      - kubernetes.io/pvc-protection
Grafana pod status is:
pod/prometheus-grafana-54cdc8774f-blfqx 2/3 CrashLoopBackOff
Grafana pod logs:
kubectl logs -f pod/prometheus-grafana-54cdc8774f-blfqx -n prometheus
Defaulted container "grafana-sc-dashboard" out of: grafana-sc-dashboard, grafana-sc-datasources, grafana
{"time": "2022-11-11T08:07:40.625012+00:00", "level": "INFO", "msg": "Starting collector"}
{"time": "2022-11-11T08:07:40.625190+00:00", "level": "WARNING", "msg": "No folder annotation was provided, defaulting to k8s-sidecar-target-directory"}
{"time": "2022-11-11T08:07:40.625329+00:00", "level": "INFO", "msg": "Loading incluster config ..."}
{"time": "2022-11-11T08:07:40.626083+00:00", "level": "INFO", "msg": "Config for cluster api at 'https://10.96.0.1:443' loaded..."}
{"time": "2022-11-11T08:07:40.626199+00:00", "level": "INFO", "msg": "Unique filenames will not be enforced."}
{"time": "2022-11-11T08:07:40.626283+00:00", "level": "INFO", "msg": "5xx response content will not be enabled."}
It was a filesystem storage issue: the Grafana container was not able to write its configuration to the storage. We can mark this as resolved.
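The usual cause of this class of failure with a fresh PVC is that the grafana user (uid/gid 472 in the stock image) cannot write to the mounted filesystem. As a hedged sketch of values that typically resolve it (the final values.yaml was not posted, and these assume the chart's standard options):
grafana:
  # let the init container chown the volume before Grafana starts
  initChownData:
    enabled: true
  # and/or run the pod with the grafana uid/gid so the mount is writable
  securityContext:
    runAsUser: 472
    runAsGroup: 472
    fsGroup: 472
  persistence:
    enabled: true
    type: pvc
    storageClassName: oci-fss
    accessModes:
      - ReadWriteMany
    size: 50Gi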

Bad Gateway when using Traefik in docker swarm

I'm currently struggling a lot to spin up a small Traefik example on my Docker swarm instance.
I started with a docker-compose file for local development, and everything worked as expected.
But when I define this as a swarm file to bring the environment into production, I always get a Bad Gateway from Traefik.
After a lot of searching, this seems to be related to a networking issue, with Traefik trying to route requests between two different networks, but I have not been able to find the problem.
After several iterations I reproduced the issue with "official" containers to provide a better example for other people.
So this is my traefik.yml:
version: "3.7"
networks:
external:
external: true
services:
traefik:
image: "traefik:v2.8.1"
command:
- "--log.level=INFO"
- "--accesslog=true"
- "--api.insecure=true"
- "--providers.docker=true"
- "--providers.docker.swarmMode=true"
- "--providers.docker.exposedbydefault=false"
- "--providers.docker.network=external"
- "--entrypoints.web.address=:80"
- "--entrypoints.web.forwardedHeaders.insecure"
ports:
- "80:80"
- "8080:8080"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
networks:
- external
deploy:
placement:
constraints: [node.role == manager]
host-app:
image: traefik/whoami
ports:
- "9000:80"
networks:
- external
deploy:
labels:
- "traefik.enable=true"
- "traefik.http.routers.host-app.rule=PathPrefix(`/whoami`)"
- "traefik.http.services.host-app.loadbalancer.server.port=9000"
- "traefik.http.routers.host-app.entrypoints=web"
- "traefik.http.middlewares.host-app-stripprefix.stripprefix.prefixes=/"
- "traefik.http.routers.host-app.middlewares=host-app-stripprefix#docker"
- "traefik.docker.network=external"
The network is created with docker network create -d overlay external, and I deploy the stack with docker stack deploy -c traefik.yml server.
Up to this point there are no issues and everything spins up fine.
When I curl localhost:9000 I get the correct response:
curl localhost:9000
Hostname: 7aa77bc62b44
IP: 127.0.0.1
IP: 10.0.0.8
IP: 172.25.0.4
IP: 10.0.4.6
RemoteAddr: 10.0.0.2:35068
GET / HTTP/1.1
Host: localhost:9000
User-Agent: curl/7.68.0
Accept: */*
but curling the route through Traefik always returns the Bad Gateway error:
curl localhost/whoami
Bad Gateway%
So I checked the network with docker network inspect external to ensure that both containers are attached to the same network, and this is the case:
[
    {
        "Name": "external",
        "Id": "iianul6ua9u1f1bb8ibsnwkyc",
        "Created": "2022-08-09T19:32:01.4491323Z",
        "Scope": "swarm",
        "Driver": "overlay",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "10.0.4.0/24",
                    "Gateway": "10.0.4.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "7aa77bc62b440e32c7b904fcbd91aea14e7a73133af0889ad9e0c9f75f2a884a": {
                "Name": "server_host-app.1.m2f5x8jvn76p2ssya692f4ydp",
                "EndpointID": "5d5175b73f1aadf2da30f0855dc0697628801a31d37aa50d78a20c21858ccdae",
                "MacAddress": "02:42:0a:00:04:06",
                "IPv4Address": "10.0.4.6/24",
                "IPv6Address": ""
            },
            "e23f5c2897833f800a961ab49a4f76870f0377b5467178a060ec938391da46c7": {
                "Name": "server_traefik.1.v5g3af00gqpulfcac84rwmnkx",
                "EndpointID": "4db5d69e1ad805954503eb31c4ece5a2461a866e10fcbf579357bf998bf3490b",
                "MacAddress": "02:42:0a:00:04:03",
                "IPv4Address": "10.0.4.3/24",
                "IPv6Address": ""
            },
            "lb-external": {
                "Name": "external-endpoint",
                "EndpointID": "ed668b033450646629ca050e4777ae95a5a65fa12a5eb617dbe0c4a20d84be28",
                "MacAddress": "02:42:0a:00:04:04",
                "IPv4Address": "10.0.4.4/24",
                "IPv6Address": ""
            }
        },
        "Options": {
            "com.docker.network.driver.overlay.vxlanid_list": "4100"
        },
        "Labels": {},
        "Peers": [
            {
                "Name": "3cb3e7ba42dc",
                "IP": "192.168.65.3"
            }
        ]
    }
]
Checking the Traefik logs, I get the following:
10.0.0.2 - - [09/Aug/2022:19:42:34 +0000] "GET /whoami HTTP/1.1" 502 11 "-" "-" 4 "host-app@docker" "http://10.0.4.9:9000" 0ms
which is the correct server:port for the whoami service. Even pinging 10.0.4.9 from inside the Traefik container works fine:
PING 10.0.4.9 (10.0.4.9): 56 data bytes
64 bytes from 10.0.4.9: seq=0 ttl=64 time=0.066 ms
64 bytes from 10.0.4.9: seq=1 ttl=64 time=0.057 ms
^C
--- 10.0.4.9 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.057/0.061/0.066 ms
These logs and snippets are all from my local swarm on Docker for Windows with the WSL2 Ubuntu distribution, but I have also tested this on a CentOS swarm available within my company and on https://labs.play-with-docker.com/, and all of them lead to the same error.
So please, can anybody tell me what configuration I'm missing or what mistake I made to get this running?
After consulting a coworker and building another example, we finally found the solution ourselves.
It was simply my own mistake: I used the published port for load balancing from Traefik to the service, which is wrong.
host-app:
  image: traefik/whoami
  ports:
    - "9000:80"
  networks:
    - external
  deploy:
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.host-app.rule=PathPrefix(`/whoami`)"
      - "traefik.http.services.host-app.loadbalancer.server.port=80" # <--- the fix: was 9000, the published port
      - "traefik.http.routers.host-app.entrypoints=web"
      - "traefik.http.middlewares.host-app-stripprefix.stripprefix.prefixes=/"
      - "traefik.http.routers.host-app.middlewares=host-app-stripprefix@docker"
      - "traefik.docker.network=external"
And that's the reason for the Bad Gateway: Traefik tried to reach the published port on the service, which does not exist inside the overlay network. The loadbalancer.server.port label has to be the container's internal port (80 for whoami), not the published one.
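One hedged way to verify this kind of fix, assuming api.insecure=true as in the compose file above: Traefik's own API reports the backend URL it forwards to, so after redeploying it should list port 80 instead of 9000:
curl -s http://localhost:8080/api/http/services | grep -o '"url":"[^"]*"'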

Override parent image docker container command to do something after

I want to change a user password and run a SQL script against a DB2 container image. How do I do whatever the parent image called for, and then run a few commands after that has completed? I need this to run using docker-compose because the database will be used to support an acceptance test. In my docker-compose.yml file I have a command property, but when I checked the container I did not see the result of the touch statement, so it never ran.
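For reference, a command on an image that defines an ENTRYPOINT is passed to the entrypoint as arguments rather than replacing it, which is consistent with the touch never running. A hedged sketch of the generic "do what the parent did, then run extra steps" pattern (the entrypoint path is taken from the docker inspect output below; the sleep is a crude stand-in for a real readiness check, and this is not the solution ultimately used):
ssc-file-generator-db2-test:
  image: ibmcom/db2:latest
  privileged: true
  entrypoint: ["/bin/bash", "-c"]
  command:
    - |
      # run the image's original entrypoint in the background
      /var/db2_setup/lib/setup_db2_instance.sh &
      # crude wait for the instance to come up
      sleep 120
      touch /command-run
      echo "db2inst1:db2inst1" | chpasswd
      su - db2inst1 -c "db2 -tvf /db2-test-scaffolding/init.sql"
      # keep the container attached to the background setup process
      wait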
My docker-compose.yml file is:
version: "3.2"
services:
ssc-file-generator-db2-test:
container_name: "ssc-file-generator-db2-test"
image: ibmcom/db2:latest
command: /bin/bash -c "touch /command-run && echo \"db2inst1:db2inst1\" | chpasswd && su db2inst1 && db2 -tvf /db2-test-scaffolding/init.sql"
hostname: db2server
privileged: true
# entrypoint: ["/bin/sh -c ]
ports:
- 50100:50000
- 55100:55000
networks:
- back-tier
restart: "no"
volumes:
- db2-test-scaffolding:/db2-test-scaffolding
env_file:
- acceptance-run.environment
# ssc-file-generator:
# container_name: "ssc-file-generator_testing"
# image: ssc-file-generator
# depends_on: ["ssc-file-generator-db2-test]
# command:
# env_file: ["acceptance-run.environment"]
networks:
back-tier: {}
volumes:
db2-test-scaffolding:
driver: local
driver_opts:
o: bind
type: none
device: ./db2-test-scaffolding
acceptance-run.environment
BCUPLOAD_DATASOURCE_DIALECT=org.hibernate.dialect.DB2Dialect
BCUPLOAD_DATASOURCE_DRIVER=com.ibm.db2.jcc.DB2Driver
BCUPLOAD_DATASOURCE_PASSWORD=bluecost
BCUPLOAD_DATASOURCE_URL=jdbc:db2://localhost:50100/mydb:currentSchema=FILE_GENERATOR
BCUPLOAD_DATASOURCE_USERNAME=bluecost
B2INSTANCE=db2inst1
DB2INST1_PASSWORD=db2inst1
DBNAME=MYDB
DEBUG_SECRETS=true
file-generator.test.files.path=src/test/acceptance/resources/files/
# Needed for DB2 container
LICENSE=accept
The Docker image is ibmcom/db2:latest. For convenience, here is the output of docker inspect ibmcom/db2:latest:
[
    {
        "Id": "sha256:e304e217603b80b31c989574081b2badf210b4466c7f74cf32087ee0a1ba6e04",
        "RepoTags": [
            "ibmcom/db2:latest"
        ],
        "RepoDigests": [
            "ibmcom/db2@sha256:77da4492bf18c49a1012aa6071a16aee0039dca9c0a2a492345b6b030714a54f"
        ],
        "Parent": "",
        "Comment": "",
        "Created": "2021-03-29T18:54:36.94484751Z",
        "Container": "e59bda8065b72a0e440d145d6d90ba77231a514e811e66651d4fa6da98a34910",
        "ContainerConfig": {
            "Hostname": "6125cd0dc6e6",
            "Domainname": "",
            "User": "",
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "ExposedPorts": {
                "22/tcp": {},
                "50000/tcp": {},
                "55000/tcp": {},
                "60006/tcp": {},
                "60007/tcp": {}
            },
            "Tty": false,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": [
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
                "container=oci",
                "STORAGE_DIR=/database",
                "HADR_SHARED_DIR=/hadr",
                "DBPORT=50000",
                "TSPORT=55000",
                "SETUPDIR=/var/db2_setup",
                "SETUPAREA=/tmp/setup",
                "NOTVISIBLE=in users profile",
                "LICENSE_NAME=db2dec.lic"
            ],
            "Cmd": [
                "/bin/sh",
                "-c",
                "#(nop) ",
                "ENTRYPOINT [\"/var/db2_setup/lib/setup_db2_instance.sh\"]"
            ],
            "Image": "sha256:e65b35603167c75a86515ef4af101a539cbbdf561bcb9efd656d17b8d867c7da",
            "Volumes": {
                "/database": {},
                "/hadr": {}
            },
            "WorkingDir": "",
            "Entrypoint": [
                "/var/db2_setup/lib/setup_db2_instance.sh"
            ],
            "OnBuild": [],
            "Labels": {
                "architecture": "x86_64",
                "build-date": "2021-03-10T06:09:00.139818",
                "com.redhat.build-host": "cpt-1007.osbs.prod.upshift.rdu2.redhat.com",
                "com.redhat.component": "ubi7-container",
                "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI",
                "description": "The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                "distribution-scope": "public",
                "io.k8s.description": "The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                "io.k8s.display-name": "Red Hat Universal Base Image 7",
                "io.openshift.tags": "base rhel7",
                "name": "ubi7",
                "release": "338",
                "summary": "Provides the latest release of the Red Hat Universal Base Image 7.",
                "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/ubi7/images/7.9-338",
                "vcs-ref": "a4e710a688a6374670ecdd56637c3f683d11cbe3",
                "vcs-type": "git",
                "vendor": "Red Hat, Inc.",
                "version": "7.9"
            }
        },
        "DockerVersion": "19.03.6",
        "Author": "db2_download_and_go",
        "Config": {
            "Hostname": "6125cd0dc6e6",
            "Domainname": "",
            "User": "",
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "ExposedPorts": {
                "22/tcp": {},
                "50000/tcp": {},
                "55000/tcp": {},
                "60006/tcp": {},
                "60007/tcp": {}
            },
            "Tty": false,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": [
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
                "container=oci",
                "STORAGE_DIR=/database",
                "HADR_SHARED_DIR=/hadr",
                "DBPORT=50000",
                "TSPORT=55000",
                "SETUPDIR=/var/db2_setup",
                "SETUPAREA=/tmp/setup",
                "NOTVISIBLE=in users profile",
                "LICENSE_NAME=db2dec.lic"
            ],
            "Cmd": null,
            "Image": "sha256:e65b35603167c75a86515ef4af101a539cbbdf561bcb9efd656d17b8d867c7da",
            "Volumes": {
                "/database": {},
                "/hadr": {}
            },
            "WorkingDir": "",
            "Entrypoint": [
                "/var/db2_setup/lib/setup_db2_instance.sh"
            ],
            "OnBuild": [],
            "Labels": {
                "architecture": "x86_64",
                "build-date": "2021-03-10T06:09:00.139818",
                "com.redhat.build-host": "cpt-1007.osbs.prod.upshift.rdu2.redhat.com",
                "com.redhat.component": "ubi7-container",
                "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI",
                "description": "The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                "distribution-scope": "public",
                "io.k8s.description": "The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
                "io.k8s.display-name": "Red Hat Universal Base Image 7",
                "io.openshift.tags": "base rhel7",
                "name": "ubi7",
                "release": "338",
                "summary": "Provides the latest release of the Red Hat Universal Base Image 7.",
                "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/ubi7/images/7.9-338",
                "vcs-ref": "a4e710a688a6374670ecdd56637c3f683d11cbe3",
                "vcs-type": "git",
                "vendor": "Red Hat, Inc.",
                "version": "7.9"
            }
        },
        "Architecture": "amd64",
        "Os": "linux",
        "Size": 2778060115,
        "VirtualSize": 2778060115,
        "GraphDriver": {
            "Data": {
                "LowerDir": "/var/lib/docker/overlay2/49d83ba2eb50cdbfc5a9e3a7b4baf907a9b4326aa0710689f602bb3cff01d820/diff:/var/lib/docker/overlay2/ba54659cc4ec10fa84edc49d5480ebe4897629f841d76ae79a4fb0c2edb791a5/diff:/var/lib/docker/overlay2/2238ae349d70686609b990b63c0066d6e51d94be59801a81c7f5b4d97da1fe02/diff:/var/lib/docker/overlay2/704708b72448f8a4750db3aabd43c12f23ad7e6d3f727aa5977bd7ac4db8e8cb/diff:/var/lib/docker/overlay2/1b47e1515517af553fd8b986c841e87d8ba813d53739344c9b7350ad36b54b0b/diff:/var/lib/docker/overlay2/0a580802a7096343aa5d8de5039cf5a011e66e481793230dced8769b024e5cd2/diff:/var/lib/docker/overlay2/4da91655770b0e94236ea8da2ea8ff503467161cf85473a32760f89b56d213ff/diff:/var/lib/docker/overlay2/401c640771a27c70f20abf5c48b0be0e2f42ed5b022f81f58ebc0810831283ea/diff:/var/lib/docker/overlay2/8985c59d1ab32b8d8eaf4c11890801cb228d47cc7437b3e9b4f585e7296e4b6a/diff:/var/lib/docker/overlay2/ec66f9872de7b5310bac2bd5fd59552574df56bb06dcd5dd61ff2b63002d77ed/diff:/var/lib/docker/overlay2/fcf40217c8477dcf4e5fafc8c83408c3c788f367ed67c78cb0bc312439674fcf/diff",
                "MergedDir": "/var/lib/docker/overlay2/8bcf7bf60181d555a11fb8df79a28cb2f9d8737d28fe913a252694ba2165c1d1/merged",
                "UpperDir": "/var/lib/docker/overlay2/8bcf7bf60181d555a11fb8df79a28cb2f9d8737d28fe913a252694ba2165c1d1/diff",
                "WorkDir": "/var/lib/docker/overlay2/8bcf7bf60181d555a11fb8df79a28cb2f9d8737d28fe913a252694ba2165c1d1/work"
            },
            "Name": "overlay2"
        },
        "RootFS": {
            "Type": "layers",
            "Layers": [
                "sha256:87e96a33b6fb724886ccda863dcbf85aab1119d380dc8d60fc7eeace293fc3a8",
                "sha256:7dfef4d05d0afc0383f5ebd8d9f3f7f7e17406f7e9e5744bead1a65e5ab47d0e",
                "sha256:51a646f7fd864ded24db2d87aaef69767cec8cfa63117bdca1a80cc4e0a77329",
                "sha256:9e2474c7feefaf8fe58cdb4d550edf725288c109f7842c819c734907406e9095",
                "sha256:d4d38bb7d4b3e7ea2b17acce63dd4b9ed926c7c0bbe028393228caf8933a4482",
                "sha256:4ec8c6264294fc505d796e17187c4c87099ff8f76ac8f337653e4643a9638d9e",
                "sha256:84a0a1068d25a8fa7b0f3e966b0313d31bc9e7405484da2a9ebf0fe1ebaf40dc",
                "sha256:956ab4664636dcce9d727ed0580f33ec510c8903ee827ce3ce72d4ba1184139b",
                "sha256:55f8b1bcde6acbd521024e3d10ed4a3a3bdf567cfd029b1876bd646ff502270b",
                "sha256:8c2496f1c442c3303273991e9cd5c4a5ffc0ab2ad7e2547976fe451095798390",
                "sha256:583acd9a453ded660462a120737ffec2def4416a573c6ea7ed2b132e403d9c08",
                "sha256:604c94797d42c86bfbc3d25e816a105b971805ae886bec8bc69bdae4ff20e1b6"
            ]
        },
        "Metadata": {
            "LastTagTime": "0001-01-01T00:00:00Z"
        }
    }
]
I solved the problem by debugging the setup script and learning that the image offers a custom directory (/var/custom) from which it runs scripts; I believe these run after the db2 service starts. While I suspect I could have specified an entrypoint, it would not necessarily work well with the ibmcom/db2 container. Posted below is my docker-compose file, showing the volume mounts I used for this particular container.
Note that I think this defines the mounts in a "bind" format, as opposed to the more common named volume. This approach let me pick where the source data is stored; had I specified a named volume, Docker would have persisted the volume data in some obscure place on my WSL system. There's probably a better approach, but I'm still learning Docker builds.
version: "3.2"
services:
ssc-file-generator-db2-test:
container_name: "ssc-file-generator-db2-test"
image: ibmcom/db2:latest
hostname: db2server
privileged: true
ports:
- 50100:50000
- 55100:55000
networks:
- back-tier
restart: "no"
volumes:
- setup-sql:/setup-sql
- db2-shell-scripts:/var/custom
- host-dirs:/host-dirs
# Uncomment below to use database outside the container
# - database:/database
env_file:
- acceptance-run.environment
networks:
back-tier: {}
volumes:
setup-sql:
driver: local
driver_opts:
o: bind
type: none
device: ./setup-sql
db2-shell-scripts:
driver: local
driver_opts:
o: bind
type: none
device: ./db2-shell-scripts
host-dirs:
driver: local
driver_opts:
o: bind
type: none
device: ./host-dirs
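For illustration, here is a hedged sketch of the kind of script that goes into ./db2-shell-scripts (mounted at /var/custom above); the file name and the init.sql path are assumptions, not the exact files from my repo:
#!/bin/bash
# ./db2-shell-scripts/01-init.sh (hypothetical name), mounted at /var/custom;
# the db2 image runs scripts from /var/custom after the instance is set up
echo "db2inst1:db2inst1" | chpasswd
su - db2inst1 -c "db2 -tvf /setup-sql/init.sql"   # init.sql mounted via the setup-sql volume
touch /command-run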

Unable to log into Elasticsearch

I am trying to set up Filebeat/Elasticsearch/Kibana to monitor log files for my application.
I have the fairly minimal compose file shown below.
When I go to localhost:19200, I was able to get Elasticsearch responses before I enabled security. Now it prompts me to sign in, but neither elastic/changeme nor kibana/changeme is accepted.
Attempting to change the password with curl by
curl -XPOST -u elastic:changeme 'localhost:19200/_security/user/elastic/_password' -H "Content-Type: application/json" -d "{
\"password\" : \"insecure\"
}"
also fails with an authentication error.
From the server log, the error is
elasticsearch_1 | {"type": "server", "timestamp": "2019-09-16T20:59:06,588+0000", "level": "INFO", "component": "o.e.x.s.a.AuthenticationService", "cluster.name": "compass", "node.name": "node-1", "cluster.uuid": "RZ_T1pT5Tp--3Jm8q89NVw", "node.id": "Q-lFQ58gRGOPPOEyzy6Vrw", "message": "Authentication of [elastic] was terminated by realm [reserved] - failed to authenticate user [elastic]" }
The JSON returned to curl is
{"error":{"root_cause":[{"type":"security_exception","reason":"failed to authenticate user [elastic]","header":{"WWW-Authenticate":"Basic realm=\"security\" charset=\"UTF-8\""}}],"type":"security_exception","reason":"failed to authenticate user [elastic]","header":{"WWW-Authenticate":"Basic realm=\"security\" charset=\"UTF-8\""}},"status":401}
What am I doing wrong?
docker-compose.yml
version: "2.4"
services:
# Accumulate logs into elasticstack
elasticsearch:
image: "docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION}"
environment:
- http.host=0.0.0.0
- transport.host=127.0.0.1
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms${ES_JVM_HEAP} -Xmx${ES_JVM_HEAP}"
mem_limit: ${ES_MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- ./config/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
- data:/usr/share/elasticsearch/data
#Port 9200 is available on the host. Need to for user to access as well as Packetbeat
ports: ['19200:9200']
#Healthcheck to confirm availability of ES. Other containers wait on this.
healthcheck:
test: ["CMD", "curl","-s" ,"-f", "-u", "elastic:${ES_PASSWORD}", "http://localhost:9200/_cat/health"]
#Internal network for the containers
networks: ['stack']
volumes:
#Es data
data:
driver: local
networks: {stack: {}}
.env
#ELK Stack
ELASTIC_VERSION=7.3.2
ES_PASSWORD=insecure
ES_MEM_LIMIT=2g
ES_JVM_HEAP=1024m
config/elasticsearch/elasticsearch.yml
cluster.name: compass
node.name: node-1
path.data: /usr/share/elasticsearch/data
http.port: 9200
network.host: 0.0.0.0
xpack.security:
  enabled: true
  transport.ssl.enabled: true
You should set up the built-in user passwords when you enable security, using
./bin/elasticsearch-setup-passwords interactive
See https://www.elastic.co/guide/en/elastic-stack-overview/current/get-started-built-in-users.html
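For example, with the compose file above the tool can be run inside the container (the service name elasticsearch matches that file; the tool prompts for new passwords for elastic, kibana, and the other built-in users):
docker-compose exec elasticsearch bin/elasticsearch-setup-passwords interactive
Afterwards, ES_PASSWORD in .env must match whatever was set for the elastic user, or the compose healthcheck will fail.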

How to make local changes reflect in docker container upon saving

I am trying to play with Docker and I am still in the process of setting up my development environment. I am trying to set up my container in such a way that I can save changes on the host machine and have them propagated to the container.
I thought that by using a volume I was mounting my local ./code directory into the container, and I was hoping that I could run docker-compose up, develop, save, and have those changes pushed into the container. However, after saving I do not see my changes reflected in the app until I kill it and run docker-compose up again. Am I using the correct concepts, or is this even possible with Docker?
Dockerfile
FROM node:10.13-alpine
ENV NODE_ENV production
# RUN mkdir /code
WORKDIR /code
docker-compose.yml
version: '2.1'
services:
  express-docker-test:
    build: code
    # command: npm install --production --silent && mv node_modules ../
    volumes:
      - E:\git\express-docker-test\code\:/code/
    environment:
      NODE_ENV: production
    expose:
      - "3000"
    ports:
      - "3000:3000"
    command: npm start
Here is a test repo I am experimenting with. I would like to be able to run docker-compose up, then make an edit to the response, say from
- Hello world!!!
+ Hello World.
and then refresh localhost:3000 and see the new response.
Results from docker inspect express-docker-test_express-docker-test
[
    {
        "Id": "sha256:791e8fbd5c871af53a37f5e9f5058e423f8ddf914b09f21ff1d80a40ea4f142f",
        "RepoTags": [
            "express-docker-test_express-docker-test:latest"
        ],
        "RepoDigests": [],
        "Parent": "sha256:e87553281ff90e49977fb6166c70c0e5ebf7bb98f0ae06468d7883dc0314c606",
        "Comment": "",
        "Created": "2019-07-14T19:15:47.1174538Z",
        "Container": "8426c3db78c23c1cfc594c500ce77adf81ddd43ca52ca58456ba5b49ec60fee9",
        "ContainerConfig": {
            "Hostname": "8426c3db78c2",
            "Domainname": "",
            "User": "",
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "Tty": false,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": [
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
                "NODE_VERSION=10.13.0",
                "YARN_VERSION=1.10.1",
                "NODE_ENV=production"
            ],
            "Cmd": [
                "/bin/sh",
                "-c",
                "#(nop) WORKDIR /code"
            ],
            "ArgsEscaped": true,
            "Image": "sha256:e87553281ff90e49977fb6166c70c0e5ebf7bb98f0ae06468d7883dc0314c606",
            "Volumes": null,
            "WorkingDir": "/code",
            "Entrypoint": null,
            "OnBuild": [],
            "Labels": {}
        },
        "DockerVersion": "18.09.2",
        "Author": "",
        "Config": {
            "Hostname": "",
            "Domainname": "",
            "User": "",
            "AttachStdin": false,
            "AttachStdout": false,
            "AttachStderr": false,
            "Tty": false,
            "OpenStdin": false,
            "StdinOnce": false,
            "Env": [
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
                "NODE_VERSION=10.13.0",
                "YARN_VERSION=1.10.1",
                "NODE_ENV=production"
            ],
            "Cmd": [
                "node"
            ],
            "ArgsEscaped": true,
            "Image": "sha256:e87553281ff90e49977fb6166c70c0e5ebf7bb98f0ae06468d7883dc0314c606",
            "Volumes": null,
            "WorkingDir": "/code",
            "Entrypoint": null,
            "OnBuild": [],
            "Labels": null
        },
        "Architecture": "amd64",
        "Os": "linux",
        "Size": 70255071,
        "VirtualSize": 70255071,
        "GraphDriver": {
            "Data": {
                "LowerDir": "/var/lib/docker/overlay2/8a0293f507ec80f68197b3bcb49b12e1a61c08f122a8a85060ab2d766d881a93/diff:/var/lib/docker/overlay2/a159e965ecf6e91533397def52fb1b3aef900c9793f933dc5120eed2381a37f4/diff:/var/lib/docker/overlay2/49f6493daf037bc88f9fb474ae71e36f13b58224082ff1f28ee367c795207c8d/diff",
                "MergedDir": "/var/lib/docker/overlay2/9c4a3691e175f15585034acf7146ce5601f9e9d9c9cb60023bd348172116ae5c/merged",
                "UpperDir": "/var/lib/docker/overlay2/9c4a3691e175f15585034acf7146ce5601f9e9d9c9cb60023bd348172116ae5c/diff",
                "WorkDir": "/var/lib/docker/overlay2/9c4a3691e175f15585034acf7146ce5601f9e9d9c9cb60023bd348172116ae5c/work"
            },
            "Name": "overlay2"
        },
        "RootFS": {
            "Type": "layers",
            "Layers": [
                "sha256:df64d3292fd6194b7865d7326af5255db6d81e9df29f48adde61a918fbd8c332",
                "sha256:387bc77dd3f21547b74752dd03f6018b5d750684c832c39cd239704052ce366e",
                "sha256:2faeaaebb1134fe62e2cc1603a761301e281c04a8e2e36ff2ac1005f7c06780f",
                "sha256:dfb2bf93a77a1907921d8f1622e831fa31c924c6c612e593b8937f95e42a0afa"
            ]
        },
        "Metadata": {
            "LastTagTime": "2019-07-14T19:31:00.4646905Z"
        }
    }
]
Don't RUN mkdir /code in the Dockerfile.
If you're mounting a local directory onto /code, your Dockerfile should just expect it to exist (having been mounted by docker run --volume=... or by Docker Compose, as you have).
Leave WORKDIR /code as it is.
I thought mounts had to be absolute paths, but if ./code works for you, you're good.
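One caveat beyond the mount itself: a server started with plain node or npm start will not pick up changed files until the process restarts, even when the bind mount works. A sketch (assuming an Express entry point at /code/index.js, a hypothetical file name) is to run the dev server under a file watcher such as nodemon:
services:
  express-docker-test:
    volumes:
      - ./code:/code
    # nodemon restarts node whenever files under /code change;
    # --legacy-watch polls, which is more reliable on Windows bind mounts
    command: npx nodemon --legacy-watch /code/index.js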
