Spring Boot image doesn't run when pulled from Docker Hub - docker

I have pushed a Spring Boot image to Docker Hub; it also depends on a PostgreSQL image.
When I run the pulled Spring Boot image, I get the error PSQLException: The connection attempt failed.
Should I also push the PostgreSQL image, or is there a way to push both images together?
version: '3.1'
services:
  app:
    container_name: app-springboot-furniture
    image: app-springboot-furniture
    build: ./
    ports:
      - "8090:8090"
    depends_on:
      - postgres
  postgres:
    image: postgres
    restart: always
    ports:
      - "5432:5432"
    volumes:
      - app-data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=123
      - POSTGRES_USER=postgres
      - POSTGRES_DB=nextlevel
  pgadmin:
    container_name: pgadmin4_container_furniture
    image: dpage/pgadmin4:5.5
    restart: always
    environment:
      PGADMIN_DEFAULT_EMAIL: admin@admin.com
      PGADMIN_DEFAULT_PASSWORD: secret
      PGADMIN_LISTEN_PORT: 80
    ports:
      - "8080:80"
volumes:
  app-data:
My Dockerfile:
FROM openjdk:11
EXPOSE 8090
ADD target/spring-boot-e-commerce-furniture.jar furniture.jar
ENTRYPOINT ["java","-jar","furniture.jar"]
The error:
Caused by: org.postgresql.util.PSQLException: The connection attempt failed.
at org.postgresql.core.v3.ConnectionFactoryImpl.openConnectionImpl(ConnectionFactoryImpl.java:297) ~[postgresql-42.2.14.jar!/:42.2.14]
at org.postgresql.core.ConnectionFactory.openConnection(ConnectionFactory.java:49) ~[postgresql-42.2.14.jar!/:42.2.14]
at org.postgresql.jdbc.PgConnection.<init>(PgConnection.java:217) ~[postgresql-42.2.14.jar!/:42.2.14]
at org.postgresql.Driver.makeConnection(Driver.java:458) ~[postgresql-42.2.14.jar!/:42.2.14]
at org.postgresql.Driver.connect(Driver.java:260) ~[postgresql-42.2.14.jar!/:42.2.14]
at com.zaxxer.hikari.util.DriverDataSource.getConnection(DriverDataSource.java:138) ~[HikariCP-3.4.5.jar!/:na]
at com.zaxxer.hikari.pool.PoolBase.newConnection(PoolBase.java:358) ~[HikariCP-3.4.5.jar!/:na]
at com.zaxxer.hikari.pool.PoolBase.newPoolEntry(PoolBase.java:206) ~[HikariCP-3.4.5.jar!/:na]
at com.zaxxer.hikari.pool.HikariPool.createPoolEntry(HikariPool.java:477) ~[HikariCP-3.4.5.jar!/:na]
at com.zaxxer.hikari.pool.HikariPool.checkFailFast(HikariPool.java:560) ~[HikariCP-3.4.5.jar!/:na]
at com.zaxxer.hikari.pool.HikariPool.<init>(HikariPool.java:115) ~[HikariCP-3.4.5.jar!/:na]
at com.zaxxer.hikari.HikariDataSource.getConnection(HikariDataSource.java:112) ~[HikariCP-3.4.5.jar!/:na]
at org.hibernate.engine.jdbc.connections.internal.DatasourceConnectionProviderImpl.getConnection(DatasourceConnectionProviderImpl.java:122) ~[hibernate-core-5.4.17.Final.jar!/:5.4.17.Final]
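A note on the likely cause, assuming the application's JDBC URL points at localhost: inside the running app container, localhost is the container itself, not the machine (or container) running PostgreSQL, so the connection fails regardless of where the image was pulled from. You don't need to push the official postgres image yourself; anyone who pulls the app image can start both services from the same compose file, as long as the app addresses the database by its compose service name. A minimal application.yml sketch under those assumptions (database name, user, and password taken from the compose file above):

spring:
  datasource:
    url: jdbc:postgresql://postgres:5432/nextlevel
    username: postgres
    password: "123"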

Related

NextJS gives an error during the build step in Docker when trying to connect to strapi api

I am trying to run NextJS with Strapi using Docker. The NextJS app gives an error during the npm build step, like this:
request to http://localhost:1337/api/categories?fields%5B0%5D=slug failed, reason: connect ECONNREFUSED
I tried changing localhost to strapi, but that didn't help either.
Here is my docker-compose.yaml file:
version: '3'
services:
  strapi:
    build:
      context: ./api
      dockerfile: Dockerfile
    environment:
      DATABASE_CLIENT: postgres
      DATABASE_NAME: strapi
      DATABASE_HOST: postgres
      DATABASE_PORT: 5432
      DATABASE_USERNAME: strapi
      DATABASE_PASSWORD: strapi
    volumes:
      - ./api:/srv/api
    ports:
      - '1337:1337'
    depends_on:
      - postgres
    networks:
      - strapi_app
  postgres:
    image: postgres
    environment:
      POSTGRES_DB: strapi
      POSTGRES_USER: strapi
      POSTGRES_PASSWORD: strapi
    ports:
      - "5432:5432"
    volumes:
      - ./data:/var/lib/postgresql/data
    networks:
      - strapi_app
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
    ports:
      - "3000:3000"
    networks:
      - strapi_app
    depends_on:
      - strapi
networks:
  strapi_app:
    driver: bridge
I created a network for the services in the docker-compose.yaml file, so they should all run on the same network, but I can't figure out what I'm missing.
I also couldn't find an effective solution on Stack Overflow or via a Google search.
Any help will be appreciated!
Thanks
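A hedged observation: depends_on and the shared strapi_app network only apply to running containers. If the failing request happens while the frontend image is being built (i.e., next build fetches from the API), the build container is not attached to strapi_app, so neither localhost:1337 nor strapi:1337 is reachable there. One possible workaround, assuming strapi is already up and publishing port 1337 on the host, and compose file format 3.4+ (which adds build.network), is to run the build on the host network:

frontend:
  build:
    context: ./frontend
    dockerfile: Dockerfile
    network: host   # build-time requests to http://localhost:1337 reach the host's published strapi port
  ports:
    - "3000:3000"
  networks:
    - strapi_app
  depends_on:
    - strapi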

The docker compose doesn't run

I tried to bring the Docker Compose stack up and received the following error:
$ docker-compose up
Creating network "evaluatehumanbalance_default" with the default driver
Pulling redis (redis:6.0.6)...
6.0.6: Pulling from library/redis
bf5952930446: Pull complete
911b8422b695: Pull complete
093b947e0ade: Pull complete
5b1d5f59e382: Pull complete
7a5f59580c0b: Pull complete
f9c63997c980: Pull complete
Digest: sha256:09c33840ec47815dc0351f1eca3befe741d7105b3e95bc8fdb9a7e4985b9e1e5
Status: Downloaded newer image for redis:6.0.6
Pulling zookeeper (confluentinc/cp-zookeeper:5.5.1)...
5.5.1: Pulling from confluentinc/cp-zookeeper
0cd7281e66ed: Pull complete
ee8abe01e201: Pull complete
19bb39092429: Pull complete
e8a27d9d6e72: Pull complete
cadbdfe0e559: Pull complete
184cb34023c9: Pull complete
Digest: sha256:1ef59713eea58401b333827dc44f23556cbc4b6437968a261f0b0a7b105126be
Status: Downloaded newer image for confluentinc/cp-zookeeper:5.5.1
Pulling kafka (confluentinc/cp-kafka:5.5.1)...
5.5.1: Pulling from confluentinc/cp-kafka
0cd7281e66ed: Already exists
ee8abe01e201: Already exists
19bb39092429: Already exists
e8a27d9d6e72: Already exists
8efe498170fa: Pull complete
b5050338516f: Pull complete
Digest: sha256:4de6a6f317991d858fe1bd84636c55dc17d9312db6d4a80be0f85354b9e481fc
Status: Downloaded newer image for confluentinc/cp-kafka:5.5.1
Pulling banking-simulation (gcr.io/simulation-screenshots/banking-simulation:)...
ERROR: Head https://gcr.io/v2/simulation-screenshots/banking-simulation/manifests/latest: unknown: Project 'project:simulation-screenshots' not found or deleted.
This is what happens when I try to run the image interactively:
$ docker run -it gcr.io/simulation-screenshots/banking-simulation
Unable to find image 'gcr.io/simulation-screenshots/banking-simulation:latest' locally
docker: Error response from daemon: Head https://gcr.io/v2/simulation-screenshots/banking-simulation/manifests/latest: unknown: Project 'project:simulation-screenshots' not found or deleted.
The docker-compose.yaml file is provided:
#
# This docker-compose file starts and runs:
# * A redis server
# * A 1-node kafka cluster
# * A 1-zookeeper ensemble
# * Kafka Connect with Redis Source
# * 3 Java Applications- Trucking-Simulation, Banking-Simulation, and STEDI
# * A Spark master
# * A Spark worker
version: '3.7'
services:
  redis:
    image: redis:6.0.6
    ports:
      - "6379:6379"
  zookeeper:
    image: confluentinc/cp-zookeeper:5.5.1
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: "2181"
  kafka:
    image: confluentinc/cp-kafka:5.5.1
    ports:
      - "9092:9092"
    environment:
      KAFKA_BROKER_ID: 0
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_ADVERTISED_LISTENERS: "INTERNAL://kafka:19092,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092"
      KAFKA_INTER_BROKER_LISTENER_NAME: "INTERNAL"
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
    depends_on:
      - "zookeeper"
  banking-simulation:
    image: gcr.io/simulation-screenshots/banking-simulation
    environment:
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_DB: 0
      KAFKA_BROKER: kafka:19092
    depends_on:
      - "kafka"
      - "redis"
  trucking-simulation:
    image: gcr.io/simulation-screenshots/trucking-simulation
    environment:
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_DB: 0
      KAFKA_BROKER: kafka:19092
    depends_on:
      - "kafka"
      - "redis"
  stedi:
    image: gcr.io/simulation-screenshots/stedi
    ports:
      - "4567:4567"
    environment:
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_DB: 0
      KAFKA_BROKER: kafka:19092
      KAFKA_RISK_TOPIC: risk-topic
    depends_on:
      - "kafka"
      - "redis"
  connect:
    image: gcr.io/simulation-screenshots/kafka-connect-redis-source
    ports:
      - "8083:8083"
      - "5005:5005"
    environment:
      CONNECT_BOOTSTRAP_SERVERS: "PLAINTEXT://kafka:19092"
      CONNECT_GROUP_ID: "connect"
      CONNECT_REST_ADVERTISED_HOST_NAME: "connect"
      CONNECT_PLUGIN_PATH: "/usr/share/java"
      CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_KEY_CONVERTER: "org.apache.kafka.connect.converters.ByteArrayConverter"
      CONNECT_VALUE_CONVERTER: "org.apache.kafka.connect.storage.StringConverter"
      CONNECT_CONFIG_STORAGE_TOPIC: "connect-config"
      CONNECT_OFFSET_STORAGE_TOPIC: "connect-offset"
      CONNECT_STATUS_STORAGE_TOPIC: "connect-status"
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_DEBUG: "y"
      DEBUG_SUSPEND_FLAG: "y"
      CLASSPATH: "/usr/share/java/kafka-connect-redis-source/*"
    depends_on:
      - "kafka"
      - "redis"
  spark:
    image: docker.io/bitnami/spark:3-debian-10
    environment:
      - SPARK_MODE=master
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
      - SPARK_SSL_ENABLED=no
    ports:
      - '8080:8080'
    volumes:
      - ./:/home/workspace/
      - ./spark/jars:/opt/bitnami/spark/.ivy2
  spark-worker-1:
    image: docker.io/bitnami/spark:3-debian-10
    environment:
      - SPARK_MODE=worker
      - SPARK_MASTER_URL=spark://spark:7077
      - SPARK_WORKER_MEMORY=1G
      - SPARK_WORKER_CORES=1
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
      - SPARK_SSL_ENABLED=no
    volumes:
      - ./:/home/workspace/
      - ./spark/jars:/opt/bitnami/spark/.ivy2
What's the issue here and how do I solve it?
The repository (in the form of a Google Cloud Platform project) has been deleted (or made inaccessible). As a result you're unable to retrieve the image from the repository.
You may want to contact the author of the documentation that you're using to ask for an update.
You can confirm this by browsing the link:
https://gcr.io/v2/simulation-screenshots/banking-simulation/manifests/latest
For extant repositories and images, Google Container Registry (GCR, hence gcr.io) will redirect HTTP GETs to a registry browser (https://console.cloud.google.com) so that you can browse the repository. Here's an (unrelated) example of how it usually works:
https://gcr.io/cadvisor/cadvisor:v0.40.0
The GCR registry has image:tag URLs of the form:
[us|eu].gcr.io/${PROJECT}/${IMAGE}:${TAG}
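For example, a fully qualified, pinned reference would look like this (hypothetical project and image names):
us.gcr.io/my-project/my-image:v1.2.3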
I changed the image version to 'latest'.
zookeeper:
  image: confluentinc/cp-zookeeper:latest
Found it here: https://github.com/confluentinc/cp-docker-images/issues/582

How can I call my Node.js backend Docker container from my frontend nginx Docker container with Docker Swarm

I have 3 containers that communicate via Docker Swarm. If I run my application over http and connect as http://domain.com, everything works fine, but if I use https (https://www.domain.com) my frontend can't reach the backend and I get the following error:
Ajax.js:10 POST https://www.domain.com/Init net::ERR_NAME_NOT_RESOLVED
Can someone help me solve the problem and understand the mistake? Thank you.
I leave my compose:
version: '3'
services:
  ssl:
    image: danieldent/nginx-ssl-proxy
    restart: always
    environment:
      UPSTREAM: myApp:8086
      SERVERNAME: dominio.com
    ports:
      - 80:80/tcp
      - 443:443/tcp
    depends_on:
      - myApp
    volumes:
      - ./nginxAPP:/etc/letsencrypt
      - ./nginxAPP:/etc/nginx/user.conf.d:ro
  bdd:
    restart: always
    image: postgres:12
    ports:
      - 5432:5432/tcp
    environment:
      POSTGRES_USER: user
      POSTGRES_PASSWORD: 12345
      POSTGRES_DB: miBDD
    volumes:
      - ./pgdata:/var/lib/postgresql/data
  pgadmin:
    image: dpage/pgadmin4
    ports:
      - 9095:80/tcp
    environment:
      PGADMIN_DEFAULT_EMAIL: user
      PGADMIN_DEFAULT_PASSWORD: 12345
      PROXY_X_FOR_COUNT: 3
      PROXY_X_PROTO_COUNT: 3
      PROXY_X_HOST_COUNT: 3
      PROXY_X_PORT_COUNT: 3
    volumes:
      - ./pgadminAplicattion:/var/lib/pgadmin
  myApp:
    restart: always
    image: appImage
    ports:
      - 8086:8086
    depends_on:
      - bdd
    working_dir: /usr/myApp
    environment:
      CONFIG_PATH: ../configuation
    command: "node server.js"
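net::ERR_NAME_NOT_RESOLVED is a DNS failure in the browser, not a Docker networking problem: the browser cannot resolve www.domain.com at all, so the request never reaches the swarm. Note also that the proxy is configured with SERVERNAME: dominio.com while the failing request goes to www.domain.com. A minimal sketch of the ssl service, assuming a DNS record for the www name exists and points at the same host:

ssl:
  image: danieldent/nginx-ssl-proxy
  restart: always
  environment:
    UPSTREAM: myApp:8086
    SERVERNAME: www.domain.com   # must match the exact hostname the browser requests
  ports:
    - 80:80/tcp
    - 443:443/tcp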

HIVE Docker container to use with existing Hadoop instances

I have 2 working containers for Hadoop HDFS (I built a Dockerfile for them, starting from the one here) and need to add a Hive instance to the set.
I cannot find a good working image to use (ideally version 2.3.4).
Is there anything you'd suggest, easy to add, that I could use?
Thank you!
Edit:
Here is what I tried:
hive-server:
  container_name: hive-server
  image: bde2020/hive:2.3.2-postgresql-metastore
  depends_on:
    - hadoop-namenode
  env_file:
    - ./hive_build/hadoop-hive.env
  environment:
    HIVE_CORE_CONF_javax_jdo_option_ConnectionURL: "jdbc:postgresql://hive-metastore/metastore"
    SERVICE_PRECONDITION: "hive-metastore:9083"
  ports:
    - "10000:10000"
  restart: unless-stopped
hive-metastore:
  container_name: hive-metastore
  image: bde2020/hive:2.3.2-postgresql-metastore
  depends_on:
    - hive-server
    - hive-metastore-postgresql
  env_file:
    - ./hive_build/hadoop-hive.env
  command: /opt/hive/bin/hive --service metastore
  environment:
    SERVICE_PRECONDITION: "hadoop-namenode:50070 hadoop-datanode1:50075 hive-metastore-postgresql:5432"
  ports:
    - "9083:9083"
  restart: unless-stopped
hive-metastore-postgresql:
  container_name: hive-metastore-postgresql
  image: bde2020/hive-metastore-postgresql:2.3.0
  ports:
    - "5433:5432"
  restart: unless-stopped
but when I enter the container and try to connect, I get an error:
docker exec -it hive-server bash
/opt/hive/bin/beeline -u jdbc:hive2://localhost:10000
error:
19/05/03 09:13:46 [main]: WARN jdbc.HiveConnection: Failed to connect to localhost:10000
Could not open connection to the HS2 server. Please check the server URI and if the URI is correct, then ask the administrator to check the server status.
Error: Could not open client transport with JDBC Uri: jdbc:hive2://localhost:10000: java.net.ConnectException: Connection refused (Connection refused) (state=08S01,code=0)
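A hedged observation on the compose snippet above: the dependency order looks inverted. hive-metastore has depends_on: hive-server, but HiveServer2 needs the metastore, so the server can come up before the metastore (and its Postgres backend) are ready and refuse connections on port 10000. A sketch of the reordered dependencies, keeping everything else unchanged (the bde2020 images use SERVICE_PRECONDITION to wait for the listed host:port pairs):

hive-server:
  image: bde2020/hive:2.3.2-postgresql-metastore
  depends_on:
    - hive-metastore   # the server waits for the metastore, not the other way around
  environment:
    SERVICE_PRECONDITION: "hive-metastore:9083"
hive-metastore:
  image: bde2020/hive:2.3.2-postgresql-metastore
  command: /opt/hive/bin/hive --service metastore
  depends_on:
    - hive-metastore-postgresql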

Docker Compose : SpringBoot application with mysql:8

Below is my docker-compose.yml, which composes two Dockerfiles:
mysql.dockerfile
FROM mysql:8
ADD DockerImages/mysql /docker-entrypoint-initdb.d/
springapp.dockerfile
FROM maven:3.5-jdk-8
ADD target/users-mysql.jar users-mysql.jar
ENTRYPOINT ["/usr/bin/java"]
CMD ["-jar", "users-mysql.jar"]
VOLUME /var/lib/spring-cloud/config-repo
docker-compose.yml
version: '3'
services:
  springapp:
    build:
      context: .
      dockerfile: springapp.dockerfile
    expose:
      - "8888"
    ports:
      - 8888:8888
    links:
      - mysqldb:mysqldb
    networks:
      - spring-demo-network
    depends_on:
      - mysqldb
    environment:
      - DATABASE_HOST=mysqldb
      - DATABASE_USER=gl_user
      - DATABASE_PASSWORD=global#123
      - DATABASE_NAME=test_db
  mysqldb:
    build:
      context: .
      dockerfile: mysql.dockerfile
    ports:
      - 3306:3306
    container_name: mysql-standalone
    networks:
      - spring-demo-network
    environment:
      MYSQL_DATABASE: test_db
      MYSQL_USER: gl_user
      MYSQL_PASSWORD: global#123
      MYSQL_ROOT_PASSWORD: root
      MYSQL_ROOT_HOST: '%'
networks:
  spring-demo-network:
    driver: bridge
I am unable to connect to the database; it throws multiple errors like:
Error:
java.sql.SQLException: null, message from server: "Host '172.18.0.3' is not allowed to connect to this MySQL server"
Caused by: java.net.ConnectException: Connection refused (Connection refused)
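Two hedged observations. "Host '172.18.0.3' is not allowed to connect" often means MySQL initialized its data directory on an earlier run without these MYSQL_USER/MYSQL_ROOT_HOST settings; the scripts in /docker-entrypoint-initdb.d and the environment-driven user creation only run on a fresh data directory, so removing the old mysql-standalone container (and any leftover volume) and recreating it forces re-initialization. "Connection refused" usually means the app connected before MySQL finished starting, since depends_on only waits for the container to start, not for the server to accept connections. Assuming a standard Spring Boot setup, the datasource should also address the database by its service name; a sketch of an application.yml using the values from this compose file (the JDBC flags are commonly needed with MySQL 8's default auth plugin):

spring:
  datasource:
    url: jdbc:mysql://mysqldb:3306/test_db?allowPublicKeyRetrieval=true&useSSL=false
    username: gl_user
    password: "global#123"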
