Connection Refused Cloud SQL Proxy Docker

I am having an issue connecting my Django app to a Google Cloud Postgres database through the Cloud SQL proxy when using Docker. The error I keep getting is 'connection refused'. Here's the error output:
cloud-sql-proxy_1 | 2021/12/30 19:11:30 current FDs rlimit set to 1048576, wanted limit is 8500. Nothing to do here.
cloud-sql-proxy_1 | 2021/12/30 19:11:30 using credential file for authentication; email=serviceacc@gamr-335802.iam.gserviceaccount.com
cloud-sql-proxy_1 | 2021/12/30 19:11:30 Listening on 127.0.0.1:5432 for gamr-335802:us-central1:gamr
cloud-sql-proxy_1 | 2021/12/30 19:11:30 Ready for new connections
cloud-sql-proxy_1 | 2021/12/30 19:11:30 Generated RSA key in 140.67675ms
gamr-backend_1 | Performing system checks...
gamr-backend_1 |
gamr-backend_1 | System check identified no issues (0 silenced).
gamr-backend_1 | Traceback (most recent call last):
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/backends/base/base.py", line 230, in ensure_connection
gamr-backend_1 | self.connect()
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/utils/asyncio.py", line 25, in inner
gamr-backend_1 | return func(*args, **kwargs)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/backends/base/base.py", line 211, in connect
gamr-backend_1 | self.connection = self.get_new_connection(conn_params)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/utils/asyncio.py", line 25, in inner
gamr-backend_1 | return func(*args, **kwargs)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/backends/postgresql/base.py", line 199, in get_new_connection
gamr-backend_1 | connection = Database.connect(**conn_params)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/psycopg2/__init__.py", line 122, in connect
gamr-backend_1 | conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
gamr-backend_1 | psycopg2.OperationalError: could not connect to server: Connection refused
gamr-backend_1 | Is the server running locally and accepting
gamr-backend_1 | connections on Unix domain socket "/cloudsql/gamr-335802:us-central1:gamr/.s.PGSQL.5432"?
gamr-backend_1 |
gamr-backend_1 |
gamr-backend_1 | The above exception was the direct cause of the following exception:
gamr-backend_1 |
gamr-backend_1 | Traceback (most recent call last):
gamr-backend_1 | File "manage.py", line 22, in <module>
gamr-backend_1 | main()
gamr-backend_1 | File "manage.py", line 18, in main
gamr-backend_1 | execute_from_command_line(sys.argv)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py", line 425, in execute_from_command_line
gamr-backend_1 | utility.execute()
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py", line 419, in execute
gamr-backend_1 | self.fetch_command(subcommand).run_from_argv(self.argv)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/base.py", line 373, in run_from_argv
gamr-backend_1 | self.execute(*args, **cmd_options)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/commands/runserver.py", line 66, in execute
gamr-backend_1 | super().execute(*args, **options)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/base.py", line 417, in execute
gamr-backend_1 | output = self.handle(*args, **options)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/commands/runserver.py", line 101, in handle
gamr-backend_1 | self.run(**options)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/commands/runserver.py", line 110, in run
gamr-backend_1 | self.inner_run(None, **options)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/commands/runserver.py", line 127, in inner_run
gamr-backend_1 | self.check_migrations()
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/base.py", line 505, in check_migrations
gamr-backend_1 | executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py", line 18, in __init__
gamr-backend_1 | self.loader = MigrationLoader(self.connection)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/loader.py", line 53, in __init__
gamr-backend_1 | self.build_graph()
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/loader.py", line 223, in build_graph
gamr-backend_1 | self.applied_migrations = recorder.applied_migrations()
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/recorder.py", line 77, in applied_migrations
gamr-backend_1 | if self.has_table():
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/recorder.py", line 55, in has_table
gamr-backend_1 | with self.connection.cursor() as cursor:
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/utils/asyncio.py", line 25, in inner
gamr-backend_1 | return func(*args, **kwargs)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/backends/base/base.py", line 270, in cursor
gamr-backend_1 | return self._cursor()
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/backends/base/base.py", line 246, in _cursor
gamr-backend_1 | self.ensure_connection()
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/utils/asyncio.py", line 25, in inner
gamr-backend_1 | return func(*args, **kwargs)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/backends/base/base.py", line 230, in ensure_connection
gamr-backend_1 | self.connect()
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
gamr-backend_1 | raise dj_exc_value.with_traceback(traceback) from exc_value
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/backends/base/base.py", line 230, in ensure_connection
gamr-backend_1 | self.connect()
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/utils/asyncio.py", line 25, in inner
gamr-backend_1 | return func(*args, **kwargs)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/backends/base/base.py", line 211, in connect
gamr-backend_1 | self.connection = self.get_new_connection(conn_params)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/utils/asyncio.py", line 25, in inner
gamr-backend_1 | return func(*args, **kwargs)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/django/db/backends/postgresql/base.py", line 199, in get_new_connection
gamr-backend_1 | connection = Database.connect(**conn_params)
gamr-backend_1 | File "/usr/local/lib/python3.8/site-packages/psycopg2/__init__.py", line 122, in connect
gamr-backend_1 | conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
gamr-backend_1 | django.db.utils.OperationalError: could not connect to server: Connection refused
gamr-backend_1 | Is the server running locally and accepting
gamr-backend_1 | connections on Unix domain socket "/cloudsql/gamr-335802:us-central1:gamr/.s.PGSQL.5432"?
gamr-backend_1 |
And here is my docker-compose file:
version: '3'
services:
  cloud-sql-proxy:
    image: gcr.io/cloudsql-docker/gce-proxy
    command: /cloud_sql_proxy -dir=/cloudsql -instances=gamr-335802:us-central1:gamr=tcp:127.0.0.1:5432 -credential_file=/django_backend/cloud/gamr-335802-e8f23fcc176c.json
    ports:
      - "127.0.0.1:5432:5432"
    volumes:
      - /cloudsql:/cloudsql
      - ./django_backend/cloud/gamr-335802-e8f23fcc176c.json:/django_backend/cloud/gamr-335802-e8f23fcc176c.json
    restart: always
    expose:
      - "5432"
  gamr-backend:
    build: django_backend/
    command: ./docker.sh
    volumes:
      - .:/django
      - /cloudsql:/cloudsql
    ports:
      - "8000:8000"
    depends_on:
      - cloud-sql-proxy
Because they are different services, they are running in different containers. Is there a way I can connect them? My Django app tries to connect at localhost:5432 but can't reach the proxy server. I'm really new to Docker and GCP, so I've been stuck on this for a bit; any help would be appreciated!
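One likely culprit, going by the proxy's own log line (Listening on 127.0.0.1:5432): the proxy binds only to loopback inside its own container, so no other container can reach it. The usual compose approach is to have the proxy listen on 0.0.0.0 (e.g. -instances=gamr-335802:us-central1:gamr=tcp:0.0.0.0:5432) and to point Django at the proxy's service name rather than localhost. A minimal settings.py sketch along those lines, where the database name, user, and password are placeholders, not values from the question:
import os

# Sketch: connect over TCP to the proxy's compose service name.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.environ.get('DB_NAME', 'postgres'),    # placeholder
        'USER': os.environ.get('DB_USER', 'postgres'),    # placeholder
        'PASSWORD': os.environ.get('DB_PASSWORD', ''),    # placeholder
        'HOST': 'cloud-sql-proxy',  # compose service name, not localhost
        'PORT': '5432',
    }
}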

Related

Can't connect to Milvus using Pymilvus inside docker. MilvusException: (code=2, message=Fail connecting to server on localhost:19530. Timeout)

I'm trying to connect to a Milvus server using Pymilvus. The server is up and running but I can't connect to it: MilvusException: (code=2, message=Fail connecting to server on localhost:19530. Timeout)
I'm running both using docker compose:
version: "3.5"
services:
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.0
    networks:
      app_net:
    environment:
      - ETCD_AUTO_COMPACTION_MODE=revision
      - ETCD_AUTO_COMPACTION_RETENTION=1000
      - ETCD_QUOTA_BACKEND_BYTES=4294967296
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2022-03-17T06-34-49Z
    networks:
      app_net:
    environment:
      MINIO_ACCESS_KEY: minioadmin
      MINIO_SECRET_KEY: minioadmin
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/minio:/minio_data
    command: minio server /minio_data
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
  standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.1.4
    networks:
      app_net:
        ipv4_address: 172.16.238.10
    command: ["milvus", "run", "standalone"]
    environment:
      ETCD_ENDPOINTS: etcd:2379
      MINIO_ADDRESS: minio:9000
    volumes:
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus
    ports:
      - "19530:19530"
    depends_on:
      - "etcd"
      - "minio"
  fastapi:
    build: ./fastapi
    command: uvicorn app.main:app --host 0.0.0.0
    restart: always
    networks:
      app_net:
        ipv4_address: 172.16.238.12
    environment:
      MILVUS_HOST: '172.16.238.10'
    depends_on:
      - standalone
    ports:
      - "80:80"
    volumes:
      - pfindertest:/data/fast
    healthcheck:
      test: ["CMD", "curl", "-f", "http://127.0.0.1:80"]
      interval: 30s
      timeout: 20s
      retries: 3
networks:
  app_net:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 172.16.238.0/24
          gateway: 172.16.238.1
volumes:
  pfindertest:
Dockerfile
FROM python:3.8
WORKDIR /code
COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
COPY ./app /code/app
main.py
from fastapi import FastAPI
import uvicorn
from pymilvus import connections

app = FastAPI()

@app.get("/")
def read_root():
    return {"Hello": "World"}

connections.connect(
    alias="default",
    host='localhost',
    port='19530'
)
I'm getting the following error:
milvus-1-fastapi-1 | Traceback (most recent call last):
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/pymilvus/client/grpc_handler.py", line 115, in _wait_for_channel_ready
milvus-1-fastapi-1 | grpc.channel_ready_future(self._channel).result(timeout=3)
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/grpc/_utilities.py", line 139, in result
milvus-1-fastapi-1 | self._block(timeout)
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/grpc/_utilities.py", line 85, in _block
milvus-1-fastapi-1 | raise grpc.FutureTimeoutError()
milvus-1-fastapi-1 | grpc.FutureTimeoutError
milvus-1-fastapi-1 |
milvus-1-fastapi-1 | During handling of the above exception, another exception occurred:
milvus-1-fastapi-1 |
milvus-1-fastapi-1 | Traceback (most recent call last):
milvus-1-fastapi-1 | File "/usr/local/bin/uvicorn", line 8, in <module>
milvus-1-fastapi-1 | sys.exit(main())
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1130, in __call__
milvus-1-fastapi-1 | return self.main(*args, **kwargs)
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1055, in main
milvus-1-fastapi-1 | rv = self.invoke(ctx)
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1404, in invoke
milvus-1-fastapi-1 | return ctx.invoke(self.callback, **ctx.params)
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/click/core.py", line 760, in invoke
milvus-1-fastapi-1 | return __callback(*args, **kwargs)
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/uvicorn/main.py", line 404, in main
milvus-1-fastapi-1 | run(
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/uvicorn/main.py", line 569, in run
milvus-1-fastapi-1 | server.run()
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/uvicorn/server.py", line 60, in run
milvus-1-fastapi-1 | return asyncio.run(self.serve(sockets=sockets))
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/asyncio/runners.py", line 44, in run
milvus-1-fastapi-1 | return loop.run_until_complete(main)
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
milvus-1-fastapi-1 | return future.result()
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/uvicorn/server.py", line 67, in serve
milvus-1-fastapi-1 | config.load()
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/uvicorn/config.py", line 474, in load
milvus-1-fastapi-1 | self.loaded_app = import_from_string(self.app)
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/uvicorn/importer.py", line 21, in import_from_string
milvus-1-fastapi-1 | module = importlib.import_module(module_str)
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/importlib/__init__.py", line 127, in import_module
milvus-1-fastapi-1 | return _bootstrap._gcd_import(name[level:], package, level)
milvus-1-fastapi-1 | File "<frozen importlib._bootstrap>", line 1014, in _gcd_import
milvus-1-fastapi-1 | File "<frozen importlib._bootstrap>", line 991, in _find_and_load
milvus-1-fastapi-1 | File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked
milvus-1-fastapi-1 | File "<frozen importlib._bootstrap>", line 671, in _load_unlocked
milvus-1-fastapi-1 | File "<frozen importlib._bootstrap_external>", line 843, in exec_module
milvus-1-fastapi-1 | File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
milvus-1-fastapi-1 | File "/code/./app/main.py", line 11, in <module>
milvus-1-fastapi-1 | connections.connect(
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/pymilvus/orm/connections.py", line 262, in connect
milvus-1-fastapi-1 | connect_milvus(**kwargs, password=password)
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/pymilvus/orm/connections.py", line 233, in connect_milvus
milvus-1-fastapi-1 | gh._wait_for_channel_ready()
milvus-1-fastapi-1 | File "/usr/local/lib/python3.8/site-packages/pymilvus/client/grpc_handler.py", line 118, in _wait_for_channel_ready
milvus-1-fastapi-1 | raise MilvusException(Status.CONNECT_FAILED, f'Fail connecting to server on {self._address}. Timeout')
milvus-1-fastapi-1 | pymilvus.exceptions.MilvusException: <MilvusException: (code=2, message=Fail connecting to server on localhost:19530. Timeout)>
milvus-1-fastapi-1 exited with code 1
The Milvus server appears to be working so that's not the problem.
NAME COMMAND SERVICE STATUS PORTS
milvus-1-fastapi-1 "uvicorn app.main:ap…" fastapi restarting 0.0.0.0:80->80/tcp
milvus-etcd "etcd -advertise-cli…" etcd running 2379-2380/tcp
milvus-minio "/usr/bin/docker-ent…" minio running (healthy) 9000/tcp
milvus-standalone "/tini -- milvus run…" standalone running 0.0.0.0:9091->9091/tcp, 0.0.0.0:19530->19530/tcp
I'm running Docker on a Mac, if that is important. I tried using gitpod.io but the error remains.
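Worth noting: the compose file already passes MILVUS_HOST into the fastapi container, but main.py hard-codes localhost, which inside that container refers to the container itself rather than to milvus-standalone. A minimal sketch of honouring the environment variable instead (the "standalone" fallback is an assumption based on the compose service name):
import os
from fastapi import FastAPI
from pymilvus import connections

app = FastAPI()

# Connect using the host injected by docker-compose; fall back to the
# compose service name (an assumption) rather than localhost.
connections.connect(
    alias="default",
    host=os.environ.get("MILVUS_HOST", "standalone"),
    port="19530",
)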

Docker containers inside same network not able to communicate with each other

I have three docker containers running in the same network. I used docker-compose to bring up the containers. The docker-compose script is:
version: '3.5'
services:
  ### Jesse's Workspace ################################################
  jesse:
    image: salehmir/jesse:latest
    depends_on:
      - postgres
      - redis
    tty: true
    env_file:
      - ../.env
    ports:
      - "9000:9000"
      # Jupyter Port
      - "8888:8888"
    volumes:
      - ../:/home
    container_name: jesse
    command: bash -c "jesse install-live --no-strict && jesse run"
  ### PostgreSQL ################################################
  postgres:
    image: postgres:14-alpine
    restart: always
    environment:
      - POSTGRES_USER=jesse_user
      - POSTGRES_PASSWORD=password
      - POSTGRES_DB=jesse_db
    ports:
      - "5432:5432"
    volumes:
      - postgres-data:/var/lib/postgresql/data
    container_name: postgres
  ### Redis ################################################
  redis:
    image: redis:6-alpine
    ports:
      - "6379:6379"
    container_name: redis
    command: redis-server --save "" --appendonly no
volumes:
  postgres-data:
Since I have not specified any networks, I have checked that all the containers are running inside the docker_default bridge network. DNS resolution by container name works fine inside the containers, but ping does not work, and neither does any other kind of connectivity.
Since I have published port 6379 of the redis container, I am able to connect to Redis from my host system at 127.0.0.1:6379. But from any other container the connection is refused. I spun up another Ubuntu container inside the same network and noticed that I have no internet connectivity inside the containers, i.e. no outgoing traffic. I am guessing this is something OS-specific, as the same setup runs smoothly on my Mac.
I have checked the ufw firewall status, which is inactive.
The jesse container is trying to connect to redis, which is not accepting any connections.
Traceback (most recent call last):
jesse | File "/usr/local/bin/jesse", line 33, in <module>
jesse | sys.exit(load_entry_point('jesse', 'console_scripts', 'jesse')())
jesse | File "/usr/local/bin/jesse", line 25, in importlib_load_entry_point
jesse | return next(matches).load()
jesse | File "/usr/local/lib/python3.9/importlib/metadata.py", line 77, in load
jesse | module = import_module(match.group('module'))
jesse | File "/usr/local/lib/python3.9/importlib/__init__.py", line 127, in import_module
jesse | return _bootstrap._gcd_import(name[level:], package, level)
jesse | File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
jesse | File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
jesse | File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
jesse | File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
jesse | File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
jesse | File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
jesse | File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
jesse | File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
jesse | File "<frozen importlib._bootstrap_external>", line 850, in exec_module
jesse | File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
jesse | File "/jesse-docker/jesse/__init__.py", line 12, in <module>
jesse | from jesse.services import auth as authenticator
jesse | File "/jesse-docker/jesse/services/auth.py", line 5, in <module>
jesse | from jesse.services.env import ENV_VALUES
jesse | File "/jesse-docker/jesse/services/env.py", line 18, in <module>
jesse | if jh.is_unit_testing():
jesse | File "/jesse-docker/jesse/helpers.py", line 368, in is_unit_testing
jesse | from jesse.config import config
jesse | File "/jesse-docker/jesse/config.py", line 2, in <module>
jesse | from jesse.modes.utils import get_exchange_type
jesse | File "/jesse-docker/jesse/modes/utils.py", line 3, in <module>
jesse | from jesse.services import logger
jesse | File "/jesse-docker/jesse/services/logger.py", line 3, in <module>
jesse | from jesse.services.redis import sync_publish
jesse | File "/jesse-docker/jesse/services/redis.py", line 23, in <module>
jesse | async_redis = asyncio.run(init_redis())
jesse | File "/usr/local/lib/python3.9/asyncio/runners.py", line 44, in run
jesse | return loop.run_until_complete(main)
jesse | File "/usr/local/lib/python3.9/asyncio/base_events.py", line 642, in run_until_complete
jesse | return future.result()
jesse | File "/jesse-docker/jesse/services/redis.py", line 12, in init_redis
jesse | return await aioredis.create_redis_pool(
jesse | File "/usr/local/lib/python3.9/site-packages/aioredis/commands/__init__.py", line 188, in create_redis_pool
jesse | pool = await create_pool(address, db=db,
jesse | File "/usr/local/lib/python3.9/site-packages/aioredis/pool.py", line 58, in create_pool
jesse | await pool._fill_free(override_min=False)
jesse | File "/usr/local/lib/python3.9/site-packages/aioredis/pool.py", line 383, in _fill_free
jesse | conn = await self._create_new_connection(self._address)
jesse | File "/usr/local/lib/python3.9/site-packages/aioredis/connection.py", line 111, in create_connection
jesse | reader, writer = await asyncio.wait_for(open_connection(
jesse | File "/usr/local/lib/python3.9/asyncio/tasks.py", line 442, in wait_for
jesse | return await fut
jesse | File "/usr/local/lib/python3.9/site-packages/aioredis/stream.py", line 23, in open_connection
jesse | transport, _ = await get_event_loop().create_connection(
jesse | File "/usr/local/lib/python3.9/asyncio/base_events.py", line 1056, in create_connection
jesse | raise exceptions[0]
jesse | File "/usr/local/lib/python3.9/asyncio/base_events.py", line 1041, in create_connection
jesse | sock = await self._connect_sock(
jesse | File "/usr/local/lib/python3.9/asyncio/base_events.py", line 955, in _connect_sock
jesse | await self.sock_connect(sock, address)
jesse | File "/usr/local/lib/python3.9/asyncio/selector_events.py", line 502, in sock_connect
jesse | return await fut
jesse | File "/usr/local/lib/python3.9/asyncio/selector_events.py", line 537, in _sock_connect_cb
jesse | raise OSError(err, f'Connect call failed {address}')
jesse | TimeoutError: [Errno 110] Connect call failed ('172.18.0.2', 6379)
The python code that is used to connect:
async def init_redis():
    return await aioredis.create_redis_pool(
        address=(ENV_VALUES['REDIS_HOST'], ENV_VALUES['REDIS_PORT']),
        password=ENV_VALUES['REDIS_PASSWORD'] or None,
        db=int(ENV_VALUES.get('REDIS_DB') or 0),
    )
The .env values
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_PASSWORD=
docker ps:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
2a741ee69b20 postgres:14-alpine "docker-entrypoint.s…" About an hour ago Up 58 minutes 0.0.0.0:5432->5432/tcp, :::5432->5432/tcp postgres
9012709c0bd1 redis:6-alpine "docker-entrypoint.s…" About an hour ago Up 58 minutes 0.0.0.0:6379->6379/tcp, :::6379->6379/tcp redis
I tried to ping the redis container from postgres container like this:
docker exec -it 2a ping redis
PING redis (172.18.0.2): 56 data bytes
^C
--- redis ping statistics ---
26 packets transmitted, 0 packets received, 100% packet loss
So DNS resolution works fine, but container-to-container communication is not working, whereas I can connect to redis from my host system.
The containers have to run in the same Docker network: https://docs.docker.com/compose/networking/
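When DNS resolves but nothing connects, a raw TCP probe helps separate name resolution from actual reachability. A throwaway sketch (the filename is hypothetical) that could be run inside the jesse container with docker exec -it jesse python check_redis.py:
# check_redis.py -- raw TCP probe to the redis service, no client library involved
import socket

try:
    sock = socket.create_connection(("redis", 6379), timeout=5)
    print("TCP connect OK:", sock.getpeername())
    sock.close()
except OSError as exc:
    print("TCP connect failed:", exc)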

Neo4j - cartography ConnectionRefusedError?

I am getting the following error when running docker compose, but when I go to bolt://localhost:7687 I do see the graph and all the labels and nodes.
The Dockerfile and Docker compose file are attached.
ERROR:neo4j:Unable to retrieve routing information
cartography-cartography-1 | Traceback (most recent call last):
cartography-cartography-1 | File "/usr/local/bin/cartography", line 33, in <module>
cartography-cartography-1 | sys.exit(load_entry_point('cartography', 'console_scripts', 'cartography')())
cartography-cartography-1 | File "/var/cartography/cartography/cli.py", line 553, in main
cartography-cartography-1 | sys.exit(CLI(default_sync, prog='cartography').main(argv))
cartography-cartography-1 | File "/var/cartography/cartography/cli.py", line 533, in main
cartography-cartography-1 | return cartography.sync.run_with_config(self.sync, config)
cartography-cartography-1 | File "/var/cartography/cartography/sync.py", line 163, in run_with_config
cartography-cartography-1 | return sync.run(neo4j_driver, config)
cartography-cartography-1 | File "/var/cartography/cartography/sync.py", line 81, in run
cartography-cartography-1 | with neo4j_driver.session() as neo4j_session:
cartography-cartography-1 | File "/var/cartography/cartography/experimental_neo4j_4x_support.py", line 167, in wrapper
cartography-cartography-1 | patch_session_obj(neo4j_session)
cartography-cartography-1 | File "/var/cartography/cartography/experimental_neo4j_4x_support.py", line 155, in patch_session_obj
cartography-cartography-1 | detect_neo4j_version(neo4j_session)
cartography-cartography-1 | File "/var/cartography/cartography/experimental_neo4j_4x_support.py", line 143, in detect_neo4j_version
cartography-cartography-1 | result = neo4j_session.run(
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/work/simple.py", line 204, in run
cartography-cartography-1 | self._connect(self._config.default_access_mode)
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/work/simple.py", line 108, in _connect
cartography-cartography-1 | super()._connect(access_mode)
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/work/__init__.py", line 75, in _connect
cartography-cartography-1 | self._pool.update_routing_table(
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/io/__init__.py", line 1213, in update_routing_table
cartography-cartography-1 | raise ServiceUnavailable("Unable to retrieve routing information")
cartography-cartography-1 | neo4j.exceptions.ServiceUnavailable: Unable to retrieve routing information
cartography-cartography-1 | INFO:cartography.sync:Starting sync with update tag '1661424422'
cartography-cartography-1 | Traceback (most recent call last):
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/io/_socket.py", line 147, in _connect
cartography-cartography-1 | s.connect(resolved_address)
cartography-cartography-1 | ConnectionRefusedError: [Errno 111] Connection refused
cartography-cartography-1 |
cartography-cartography-1 | During handling of the above exception, another exception occurred:
cartography-cartography-1 |
cartography-cartography-1 | Traceback (most recent call last):
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/io/_socket.py", line 284, in connect
cartography-cartography-1 | s = BoltSocket._connect(resolved_address, timeout, keep_alive)
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/io/_socket.py", line 164, in _connect
cartography-cartography-1 | raise ServiceUnavailable(
cartography-cartography-1 | neo4j.exceptions.ServiceUnavailable: Failed to establish connection to ResolvedIPv4Address(('127.0.0.1', 7687)) (reason [Errno 111] Connection refused)
cartography-cartography-1 |
cartography-cartography-1 | The above exception was the direct cause of the following exception:
cartography-cartography-1 |
cartography-cartography-1 | Traceback (most recent call last):
cartography-cartography-1 | File "/usr/local/bin/cartography", line 33, in <module>
cartography-cartography-1 | sys.exit(load_entry_point('cartography', 'console_scripts', 'cartography')())
cartography-cartography-1 | File "/var/cartography/cartography/cli.py", line 553, in main
cartography-cartography-1 | sys.exit(CLI(default_sync, prog='cartography').main(argv))
cartography-cartography-1 | File "/var/cartography/cartography/cli.py", line 533, in main
cartography-cartography-1 | return cartography.sync.run_with_config(self.sync, config)
cartography-cartography-1 | File "/var/cartography/cartography/sync.py", line 163, in run_with_config
cartography-cartography-1 | return sync.run(neo4j_driver, config)
cartography-cartography-1 | File "/var/cartography/cartography/sync.py", line 81, in run
cartography-cartography-1 | with neo4j_driver.session() as neo4j_session:
cartography-cartography-1 | File "/var/cartography/cartography/experimental_neo4j_4x_support.py", line 167, in wrapper
cartography-cartography-1 | patch_session_obj(neo4j_session)
cartography-cartography-1 | File "/var/cartography/cartography/experimental_neo4j_4x_support.py", line 155, in patch_session_obj
cartography-cartography-1 | detect_neo4j_version(neo4j_session)
cartography-cartography-1 | File "/var/cartography/cartography/experimental_neo4j_4x_support.py", line 143, in detect_neo4j_version
cartography-cartography-1 | result = neo4j_session.run(
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/work/simple.py", line 204, in run
cartography-cartography-1 | self._connect(self._config.default_access_mode)
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/work/simple.py", line 108, in _connect
cartography-cartography-1 | super()._connect(access_mode)
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/work/__init__.py", line 82, in _connect
cartography-cartography-1 | self._connection = self._pool.acquire(
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/io/__init__.py", line 926, in acquire
cartography-cartography-1 | return self._acquire(self.address, deadline)
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/io/__init__.py", line 801, in _acquire
cartography-cartography-1 | return connection_creator()
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/io/__init__.py", line 729, in connection_creator
cartography-cartography-1 | connection = self.opener(
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/io/__init__.py", line 907, in opener
cartography-cartography-1 | return Bolt.open(
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/io/__init__.py", line 327, in open
cartography-cartography-1 | s, pool_config.protocol_version, handshake, data = BoltSocket.connect(
cartography-cartography-1 | File "/usr/local/lib/python3.9/site-packages/neo4j/io/_socket.py", line 311, in connect
cartography-cartography-1 | raise ServiceUnavailable(
cartography-cartography-1 | neo4j.exceptions.ServiceUnavailable: Couldn't connect to localhost:7687 (resolved to ('127.0.0.1:7687', '[::1]:7687')):
cartography-cartography-1 | Failed to establish connection to ResolvedIPv4Address(('127.0.0.1', 7687)) (reason [Errno 111] Connection refused)
cartography-cartography-1 | Failed to establish connection to ResolvedIPv6Address(('::1', 7687, 0, 0)) (reason [Errno 99] Cannot assign requested address)
cartography-cartography-1 exited with code 1
Docker compose:
version: "3.7"
services:
  neo4j:
    image: neo4j:4.4.5-community
    restart: unless-stopped
    ports:
      - "7474:7474"
      - "7687:7687"
    volumes:
      - ./.compose/neo4j/conf:/conf
      - ./.compose/neo4j/data:/data
      - ./.compose/neo4j/import:/import
      - ./.compose/neo4j/logs:/logs
      - ./.compose/neo4j/plugins:/plugins
    environment:
      # Raise memory limits:
      - dbms.memory.pagecache.size=1G
      - dbms.memory.heap.initial_size=1G
      - dbms.memory.heap.max_size=1G
      # Auth:
      - NEO4J_AUTH=none
      # Add APOC and GDS:
      - apoc.export.file.enabled=true
      - apoc.import.file.enabled=true
      - apoc.import.file.use_neo4j_config=true
      - NEO4JLABS_PLUGINS=["apoc", "graph-data-science"]
      - dbms.security.procedures.allowlist=gds.*, apoc.*
      - dbms.security.procedures.unrestricted=gds.*, apoc.*
      # Networking:
      - dbms.default_listen_address=0.0.0.0
      - dbms.connector.bolt.listen_address=:7687
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:7474"]
      interval: 10s
      timeout: 10s
      retries: 10
  cartography:
    build:
      context: .
      dockerfile: dist.Dockerfile
    init: true
    restart: on-failure
    depends_on:
      - neo4j
    environment:
      - EXPERIMENTAL_NEO4J_4X_SUPPORT=True
    volumes:
      - /Users/stevesolun/.aws:/root/.aws/
Dockerfile:
FROM python:3.9-slim
# the UID and GID to run cartography as
# (https://github.com/hexops/dockerfile#do-not-use-a-uid-below-10000).
ARG uid=0 #10001
ARG gid=0 #10001
COPY . /var/cartography
WORKDIR /var/cartography
RUN apt-get update --fix-missing
RUN apt --allow-unauthenticated update -y
RUN apt-get install vim telnet curl gcc -y
RUN pip install -U -e .
USER ${uid}:${gid}
# verify that the binary at least runs
RUN cartography -h
ENTRYPOINT ["cartography"]
CMD ["-v", "--neo4j-uri=bolt://localhost:7687", "--aws-sync-all-profiles"]
I have run into this problem before. When you use a localhost URL inside a container, it refers to the container itself, not to the host machine, AFAIK.
There are two options you can go about. One is to add:
network_mode: host
to the cartography service. Then localhost points to the host machine and it should find Neo4j at localhost:7687.
I don't prefer this option, though. When using docker-compose, you can instead add a link to your cartography service:
links:
  - neo4j
Links are deprecated, so it probably works even without them: essentially, instead of specifying --neo4j-uri=bolt://localhost:7687 you can specify --neo4j-uri=bolt://neo4j:7687 to point at the Neo4j instance's container. The hostname is simply the compose service name.
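To verify the service-name URI independently of cartography, a small probe with the Neo4j Python driver can be run from inside the cartography container; a sketch, assuming the driver is installed there and that auth is disabled (NEO4J_AUTH=none, as in the compose file):
from neo4j import GraphDatabase

# Bolt URI uses the compose service name; auth=None matches NEO4J_AUTH=none.
driver = GraphDatabase.driver("bolt://neo4j:7687", auth=None)
with driver.session() as session:
    print(session.run("RETURN 1 AS ok").single()["ok"])
driver.close()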

Redis and flask app, web container does not want to start IndexError: pop from empty list

I am trying to solve a college assignment: running an application written in Flask + Redis. I have to use Docker for this. With docker-compose up, Redis starts correctly, but the Flask application throws errors and I don't really know why.
Here is some code from the Flask application.
If I understand this correctly, I need to declare environment variables in the Dockerfile, because it is from these that the application will get the IP address and port of Redis:
app = Flask(__name__)
redis = redis.Redis(host=os.environ.get('REDIS_HOST'),
                    password=None,
                    port=os.environ.get('REDIS_PORT'),
                    db=0)
My Dockerfile
ARG PYTHON_VERSION=3.7-alpine
FROM python:${PYTHON_VERSION}
ENV REDIS_HOST 127.0.0.1 \
REDIS_PORT 6379
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . .
CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:8000", "main:app"]
And docker-compose
version: "3"
services:
  web:
    build: .
    container_name: "python_app"
    ports:
      - "8000:8000"
    depends_on:
      - redis
  redis:
    image: "redis:alpine"
    container_name: "redis"
    ports:
      - "6379:6379"
docker-compose build
Creating network "tt_default" with the default driver
Pulling redis (redis:alpine)...
alpine: Pulling from library/redis
Digest: sha256:fa785f9bd167b94a6b30210ae32422469f4b0f805f4df12733c2f177f500d1ba
Status: Downloaded newer image for redis:alpine
Building web
Sending build context to Docker daemon 10.75kB
Step 1/7 : ARG PYTHON_VERSION=3.7-alpine
Step 2/7 : FROM python:${PYTHON_VERSION}
---> a436fb2c575c
Step 3/7 : ENV REDIS_HOST 127.0.0.1 REDIS_PORT 6379
---> Running in ad3a17ce15e9
Removing intermediate container ad3a17ce15e9
---> 937330185f34
Step 4/7 : COPY requirements.txt .
---> d81cbb22f113
Step 5/7 : RUN pip install -r requirements.txt
---> Running in 1c0bac282a92
Collecting Flask==1.1.2
Downloading Flask-1.1.2-py2.py3-none-any.whl (94 kB)
Collecting redis==3.4.1
Downloading redis-3.4.1-py2.py3-none-any.whl (71 kB)
Collecting gunicorn<20,>=19
Downloading gunicorn-19.10.0-py2.py3-none-any.whl (113 kB)
Collecting itsdangerous>=0.24
Downloading itsdangerous-2.0.1-py3-none-any.whl (18 kB)
Collecting click>=5.1
Downloading click-8.0.1-py3-none-any.whl (97 kB)
Collecting Jinja2>=2.10.1
Downloading Jinja2-3.0.1-py3-none-any.whl (133 kB)
Collecting Werkzeug>=0.15
Downloading Werkzeug-2.0.1-py3-none-any.whl (288 kB)
Collecting importlib-metadata
Downloading importlib_metadata-4.8.1-py3-none-any.whl (17 kB)
Collecting MarkupSafe>=2.0
Downloading MarkupSafe-2.0.1.tar.gz (18 kB)
Collecting typing-extensions>=3.6.4
Downloading typing_extensions-3.10.0.2-py3-none-any.whl (26 kB)
Collecting zipp>=0.5
Downloading zipp-3.5.0-py3-none-any.whl (5.7 kB)
Building wheels for collected packages: MarkupSafe
Building wheel for MarkupSafe (setup.py): started
Building wheel for MarkupSafe (setup.py): finished with status 'done'
Created wheel for MarkupSafe: filename=MarkupSafe-2.0.1-py3-none-any.whl size=9761 sha256=43b5e0d8ef8bcbadc8e8d6845f85b770ad2b918760d7541b8c3f9c403ab04b14
Stored in directory: /root/.cache/pip/wheels/1a/18/04/e3b5bd888f000c2716bccc94a565239f9defc47ef93d9e7bea
Successfully built MarkupSafe
Installing collected packages: zipp, typing-extensions, MarkupSafe, importlib-metadata, Werkzeug, Jinja2, itsdangerous, click, redis, gunicorn, Flask
Successfully installed Flask-1.1.2 Jinja2-3.0.1 MarkupSafe-2.0.1 Werkzeug-2.0.1 click-8.0.1 gunicorn-19.10.0 importlib-metadata-4.8.1 itsdangerous-2.0.1 redis-3.4.1 typing-extensions-3.10.0.2 zipp-3.5.0
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
Removing intermediate container 1c0bac282a92
---> 6dddf2a4ad27
Step 6/7 : COPY . .
---> 37bd8f541844
Step 7/7 : CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:8000", "main:app"]
---> Running in f19c3226fff2
Removing intermediate container f19c3226fff2
---> 861a5c53a545
Successfully built 861a5c53a545
Successfully tagged tt_web:latest
WARNING: Image for service web was built because it did not already exist. To rebuild this image you must use `docker-compose build` or `docker-compose up --build`.
Creating redis ... done
Creating python_app ... done
Attaching to redis, python_app
Part of the logs I get after using docker-compose up:
Attaching to redis, python_app
redis | 1:C 27 Sep 2021 14:36:16.063 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
redis | 1:C 27 Sep 2021 14:36:16.063 # Redis version=6.2.5, bits=64, commit=00000000, modified=0, pid=1, just started
redis | 1:C 27 Sep 2021 14:36:16.063 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
redis | 1:M 27 Sep 2021 14:36:16.064 * monotonic clock: POSIX clock_gettime
redis | 1:M 27 Sep 2021 14:36:16.065 * Running mode=standalone, port=6379.
redis | 1:M 27 Sep 2021 14:36:16.065 # Server initialized
redis | 1:M 27 Sep 2021 14:36:16.065 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect.
redis | 1:M 27 Sep 2021 14:36:16.065 * Ready to accept connections
python_app | [2021-09-27 14:36:16 +0000] [1] [INFO] Starting gunicorn 19.10.0
python_app | [2021-09-27 14:36:16 +0000] [1] [INFO] Listening at: http://0.0.0.0:8000 (1)
python_app | [2021-09-27 14:36:16 +0000] [1] [INFO] Using worker: sync
python_app | [2021-09-27 14:36:16 +0000] [8] [INFO] Booting worker with pid: 8
python_app | [2021-09-27 14:36:16 +0000] [9] [INFO] Booting worker with pid: 9
python_app | [2021-09-27 14:36:16 +0000] [8] [ERROR] Exception in worker process
python_app | Traceback (most recent call last):
python_app | File "/usr/local/lib/python3.7/site-packages/redis/connection.py", line 1179, in get_connection
python_app | connection = self._available_connections.pop()
python_app | IndexError: pop from empty list
python_app |
python_app | During handling of the above exception, another exception occurred:
python_app |
python_app | Traceback (most recent call last):
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 586, in spawn_worker
python_app | worker.init_process()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/workers/base.py", line 135, in init_process
python_app | self.load_wsgi()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/workers/base.py", line 144, in load_wsgi
python_app | self.wsgi = self.app.wsgi()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/app/base.py", line 67, in wsgi
python_app | self.callable = self.load()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py", line 52, in load
python_app | return self.load_wsgiapp()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py", line 41, in load_wsgiapp
python_app | return util.import_app(self.app_uri)
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/util.py", line 350, in import_app
python_app | __import__(module)
python_app | File "/main.py", line 21, in <module>
python_app | redis.set('sessionvisitors', 0)
python_app | File "/usr/local/lib/python3.7/site-packages/redis/client.py", line 1766, in set
python_app | return self.execute_command('SET', *pieces)
python_app | File "/usr/local/lib/python3.7/site-packages/redis/client.py", line 875, in execute_command
python_app | conn = self.connection or pool.get_connection(command_name, **options)
python_app | File "/usr/local/lib/python3.7/site-packages/redis/connection.py", line 1181, in get_connection
python_app | connection = self.make_connection()
python_app | File "/usr/local/lib/python3.7/site-packages/redis/connection.py", line 1220, in make_connection
python_app | return self.connection_class(**self.connection_kwargs)
python_app | File "/usr/local/lib/python3.7/site-packages/redis/connection.py", line 502, in __init__
python_app | self.port = int(port)
python_app | TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
python_app | [2021-09-27 14:36:16 +0000] [8] [INFO] Worker exiting (pid: 8)
python_app | [2021-09-27 14:36:16 +0000] [10] [INFO] Booting worker with pid: 10
python_app | [2021-09-27 14:36:16 +0000] [9] [ERROR] Exception in worker process
python_app | Traceback (most recent call last):
python_app | File "/usr/local/lib/python3.7/site-packages/redis/connection.py", line 1179, in get_connection
python_app | connection = self._available_connections.pop()
python_app | IndexError: pop from empty list
python_app |
python_app | During handling of the above exception, another exception occurred:
python_app |
python_app | Traceback (most recent call last):
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 586, in spawn_worker
python_app | worker.init_process()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/workers/base.py", line 135, in init_process
python_app | self.load_wsgi()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/workers/base.py", line 144, in load_wsgi
python_app | self.wsgi = self.app.wsgi()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/app/base.py", line 67, in wsgi
python_app | self.callable = self.load()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py", line 52, in load
python_app | return self.load_wsgiapp()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py", line 41, in load_wsgiapp
python_app | return util.import_app(self.app_uri)
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/util.py", line 350, in import_app
python_app | __import__(module)
python_app | File "/main.py", line 21, in <module>
python_app | redis.set('sessionvisitors', 0)
python_app | File "/usr/local/lib/python3.7/site-packages/redis/client.py", line 1766, in set
python_app | return self.execute_command('SET', *pieces)
python_app | File "/usr/local/lib/python3.7/site-packages/redis/client.py", line 875, in execute_command
python_app | conn = self.connection or pool.get_connection(command_name, **options)
python_app | File "/usr/local/lib/python3.7/site-packages/redis/connection.py", line 1181, in get_connection
python_app | connection = self.make_connection()
python_app | File "/usr/local/lib/python3.7/site-packages/redis/connection.py", line 1220, in make_connection
python_app | return self.connection_class(**self.connection_kwargs)
python_app | File "/usr/local/lib/python3.7/site-packages/redis/connection.py", line 502, in __init__
python_app | self.port = int(port)
python_app | TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
python_app | [2021-09-27 14:36:16 +0000] [9] [INFO] Worker exiting (pid: 9)
python_app | Traceback (most recent call last):
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 203, in run
python_app | self.manage_workers()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 548, in manage_workers
python_app | self.spawn_workers()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 620, in spawn_workers
python_app | time.sleep(0.1 * random.random())
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 245, in handle_chld
python_app | self.reap_workers()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 528, in reap_workers
python_app | raise HaltServer(reason, self.WORKER_BOOT_ERROR)
python_app | gunicorn.errors.HaltServer: <HaltServer 'Worker failed to boot.' 3>
python_app |
python_app | During handling of the above exception, another exception occurred:
python_app |
python_app | Traceback (most recent call last):
python_app | File "/usr/local/bin/gunicorn", line 8, in <module>
python_app | sys.exit(run())
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py", line 61, in run
python_app | WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]").run()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/app/base.py", line 223, in run
python_app | super(Application, self).run()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/app/base.py", line 72, in run
python_app | Arbiter(self).run()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 232, in run
python_app | self.halt(reason=inst.reason, exit_status=inst.exit_status)
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 345, in halt
python_app | self.stop()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 396, in stop
python_app | time.sleep(0.1)
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 245, in handle_chld
python_app | self.reap_workers()
python_app | File "/usr/local/lib/python3.7/site-packages/gunicorn/arbiter.py", line 528, in reap_workers
python_app | raise HaltServer(reason, self.WORKER_BOOT_ERROR)
python_app | gunicorn.errors.HaltServer: <HaltServer 'Worker failed to boot.' 3>
If you look at the stack trace closely, the app error you're seeing is there because the Redis connection fails to open. The reason for the failed connection is that REDIS_HOST is set incorrectly to 127.0.0.1 inside your Dockerfile. To fix this, the values for REDIS_HOST and REDIS_PORT should be passed into your app container by docker-compose, since that's the layer that actually knows where Redis lives. Your Dockerfile only describes your app container, which depends on Redis but doesn't have a clue where it might be running.
Since compose makes services reachable at hostnames equal to the service name by default, Redis should be available at just tcp://redis:6379, so I would give these values a shot to start with:
web:
  build: .
  container_name: "python_app"
  ports:
    - "8000:8000"
  depends_on:
    - redis
  environment:
    REDIS_HOST: redis
    REDIS_PORT: 6379
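On the app side it may also be worth hardening the environment lookup, so a missing variable falls back to something sensible instead of passing None into the client; a sketch, where the defaults are assumptions:
import os
import redis

r = redis.Redis(
    host=os.environ.get('REDIS_HOST', 'redis'),      # compose service name (assumed default)
    port=int(os.environ.get('REDIS_PORT', '6379')),  # cast once, with a sane default
    password=None,
    db=0,
)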

`docker-compose up` : sqlalchemy.exc.ProgrammingError: (psycopg2.errors.UndefinedTable) relation "connection" does not exist

I'm trying to launch the Airflow UI using Docker for a data pipeline project that uses an AWS Redshift cluster. I'm planning to use LocalExecutor in Airflow. Within the airflow.cfg file I specified the sql_alchemy_conn variable with the AWS Redshift cluster information, in this connection string format: sql_alchemy_conn = postgresql+psycopg2://user:password@clusterinfo.region.redshift.amazonaws.com:5439/db
After running docker build -t my-airflow . successfully, docker-compose up outputs the error below:
webserver_1 | Traceback (most recent call last):
webserver_1 | File "/usr/local/bin/airflow", line 37, in <module>
webserver_1 | args.func(args)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/airflow/utils/cli.py", line 75, in wrapper
webserver_1 | return f(*args, **kwargs)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/airflow/bin/cli.py", line 1040, in scheduler
webserver_1 | job.run()
webserver_1 | File "/usr/local/lib/python3.8/site-packages/airflow/jobs/base_job.py", line 215, in run
webserver_1 | session.commit()
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 1042, in commit
webserver_1 | self.transaction.commit()
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 504, in commit
webserver_1 | self._prepare_impl()
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 483, in _prepare_impl
webserver_1 | self.session.flush()
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 2523, in flush
webserver_1 | self._flush(objects)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 2664, in _flush
webserver_1 | transaction.rollback(_capture_exception=True)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
webserver_1 | compat.raise_(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
webserver_1 | raise exception
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 2624, in _flush
webserver_1 | flush_context.execute()
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
webserver_1 | rec.execute(self)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/unitofwork.py", line 586, in execute
webserver_1 | persistence.save_obj(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/persistence.py", line 239, in save_obj
webserver_1 | _emit_insert_statements(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/persistence.py", line 1135, in _emit_insert_statements
webserver_1 | result = cached_connections[connection].execute(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1020, in execute
webserver_1 | return meth(self, multiparams, params)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
webserver_1 | return connection._execute_clauseelement(self, multiparams, params)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1133, in _execute_clauseelement
webserver_1 | ret = self._execute_context(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1213, in _execute_context
webserver_1 | self._handle_dbapi_exception(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1517, in _handle_dbapi_exception
webserver_1 | util.raise_(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
webserver_1 | raise exception
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1211, in _execute_context
webserver_1 | context = constructor(dialect, self, conn, *args)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 816, in _init_compiled
webserver_1 | self._process_executesingle_defaults()
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 1575, in _process_executesingle_defaults
webserver_1 | val = self.get_insert_default(c)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 2433, in get_insert_default
webserver_1 | return self._execute_scalar(exc, column.type)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 1103, in _execute_scalar
webserver_1 | conn._cursor_execute(self.cursor, stmt, default_params, context=self)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1360, in _cursor_execute
webserver_1 | self._handle_dbapi_exception(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1517, in _handle_dbapi_exception
webserver_1 | util.raise_(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
webserver_1 | raise exception
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1358, in _cursor_execute
webserver_1 | self.dialect.do_execute(cursor, statement, parameters, context)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
webserver_1 | cursor.execute(statement, parameters)
webserver_1 | sqlalchemy.exc.StatementError: (sqlalchemy.exc.ProgrammingError) (psycopg2.errors.UndefinedTable) relation "job_id_seq" does not exist
webserver_1 |
webserver_1 | [SQL: INSERT INTO job (id, dag_id, state, job_type, start_date, end_date, latest_heartbeat, executor_class, hostname, unixname) VALUES (%(id)s, %(dag_id)s, %(state)s, %(job_type)s, %(start_date)s, %(end_date)s, %(latest_heartbeat)s, %(executor_class)s, %(hostname)s, %(unixname)s)]
webserver_1 | [parameters: [{'job_type': 'SchedulerJob', 'unixname': 'root', 'executor_class': 'NoneType', 'hostname': 'fb44572de502', 'start_date': datetime.datetime(2020, 5, 1 ... (48 characters truncated) ... 'state': 'running', 'latest_heartbeat': datetime.datetime(2020, 5, 15, 4, 40, 59, 953160, tzinfo=<Timezone [UTC]>), 'dag_id': None, 'end_date': None}]]
webserver_1 | (Background on this error at: http://sqlalche.me/e/f405)
webserver_1 | Traceback (most recent call last):
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1283, in _execute_context
webserver_1 | self.dialect.do_execute(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
webserver_1 | cursor.execute(statement, parameters)
webserver_1 | psycopg2.errors.UndefinedTable: relation "connection" does not exist
webserver_1 |
webserver_1 |
webserver_1 | The above exception was the direct cause of the following exception:
webserver_1 |
webserver_1 | Traceback (most recent call last):
webserver_1 | File "/usr/local/bin/airflow", line 37, in <module>
webserver_1 | args.func(args)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/airflow/utils/cli.py", line 75, in wrapper
webserver_1 | return f(*args, **kwargs)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/airflow/bin/cli.py", line 900, in webserver
webserver_1 | app = cached_app_rbac(None) if settings.RBAC else cached_app(None)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/airflow/www/app.py", line 233, in cached_app
webserver_1 | app = create_app(config, testing)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/airflow/www/app.py", line 88, in create_app
webserver_1 | from airflow.www import views
webserver_1 | File "/usr/local/lib/python3.8/site-packages/airflow/www/views.py", line 2443, in <module>
webserver_1 | class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
webserver_1 | File "/usr/local/lib/python3.8/site-packages/airflow/www/views.py", line 2534, in ChartModelView
webserver_1 | 'conn_id': _connection_ids()
webserver_1 | File "/usr/local/lib/python3.8/site-packages/airflow/utils/db.py", line 74, in wrapper
webserver_1 | return func(*args, **kwargs)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/airflow/www/views.py", line 2437, in _connection_ids
webserver_1 | return [(c.conn_id, c.conn_id) for c in (
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3481, in __iter__
webserver_1 | return self._execute_and_instances(context)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3506, in _execute_and_instances
webserver_1 | result = conn.execute(querycontext.statement, self._params)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1020, in execute
webserver_1 | return meth(self, multiparams, params)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
webserver_1 | return connection._execute_clauseelement(self, multiparams, params)
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1133, in _execute_clauseelement
webserver_1 | ret = self._execute_context(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1323, in _execute_context
webserver_1 | self._handle_dbapi_exception(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1517, in _handle_dbapi_exception
webserver_1 | util.raise_(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
webserver_1 | raise exception
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1283, in _execute_context
webserver_1 | self.dialect.do_execute(
webserver_1 | File "/usr/local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
webserver_1 | cursor.execute(statement, parameters)
webserver_1 | sqlalchemy.exc.ProgrammingError: (psycopg2.errors.UndefinedTable) relation "connection" does not exist
webserver_1 |
webserver_1 | [SQL: SELECT connection.conn_id AS connection_conn_id
webserver_1 | FROM connection GROUP BY connection.conn_id]
webserver_1 | (Background on this error at: http://sqlalche.me/e/f405)
Dockerfile:
FROM python:3
WORKDIR /usr/local/airflow/
ENV AIRFLOW_HOME=/usr/local/airflow
COPY requirements.txt ./
RUN pip install --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt
COPY script/entrypoint.sh /entrypoint.sh
COPY config/airflow.cfg $AIRFLOW_HOME/airflow.cfg
ENTRYPOINT ["/entrypoint.sh"]
CMD ["webserver"]
docker-compose.yml:
version: '3.7'
services:
  postgres:
    image: postgres:9.6
    environment:
      - POSTGRES_USER=x
      - POSTGRES_PASSWORD=x
      - POSTGRES_DB=x
    logging:
      options:
        max-size: 10m
        max-file: "3"
    ports:
      - "5439"
  webserver:
    image: my-airflow
    build:
      context: .
      args:
        AIRFLOW_DEPS: "postgres, aws"
    restart: always
    depends_on:
      - postgres
    environment:
      - LOAD_EX=n
      - EXECUTOR=Local
    logging:
      options:
        max-size: 10m
        max-file: "3"
    volumes:
      - ./dags:/usr/local/airflow/dags
      - ./plugins:/usr/local/airflow/plugins
      - ./requirements.txt:/requirements.txt
    ports:
      - "8080"
    command: webserver
    # healthcheck:
    #   test: ["CMD-SHELL", "[ -f /usr/local/airflow/airflow-webserver.pid ]"]
    #   interval: 30s
    #   timeout: 30s
    #   retries: 3
entrypoint.sh:
#!/usr/bin/env bash
airflow initdb
airflow scheduler &
exec airflow webserver
requirements.txt:
apache-airflow==1.10.9
cryptography==2.9.2
docutils==0.15.2
boto3==1.12.41
notebook==6.0.3
numpy==1.18.1
pandas==0.25.3
psycopg2-binary==2.8.5
typing-extensions==3.7.4.2
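As a quick sanity check of the sql_alchemy_conn string from the question, independent of Airflow, the same URL can be tested with SQLAlchemy directly (the values below are placeholders):
from sqlalchemy import create_engine, text

# Placeholder credentials and host -- substitute the real cluster info.
engine = create_engine(
    "postgresql+psycopg2://user:password@clusterinfo.region.redshift.amazonaws.com:5439/db"
)
with engine.connect() as conn:
    print(conn.execute(text("SELECT 1")).scalar())  # prints 1 if the DB is reachable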
