I use a PostgreSQL database with backends in Python and other services. I need to transfer the database structure using Alembic.
Structure of the program:
docker-compose.yml:
version: "3.9"
services:
db:
container_name: postgresql_db
image: postgres
restart: always
ports:
- "5432:5432"
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=password
- POSTGRES_DB=carslist
pgadmin:
container_name: pgadmin
image: dpage/pgadmin4
environment:
- PGADMIN_DEFAULT_EMAIL=admin#admin.com
- PGADMIN_DEFAULT_PASSWORD=admin
ports:
- "5050:80"
depends_on:
- db
server:
build: ./backend
restart: always
ports:
- "8000:8000"
depends_on:
- db
app:
container_name: app
build: .
restart: always
ports:
- "3000:3000"
depends_on:
- server
server (backend) Dockerfile:
FROM python:3.9
WORKDIR /app
COPY requirements.txt /app/requirements.txt
#RUN pip3 install --upgrade pip
RUN pip3 install -r requirements.txt
COPY . .
EXPOSE 8000
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
env.py:
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
import os, sys
from dotenv import load_dotenv
from app.database import DATABASE_URL
from app.base import Base

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
load_dotenv(os.path.join(BASE_DIR, ".env"))
sys.path.append(BASE_DIR)

config = context.config
config.set_main_option("sqlalchemy.url", os.environ["DATABASE_URL"])

if config.config_file_name is not None:
    fileConfig(config.config_file_name)

target_metadata = Base.metadata


def run_migrations_offline() -> None:
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
        include_schemas=True,
    )
    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online() -> None:
    # configuration['sqlalchemy.url'] = DATABASE_URL
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            version_table_schema=target_metadata.schema,
            include_schemas=True,
        )
        with context.begin_transaction():
            context.execute('SET search_path TO public')
            context.run_migrations()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
alembic.ini:
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = migratefile
prepend_sys_path = .
version_path_separator = os
sqlalchemy.url =
[post_write_hooks]
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
After docker-compose is started ("docker-compose build" and "docker-compose up"), I run the following command in the server container:
docker-compose run server alembic revision --autogenerate -m "firstmigrate"
I get the following output:
Then I execute the command:
docker-compose run server alembic upgrade head
I get the following output:
No migration file was created in versions, and no table appeared in the database. I looked at the files in the container itself, and there was no migration file there either, so whatever was reported as "generated" actually exists nowhere.
After the migration, only an empty alembic_version table appeared in the database.
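For context on why the files vanish: docker-compose run starts a brand-new container from the image, so a file generated by alembic revision is written into that one-off container's filesystem, never into the image and never onto the host, and the next docker-compose run starts from a clean image again. One workaround (a sketch, assuming the Alembic files live under /app as in the Dockerfile above) is to bind-mount the backend directory in docker-compose.yml so generated versions land on the host:
server:
  build: ./backend
  volumes:
    - ./backend:/app
Alternatively, docker-compose exec server alembic revision --autogenerate -m "firstmigrate" runs inside the already-running server container, so a subsequent upgrade head in that same container can find the generated file.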
models.py:
import datetime as _dt
import sqlalchemy as _sql
import sqlalchemy.ext.declarative as _declarative
import sqlalchemy.orm as _orm
import app.database as _database
class Car(_database.Base):
    __tablename__ = "cars"

    license_plate = _sql.Column(_sql.String(50), primary_key=True)
    model = _sql.Column(_sql.String(250), default=None)
    owner = _sql.Column(_sql.Integer, default=None)
    vehicle_mileage = _sql.Column(_sql.Integer, default=None)
database.py:
import sqlalchemy as _sql
import sqlalchemy.ext.declarative as _declarative
import sqlalchemy.orm as _orm
from dotenv import load_dotenv
load_dotenv(".env")
DATABASE_URL = "postgresql+psycopg2://postgres:password#db:5432/carslist"
engine = _sql.create_engine(DATABASE_URL)
SessionLocal = _orm.sessionmaker(autocommit=False,
autoflush=False,
bind=engine)
Base = _declarative.declarative_base()
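env.py already reads os.environ["DATABASE_URL"] while database.py hard-codes the URL, so the two can silently diverge. A minimal sketch of keeping them in sync (assuming a .env file that defines DATABASE_URL; this is not the original code):
import os
from dotenv import load_dotenv

load_dotenv(".env")

# Fall back to the compose-network URL when the variable is not set.
DATABASE_URL = os.getenv(
    "DATABASE_URL",
    "postgresql+psycopg2://postgres:password@db:5432/carslist",
)
This way the same value feeds both the application engine and Alembic.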
I want to create a simple Kotlin app that uses PostgreSQL and Ktor, with everything embedded in Docker containers.
So far I have managed to run PostgreSQL and pgAdmin separately; they connected to each other successfully, and I created a docker-compose.yml file for that which works fine for me. The problem starts when I want to add my Kotlin app to it.
Here is my docker-compose.yml file
version: "3.9"
networks:
m8network:
ipam:
config:
- subnet: 172.20.0.0/24
services:
postgres:
image: postgres
environment:
- "POSTGRES_USER=SomeFancyUser"
- "POSTGRES_PASSWORD=pwd"
- "POSTGRES_DB=MSC8"
ports:
- "5432:5432"
volumes:
# - postgres-data:/var/lib/postgresql/data
- D:\docker\myApp\data:/var/lib/postgresql/data
networks:
m8network:
ipv4_address: 172.20.0.6
pgadmin:
image: dpage/pgadmin4
depends_on:
- postgres
environment:
- "PGADMIN_DEFAULT_EMAIL=SomeFancyUser#domain.com"
- "PGADMIN_DEFAULT_PASSWORD=pwd"
# - "PGADMIN_ENABLE_TLS=False"
ports:
- "5001:80"
networks:
m8network:
app:
build: .
ports:
- "5000:8080"
links:
- postgres
depends_on:
- postgres
restart: on-failure
networks:
m8network:
#volumes:
# postgres-data:
# driver: local
And here is my app source code.
package com.something.m8

import com.squareup.sqldelight.db.SqlDriver
import com.squareup.sqldelight.sqlite.driver.asJdbcDriver
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.HikariDataSource
import io.ktor.application.*
import io.ktor.html.*
import io.ktor.http.*
import io.ktor.response.*
import io.ktor.routing.*
import io.ktor.server.engine.*
import io.ktor.server.netty.*
import kotlinx.html.*
import java.io.PrintWriter
import java.util.*

fun HTML.index() {
    head {
        title("Hello from Ktor!")
    }
    body {
        div {
            +"Hello from Ktor"
        }
    }
}

fun main() {
    println("starting app")
    val props = Properties()
    props.setProperty("dataSourceClassName", "org.postgresql.ds.PGSimpleDataSource")
    props.setProperty("dataSource.user", "SomeFancyUser")
    props.setProperty("dataSource.password", "pwd")
    props.setProperty("dataSource.databaseName", "M8")
    props.setProperty("dataSource.portNumber", "5432")
    props.setProperty("dataSource.serverName", "172.20.0.6")
    props["dataSource.logWriter"] = PrintWriter(System.out)
    println("a")
    val config = HikariConfig(props)
    println("b")
    val ds = HikariDataSource(config)
    println("c")
    val driver: SqlDriver = ds.asJdbcDriver()
    println("d")
    MSC8.Schema.create(driver)
    println("e")
    embeddedServer(Netty, port = 8080,
        // host = "127.0.0.1"
    ) {
        routing {
            get("/") {
                call.respondHtml(HttpStatusCode.OK, HTML::index)
            }
            get("/m8/{code}") {
                val code = call.parameters["code"]
                println("code $code")
                call.respondRedirect("https://google.com")
            }
        }
    }.start(wait = true)
}
And the Dockerfile for the app:
#FROM openjdk:8
FROM gradle:6.7-jdk8
WORKDIR /var/www/html
RUN mkdir -p ./app/
WORKDIR /var/www/html/app
COPY build.gradle.kts .
COPY gradle.properties .
COPY settings.gradle.kts .
COPY Redirect/src ./Redirect/src
COPY Redirect/build.gradle.kts ./Redirect/build.gradle.kts
COPY gradlew .
COPY gradle ./gradle
EXPOSE 8080
USER root
WORKDIR /var/www/html
RUN pwd
RUN ls
RUN chown -R gradle ./app
USER gradle
WORKDIR /var/www/html/app
RUN ./gradlew run
With this setup I have two problems
First problem:
When I run docker-compose.exe up --build I receive the exception HikariPool$PoolInitializationException: Failed to initialize pool: The connection attempt failed. on the line val ds = HikariDataSource(config).
I set up a static IP for postgres (172.20.0.6), and when I use this IP in pgAdmin it works, so why can't my app connect to postgres?
Second problem:
I tried to test whether the app starts properly, and the basics all work fine. So I commented out all source code related to the DB connection. Since that point, when I run docker-compose.exe up --build, my app displays only the letter e from the line println("e") and everything seems frozen: postgres and pgAdmin don't start up, the container seems unresponsive, and the app doesn't respond on port 5000 or 8080. Is there any way I can run the app so it won't block the execution of the other parts?
First problem:
I started using the host name instead of the IP address, so now I use postgres instead of 172.20.0.6. The rest of it is connected to the second problem.
Second problem:
The issue was that I was starting the app during the build phase of the container.
Instead of RUN ./gradlew run I used:
RUN gradle build
ENTRYPOINT ["gradle","run"]
Also, I noticed that I don't have to use the Gradle wrapper while I'm using FROM gradle:6.7-jdk8.
Now everything is working fine.
I have a docker-compose file as seen below. The app and the flask server are separate containers. I cannot connect to the "python" container although both are on the same network. However, if I expose port 5000 to the outside via ports (e.g. "9000:5000"), then it is accessible. But I only want "app" to access "python" internally, not from outside the host.
Isn't this possible?
version: '3'
services:
  python:
    build:
      context: ./docker/python
    image: python:3.6.12
    volumes:
      - IQData:/NMIQV2/Data
      - IQCode:/NMIQV2/Code
      - IQAnalysis:/NMIQV2/Analysis
    networks:
      - base-network
  app:
    build:
      context: .
    ports:
      - "8080:80"
      - "5000:5000"
    networks:
      - base-network
    links:
      - redis
      - mongo
      - python
    depends_on:
      - redis
      - mongo
      - python
networks:
  base-network:
    driver: bridge
python container Dockerfile:
FROM python:3.6.12
EXPOSE 5000
# set the working directory in the container
WORKDIR /NMIQV2/Code
# copy the content of the local src directory to the working directory
COPY src/ .
# copy the dependencies file to the working directory
COPY requirements.txt .
# install dependencies
RUN pip install -r requirements.txt
server.py
from flask import Flask

server = Flask(__name__)

@server.route("/")
def hello():
    return "Hello World!"

if __name__ == "__main__":
    server.run(host='0.0.0.0', port=5000)
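For what it's worth, publishing a port via ports: only affects reachability from the host; containers on the same Compose network can already reach each other by service name on the container port, so "app" can talk to "python" without any ports: entry on the python service. Note also that the python Dockerfile above defines no CMD or ENTRYPOINT, so nothing actually launches server.py when the container starts. Assuming a CMD ["python", "server.py"] is added and requests is available in the app container (both assumptions, not part of the original setup), a sketch of the internal call:
import requests

# "python" is the Compose service name and resolves on the shared
# base-network; 5000 is the container port, no host publishing needed.
response = requests.get("http://python:5000/")
print(response.text)  # expected: "Hello World!"
So removing "5000:5000" from the app service should give internal-only access.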
As the question says, here is my situation.
My project folder is:
PROJ
|____docker-compose.yml
|____servDir/
|____Dockerfile
|____server.py
In the docker-compose.yml:
service1:
  image: img1:v0.1
  container_name: cont1
  build: ./servDir/.
  ports:
    - "5001:5002"
server.py:
from flask import Flask, request
import json

app = Flask(__name__)
PORT = 5001

@app.route("/greetings/")
def hello():
    return "Hello World!"

if __name__ == '__main__':
    app.run(host="0.0.0.0", port=int(PORT), debug=True)
When I run docker-compose up and go to http://localhost:5001/greetings/ I receive an ERR_CONNECTION_REFUSED.
Instead, if I set ports to 5001:5001, I am able to receive the page content.
Why? Should I always set them equal in order to reach the container from the browser?
I thought that the ports configuration was HOST:CONTAINER, and that the browser would be a HOST service.
UPDATE:
Dockerfile:
FROM python:3
WORKDIR /home/python/app/
COPY . /home/python/app/
RUN chmod a+x *.py
CMD ["python", "./server.py"]
This is right: the format is HOST:CONTAINER.
Try this to expose it to your localhost and LAN:
service1:
  image: img1:v0.1
  container_name: cont1
  build: ./servDir/.
  ports:
    - "0.0.0.0:5001:5002"
or this to expose it to your localhost only:
service1:
  image: img1:v0.1
  container_name: cont1
  build: ./servDir/.
  ports:
    - "127.0.0.1:5001:5002"
Also, you wrote:
When I run docker-compose up and go to http://localhost:5001/greetings/ I receive a ERR_CONNECTION_REFUSED.
Looking at your docker-compose, the mapping "5001:5002" forwards host port 5001 to container port 5002, but your Flask app listens on port 5001 inside the container, so nothing is bound to 5002.
Change the server.py config so Flask listens on the container side of the mapping:
from flask import Flask, request
import json

app = Flask(__name__)
PORT = 5002

@app.route("/greetings/")
def hello():
    return "Hello World!"

if __name__ == '__main__':
    app.run(host="0.0.0.0", port=int(PORT), debug=True)
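One quick way to see this kind of mismatch (assuming curl is available in the image): with the original PORT = 5001, running
docker-compose exec service1 curl http://localhost:5001/greetings/
succeeds inside the container while http://localhost:5001/greetings/ fails from the host, which shows the ports mapping, not Flask, is what needs fixing.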
I am packing my application into a jar and running it with Docker via a bash script (which executes the jar). I would like to set the neo4j database URL as an environment variable in the docker-compose file; however, I still get this error:
Exception in thread "main" com.typesafe.config.ConfigException$NotResolved: neo4j.url has not been resolved, you need to call Config#resolve(), see API docs for Config#resolve()
How can I solve this problem, and where should I add the necessary configuration?
I have only set up the url variable in the configuration file:
neo4j {
  url = "bolt://localhost:7687"
  url = ${?HOSTNAME}
  user = "user"
  password = "password"
}
Also, I use these values in the configuration method:
def getNeo4jConfig(configName: String) = {
  val neo4jLocalConfig = ConfigFactory.parseFile(new File("configs/local_neo4j.conf"))
  neo4jLocalConfig.resolve()
  val driver = configName match {
    case "neo4j_local" => GraphDatabase.driver(neo4jLocalConfig.getString("neo4j.url"),
      AuthTokens.basic(neo4jLocalConfig.getString("neo4j.user"), neo4jLocalConfig.getString("neo4j.password")))
    case _ => GraphDatabase.driver("url", AuthTokens.basic("user", "password"))
  }
  driver.session
}
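(Worth noting while debugging this: Typesafe Config objects are immutable, so neo4jLocalConfig.resolve() returns a new, resolved Config rather than resolving in place. The method above discards that return value and keeps reading from the unresolved config, which is exactly what the "you need to call Config#resolve()" message complains about; assigning the result, e.g. val resolved = neo4jLocalConfig.resolve(), and calling getString on resolved avoids it.)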
In docker-compose.yml I defined the value of the hostname:
version: '3.3'
services:
  neo4j_db:
    image: neo4j:latest
    ports:
      - "7474:7474"
      - "7473:7473"
      - "7687:7687"
    volumes:
      - $HOME/neo4j/import:/var/lib/neo4j/import
      - $HOME/neo4j/data:/neo4j/data
      - $HOME/neo4j/conf:/neo4j/conf
      - $HOME/neo4j/logs:/neo4j/logs
    environment:
      - NEO4J_dbms_active__database=graphImport.db
  benchmarks:
    image: "container"
    volumes:
      - ./:/workdir1
    working_dir: /workdir1
    links:
      - neo4j_db
    environment:
      - HOSTNAME=myhoat
Also, the bash script looks like this:
#!/usr/bin/env bash
for run in {1..2}
do
    java -cp "target/scala-2.11/benchmarking.jar" benchmarks/Main $1 $2
done
Set these environment variables in a .env file in the same location as your docker-compose.yml file:
.env
VAR1=value
VAR2=2.0
VAR3=value3
...
(Note that Compose reads this file literally: values are plain strings, so shell constructs such as backticks are not executed.)
And for example, use them in your compose file sections:
docker-compose.yml
...
build:
  dockerfile: Dockerfile_${VAR2}
...
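A quick way to verify the substitution is docker-compose config, which prints the compose file with every variable resolved.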
I defined the hostname as an arg in the Dockerfile and then set it in the docker-compose file for the application build. That solved the problem!
Docker-compose file:
version: '3.3'
services:
  benchmarks-app:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - HOST=neo4jdb
    volumes:
      - ./:/workdir
    working_dir: /workdir
Dockerfile args definition:
FROM java:8
ARG HOST
ENV SCALA_VERSION 2.11.8
ENV SBT_VERSION 1.1.1
ENV SPARK_VERSION 2.2.0
ENV SPARK_DIST spark-$SPARK_VERSION-bin-hadoop2.6
ENV SPARK_ARCH $SPARK_DIST.tgz
ENV NEO4J_CONFIG $DB_CONFIG
ENV BENCHMARK $BENCHMARK_NAME
ENV HOSTNAME bolt://$HOST:7687
...
I already have a mysql container named "mysqlDemoStorage" running, exposing port 3306 to 0.0.0.0:3306. I also have a flask app that provides a login page and a table-displaying page. The flask app works quite well on the host. The login page connects to the "user" table in the mysql container, and the table-displaying page connects to another table holding all the data to display.
The docker-compose file I used to create the mysql container is as follows:
version: '3'
services:
  mysql:
    container_name: mysqlDemoStorage
    environment:
      MYSQL_ROOT_PASSWORD: "demo"
    command:
      --character-set-server=utf8
    ports:
      - 3306:3306
    image: "docker.io/mysql:latest"
    restart: always
Now I want to dockerize the flask app so that I can still view the app from the host. The mysql container details are as follows:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
c48955b3589e mysql:latest "docker-entrypoint.s…" 13 days ago Up 49 minutes 0.0.0.0:3306->3306/tcp, 33060/tcp mysqlDemoStorage
The Dockerfile I wrote for the flask app is as follows:
FROM python:latest
WORKDIR /storage_flask
ADD . /storage_flask
RUN pip install -r requirements.txt
EXPOSE 5000
ENTRYPOINT ["python","run.py"]
The flask image can be successfully built, but when I run the image, the page fails to load. One point I think causes the problem is the __init__.py file that initializes the flask app, which is as follows:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
app = Flask(__name__)
app.config['SECRET_KEY'] = 'aafa4f8047ce31126011638be8530da6'
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:demo@localhost:3306/storage'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view = "login"
login_manager.login_message_category = 'info'
from storage_flask import routes
I was thinking of passing the IP of the mysql container to the flask container as the config string for the DB connection, but I'm not sure how to do it.
Could someone help solve the problem? Thank you.
change this line
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:demo@localhost:3306/storage'
to
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:demo@mysql:3306/storage'
You also need to make sure that both containers are connected to the same network; for that, update your docker-compose setup to something like the two files below.
version: '3.7'
networks:
  my_network_name:
    name: my_network_name
    external: false
services:
  mysql:
    container_name: mysqlDemoStorage
    environment:
      MYSQL_ROOT_PASSWORD: "demo"
    command:
      --character-set-server=utf8
    ports:
      - 3306:3306
    image: "docker.io/mysql:latest"
    restart: always
    networks:
      - my_network_name
Second file:
version: '3.7'
networks:
  my_network_name:
    name: my_network_name
    external: true
services:
  python_app:
    container_name: pythonDemoStorage
    ports:
      - 5000:5000
    image: "Myimage"
    restart: always
    networks:
      - my_network_name
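To confirm the two containers really share the network, a small sanity check can be run from inside the flask container (a sketch; it assumes pymysql is in requirements.txt, as the mysql+pymysql URI implies):
from sqlalchemy import create_engine, text

# "mysql" is the service name from the compose file and resolves
# inside my_network_name; "storage" is the database used above.
engine = create_engine("mysql+pymysql://root:demo@mysql:3306/storage")
with engine.connect() as conn:
    print(conn.execute(text("SELECT 1")).scalar())  # expected: 1
If this prints 1, the network and credentials are fine and any remaining failure is in the app itself.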